1#include "bpf_experimental.h"
2
/* Per-CPU payload: the object type allocated with bpf_percpu_obj_new()
 * and stored behind the percpu kptr in struct elem.
 */
struct val_t {
	long b, c, d;
};
6
/* Array map value: carries a kptr to a percpu-allocated struct val_t.
 * Ownership of the object is transferred in/out via bpf_kptr_xchg().
 */
struct elem {
	long sum;
	struct val_t __percpu_kptr *pc;
};
11
/* Single-entry array map whose value embeds the percpu kptr field. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} array SEC(".maps");
18
/* RCU read-side lock/unlock kfuncs, resolved against kernel BTF. */
void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;

/* Number of CPUs to iterate in the summation loops; presumably set by
 * the user-space test harness before load — TODO confirm against caller.
 */
const volatile int nr_cpus;
23
/* Initialize the percpu object: allocate a struct val_t percpu object and
 * publish it into the map value's kptr slot. If another object was already
 * stored there, it comes back owned by us and must be released.
 */
SEC("?fentry/bpf_fentry_test1")
int BPF_PROG(test_array_map_1)
{
	struct val_t __percpu_kptr *p;
	struct elem *e;
	int index = 0;

	e = bpf_map_lookup_elem(&array, &index);
	if (!e)
		return 0;

	/* Allocate a fresh percpu object; NULL on allocation failure. */
	p = bpf_percpu_obj_new(struct val_t);
	if (!p)
		return 0;

	/* Atomically swap the new object into e->pc; the previous value
	 * (possibly NULL) is returned with ownership transferred to us.
	 */
	p = bpf_kptr_xchg(&e->pc, p);
	if (p)
		bpf_percpu_obj_drop(p);

	return 0;
}
46
/* Update percpu data: write test values into the CPU-0 slice of the
 * object previously published by test_array_map_1.
 */
SEC("?fentry/bpf_fentry_test2")
int BPF_PROG(test_array_map_2)
{
	struct val_t __percpu_kptr *p;
	struct val_t *v;
	struct elem *e;
	struct elem *e;
	int index = 0;

	e = bpf_map_lookup_elem(&array, &index);
	if (!e)
		return 0;

	/* Plain read of the kptr (no xchg): ownership stays with the map. */
	p = e->pc;
	if (!p)
		return 0;

	/* Get the per-CPU view for CPU 0; may be NULL if cpu is invalid. */
	v = bpf_per_cpu_ptr(p, 0);
	if (!v)
		return 0;
	v->c = 1;
	v->d = 2;

	return 0;
}
72
/* Results exported for user space: CPU 0's val_t.d and the sum of val_t.c
 * across CPUs (filled by test_array_map_3 / test_array_map_10).
 */
int cpu0_field_d, sum_field_c;
/* PID filter; presumably written by the user-space harness — TODO confirm. */
int my_pid;
75
/* Summarize percpu data: walk every CPU's slice of the published object,
 * capture CPU 0's 'd' field and accumulate 'c' across all CPUs into the
 * globals read back by user space.
 */
SEC("?fentry/bpf_fentry_test3")
int BPF_PROG(test_array_map_3)
{
	struct val_t __percpu_kptr *p;
	int i, index = 0;
	struct val_t *v;
	struct elem *e;

	/* Only run for the test process (upper 32 bits = tgid). */
	if ((bpf_get_current_pid_tgid() >> 32) != my_pid)
		return 0;

	e = bpf_map_lookup_elem(&array, &index);
	if (!e)
		return 0;

	/* Plain kptr read; map retains ownership of the object. */
	p = e->pc;
	if (!p)
		return 0;

	bpf_for(i, 0, nr_cpus) {
		v = bpf_per_cpu_ptr(p, i);
		if (v) {
			if (i == 0)
				cpu0_field_d = v->d;
			sum_field_c += v->c;
		}
	}

	return 0;
}
107
/* Explicitly free allocated percpu data: detach the object from the map
 * with bpf_kptr_xchg(NULL) — taking ownership — and release it.
 */
SEC("?fentry/bpf_fentry_test4")
int BPF_PROG(test_array_map_4)
{
	struct val_t __percpu_kptr *p;
	struct elem *e;
	int index = 0;

	e = bpf_map_lookup_elem(&array, &index);
	if (!e)
		return 0;

	/* delete */
	p = bpf_kptr_xchg(&e->pc, NULL);
	if (p) {
		bpf_percpu_obj_drop(p);
	}

	return 0;
}
128
/* Combined lifecycle exercise in a sleepable program ("fentry.s"):
 * allocate-or-reuse, write via this-cpu and per-cpu pointers, detach,
 * read under RCU after detach, then drop. The explicit
 * bpf_rcu_read_lock()/unlock() pair keeps the non-owning pointer 'p'
 * valid across the whole sequence, including after the kptr is removed
 * from the map.
 */
SEC("?fentry.s/bpf_fentry_test1")
int BPF_PROG(test_array_map_10)
{
	struct val_t __percpu_kptr *p, *p1;
	int i, index = 0;
	struct val_t *v;
	struct elem *e;

	/* Only run for the test process (upper 32 bits = tgid). */
	if ((bpf_get_current_pid_tgid() >> 32) != my_pid)
		return 0;

	e = bpf_map_lookup_elem(&array, &index);
	if (!e)
		return 0;

	bpf_rcu_read_lock();
	p = e->pc;
	if (!p) {
		/* Not yet initialized: allocate and try to publish. */
		p = bpf_percpu_obj_new(struct val_t);
		if (!p)
			goto out;

		p1 = bpf_kptr_xchg(&e->pc, p);
		if (p1) {
			/* race condition */
			bpf_percpu_obj_drop(p1);
		}
	}

	/* Touch the current CPU's slice twice; net effect is c == 0 here. */
	v = bpf_this_cpu_ptr(p);
	v->c = 3;
	v = bpf_this_cpu_ptr(p);
	v->c = 0;

	/* Then set CPU 0's slice to the values user space checks for. */
	v = bpf_per_cpu_ptr(p, 0);
	if (!v)
		goto out;
	v->c = 1;
	v->d = 2;

	/* delete */
	p1 = bpf_kptr_xchg(&e->pc, NULL);
	if (!p1)
		goto out;

	/* 'p' is no longer reachable from the map but remains readable
	 * inside this RCU read-side critical section.
	 */
	bpf_for(i, 0, nr_cpus) {
		v = bpf_per_cpu_ptr(p, i);
		if (v) {
			if (i == 0)
				cpu0_field_d = v->d;
			sum_field_c += v->c;
		}
	}

	/* finally release p */
	bpf_percpu_obj_drop(p1);
out:
	bpf_rcu_read_unlock();
	return 0;
}
189
/* License declaration required for the kernel to load this program. */
char _license[] SEC("license") = "GPL";
191