// SPDX-License-Identifier: GPL-2.0-only

#include <test_progs.h>
#include "test_lookup_and_delete.skel.h"

#define START_VALUE 1234
#define NEW_VALUE 4321
#define MAX_ENTRIES 2

static int duration;
static int nr_cpus;

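/* Populate the map with keys 1..MAX_ENTRIES, each mapped to START_VALUE. */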
static int fill_values(int map_fd)
{
	__u64 key, value = START_VALUE;
	int err;

	for (key = 1; key < MAX_ENTRIES + 1; key++) {
		err = bpf_map_update_elem(map_fd, &key, &value, BPF_NOEXIST);
		if (!ASSERT_OK(err, "bpf_map_update_elem"))
			return -1;
	}

	return 0;
}

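/* Same as fill_values(), but sets START_VALUE for every possible CPU. */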
static int fill_values_percpu(int map_fd)
{
	__u64 key, value[nr_cpus];
	int i, err;

	for (i = 0; i < nr_cpus; i++)
		value[i] = START_VALUE;

	for (key = 1; key < MAX_ENTRIES + 1; key++) {
		err = bpf_map_update_elem(map_fd, &key, value, BPF_NOEXIST);
		if (!ASSERT_OK(err, "bpf_map_update_elem"))
			return -1;
	}

	return 0;
}

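/* Open the skeleton, switch hash_map to the requested map type and size,
 * load the program and return the map fd via *map_fd.
 */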
static struct test_lookup_and_delete *setup_prog(enum bpf_map_type map_type,
						 int *map_fd)
{
	struct test_lookup_and_delete *skel;
	int err;

	skel = test_lookup_and_delete__open();
	if (!ASSERT_OK_PTR(skel, "test_lookup_and_delete__open"))
		return NULL;

	err = bpf_map__set_type(skel->maps.hash_map, map_type);
	if (!ASSERT_OK(err, "bpf_map__set_type"))
		goto cleanup;

	err = bpf_map__set_max_entries(skel->maps.hash_map, MAX_ENTRIES);
	if (!ASSERT_OK(err, "bpf_map__set_max_entries"))
		goto cleanup;

	err = test_lookup_and_delete__load(skel);
	if (!ASSERT_OK(err, "test_lookup_and_delete__load"))
		goto cleanup;

	*map_fd = bpf_map__fd(skel->maps.hash_map);
	if (!ASSERT_GE(*map_fd, 0, "bpf_map__fd"))
		goto cleanup;

	return skel;

cleanup:
	test_lookup_and_delete__destroy(skel);
	return NULL;
}

/* Triggers BPF program that updates map with given key and value */
static int trigger_tp(struct test_lookup_and_delete *skel, __u64 key,
		      __u64 value)
{
	int err;

	skel->bss->set_pid = getpid();
	skel->bss->set_key = key;
	skel->bss->set_value = value;

	err = test_lookup_and_delete__attach(skel);
	if (!ASSERT_OK(err, "test_lookup_and_delete__attach"))
		return -1;

	syscall(__NR_getpgid);

	test_lookup_and_delete__detach(skel);

	return 0;
}

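/* Check basic lookup and delete on a BPF_MAP_TYPE_HASH map: the call must
 * return the stored value and remove the entry.
 */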
static void test_lookup_and_delete_hash(void)
{
	struct test_lookup_and_delete *skel;
	__u64 key, value;
	int map_fd, err;

	/* Set up the program and fill the map. */
	skel = setup_prog(BPF_MAP_TYPE_HASH, &map_fd);
	if (!ASSERT_OK_PTR(skel, "setup_prog"))
		return;

	err = fill_values(map_fd);
	if (!ASSERT_OK(err, "fill_values"))
		goto cleanup;

	/* Lookup and delete element. */
	key = 1;
	err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
					      &key, sizeof(key), &value, sizeof(value), 0);
	if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
		goto cleanup;

	/* Fetched value should match the initially set value. */
	if (CHECK(value != START_VALUE, "bpf_map_lookup_and_delete_elem",
		  "unexpected value=%lld\n", value))
		goto cleanup;

	/* Check that the entry is non-existent. */
	err = bpf_map_lookup_elem(map_fd, &key, &value);
	if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
		goto cleanup;

cleanup:
	test_lookup_and_delete__destroy(skel);
}

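/* Same check for BPF_MAP_TYPE_PERCPU_HASH: the call must return a value
 * for every possible CPU and remove the entry.
 */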
static void test_lookup_and_delete_percpu_hash(void)
{
	struct test_lookup_and_delete *skel;
	__u64 key, val, value[nr_cpus];
	int map_fd, err, i;

	/* Set up the program and fill the map. */
	skel = setup_prog(BPF_MAP_TYPE_PERCPU_HASH, &map_fd);
	if (!ASSERT_OK_PTR(skel, "setup_prog"))
		return;

	err = fill_values_percpu(map_fd);
	if (!ASSERT_OK(err, "fill_values_percpu"))
		goto cleanup;

	/* Lookup and delete element. */
	key = 1;
	err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
					      &key, sizeof(key), value, sizeof(value), 0);
	if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
		goto cleanup;

	for (i = 0; i < nr_cpus; i++) {
		val = value[i];

		/* Fetched value should match the initially set value. */
		if (CHECK(val != START_VALUE, "map value",
			  "unexpected for cpu %d: %lld\n", i, val))
			goto cleanup;
	}

	/* Check that the entry is non-existent. */
	err = bpf_map_lookup_elem(map_fd, &key, value);
	if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
		goto cleanup;

cleanup:
	test_lookup_and_delete__destroy(skel);
}

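/* Check BPF_MAP_TYPE_LRU_HASH: filling the map and inserting one more key
 * evicts the least recently used entry; lookup_and_delete on the new key
 * must return the value written by the BPF program.
 */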
static void test_lookup_and_delete_lru_hash(void)
{
	struct test_lookup_and_delete *skel;
	__u64 key, value;
	int map_fd, err;

	/* Set up the program and fill the LRU map. */
	skel = setup_prog(BPF_MAP_TYPE_LRU_HASH, &map_fd);
	if (!ASSERT_OK_PTR(skel, "setup_prog"))
		return;

	err = fill_values(map_fd);
	if (!ASSERT_OK(err, "fill_values"))
		goto cleanup;

	/* Insert new element at key=3; this should evict and reuse the LRU element. */
	key = 3;
	err = trigger_tp(skel, key, NEW_VALUE);
	if (!ASSERT_OK(err, "trigger_tp"))
		goto cleanup;

	/* Lookup and delete element 3. */
	err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
					      &key, sizeof(key), &value, sizeof(value), 0);
	if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
		goto cleanup;

	/* Value should match the new value. */
	if (CHECK(value != NEW_VALUE, "bpf_map_lookup_and_delete_elem",
		  "unexpected value=%lld\n", value))
		goto cleanup;

	/* Check that entries 3 and 1 are non-existent. */
	err = bpf_map_lookup_elem(map_fd, &key, &value);
	if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
		goto cleanup;

	key = 1;
	err = bpf_map_lookup_elem(map_fd, &key, &value);
	if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
		goto cleanup;

cleanup:
	test_lookup_and_delete__destroy(skel);
}

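/* Check BPF_MAP_TYPE_LRU_PERCPU_HASH: like the LRU test above, but the
 * returned value must be set only on the single CPU that ran the BPF
 * program.
 */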
static void test_lookup_and_delete_lru_percpu_hash(void)
{
	struct test_lookup_and_delete *skel;
	__u64 key, val, value[nr_cpus];
	int map_fd, err, i, cpucnt = 0;

	/* Set up the program and fill the LRU map. */
	skel = setup_prog(BPF_MAP_TYPE_LRU_PERCPU_HASH, &map_fd);
	if (!ASSERT_OK_PTR(skel, "setup_prog"))
		return;

	err = fill_values_percpu(map_fd);
	if (!ASSERT_OK(err, "fill_values_percpu"))
		goto cleanup;

	/* Insert new element at key=3; this should evict and reuse LRU element 1. */
	key = 3;
	err = trigger_tp(skel, key, NEW_VALUE);
	if (!ASSERT_OK(err, "trigger_tp"))
		goto cleanup;

	/* Clear the value buffer before the lookup. */
	for (i = 0; i < nr_cpus; i++)
		value[i] = 0;

	/* Lookup and delete element 3. */
	err = bpf_map__lookup_and_delete_elem(skel->maps.hash_map,
					      &key, sizeof(key), value, sizeof(value), 0);
	if (!ASSERT_OK(err, "bpf_map_lookup_and_delete_elem"))
		goto cleanup;

	/* Check that only one CPU has set the value. */
	for (i = 0; i < nr_cpus; i++) {
		val = value[i];
		if (val) {
			if (CHECK(val != NEW_VALUE, "map value",
				  "unexpected for cpu %d: %lld\n", i, val))
				goto cleanup;
			cpucnt++;
		}
	}
	if (CHECK(cpucnt != 1, "map value", "set for %d CPUs instead of 1!\n",
		  cpucnt))
		goto cleanup;

	/* Check that entries 3 and 1 are non-existent. */
	err = bpf_map_lookup_elem(map_fd, &key, value);
	if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
		goto cleanup;

	key = 1;
	err = bpf_map_lookup_elem(map_fd, &key, value);
	if (!ASSERT_ERR(err, "bpf_map_lookup_elem"))
		goto cleanup;

cleanup:
	test_lookup_and_delete__destroy(skel);
}

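/* Entry point: query the number of possible CPUs once, then run each map
 * flavor as its own subtest.
 */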
void test_lookup_and_delete(void)
{
	nr_cpus = bpf_num_possible_cpus();

	if (test__start_subtest("lookup_and_delete"))
		test_lookup_and_delete_hash();
	if (test__start_subtest("lookup_and_delete_percpu"))
		test_lookup_and_delete_percpu_hash();
	if (test__start_subtest("lookup_and_delete_lru"))
		test_lookup_and_delete_lru_hash();
	if (test__start_subtest("lookup_and_delete_lru_percpu"))
		test_lookup_and_delete_lru_percpu_hash();
}