// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Facebook */
#include <test_progs.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <task_local_storage_helpers.h>
#include "bpf_iter_ipv6_route.skel.h"
#include "bpf_iter_netlink.skel.h"
#include "bpf_iter_bpf_map.skel.h"
#include "bpf_iter_tasks.skel.h"
#include "bpf_iter_task_stack.skel.h"
#include "bpf_iter_task_file.skel.h"
#include "bpf_iter_task_vmas.skel.h"
#include "bpf_iter_task_btf.skel.h"
#include "bpf_iter_tcp4.skel.h"
#include "bpf_iter_tcp6.skel.h"
#include "bpf_iter_udp4.skel.h"
#include "bpf_iter_udp6.skel.h"
#include "bpf_iter_unix.skel.h"
#include "bpf_iter_vma_offset.skel.h"
#include "bpf_iter_test_kern1.skel.h"
#include "bpf_iter_test_kern2.skel.h"
#include "bpf_iter_test_kern3.skel.h"
#include "bpf_iter_test_kern4.skel.h"
#include "bpf_iter_bpf_hash_map.skel.h"
#include "bpf_iter_bpf_percpu_hash_map.skel.h"
#include "bpf_iter_bpf_array_map.skel.h"
#include "bpf_iter_bpf_percpu_array_map.skel.h"
#include "bpf_iter_bpf_sk_storage_helpers.skel.h"
#include "bpf_iter_bpf_sk_storage_map.skel.h"
#include "bpf_iter_test_kern5.skel.h"
#include "bpf_iter_test_kern6.skel.h"
#include "bpf_iter_bpf_link.skel.h"
#include "bpf_iter_ksym.skel.h"
#include "bpf_iter_sockmap.skel.h"

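/* bpf_iter_test_kern3 is expected to fail to load: its program appears to
 * dereference the iterator context's task pointer, which the verifier
 * treats as possibly NULL (btf_id_or_null), without a NULL check.
 */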
static void test_btf_id_or_null(void)
{
	struct bpf_iter_test_kern3 *skel;

	skel = bpf_iter_test_kern3__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern3__open_and_load")) {
		bpf_iter_test_kern3__destroy(skel);
		return;
	}
}

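/* Attach an iterator program (optionally parameterized via opts), create an
 * iterator fd from its link, and drain it with read() until EOF, asserting
 * only that the read finishes without error.
 */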
static void do_dummy_read_opts(struct bpf_program *prog, struct bpf_iter_attach_opts *opts)
{
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	link = bpf_program__attach_iter(prog, opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* Don't check the contents, but ensure read() ends without error */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	ASSERT_GE(len, 0, "read");

	close(iter_fd);

free_link:
	bpf_link__destroy(link);
}

static void do_dummy_read(struct bpf_program *prog)
{
	do_dummy_read_opts(prog, NULL);
}

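/* Exercise reading a map iterator whose link, map fd, and skeleton were all
 * destroyed before the read: the iterator fd must keep the underlying
 * objects alive until it is closed itself.
 */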
static void do_read_map_iter_fd(struct bpf_object_skeleton **skel, struct bpf_program *prog,
				struct bpf_map *map)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct bpf_link *link;
	char buf[16] = {};
	int iter_fd, len;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(map);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(prog, &opts);
	if (!ASSERT_OK_PTR(link, "attach_map_iter"))
		return;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_map_iter")) {
		bpf_link__destroy(link);
		return;
	}

	/* Close link and map fd prematurely */
	bpf_link__destroy(link);
	bpf_object__destroy_skeleton(*skel);
	*skel = NULL;

	/* Give the map-free work a chance to run first if the map is freed */
	usleep(100);
	/* Memory used by both the sock map and the sock local storage map is
	 * freed after two synchronize_rcu() calls, so wait for it
	 */
	kern_sync_rcu();
	kern_sync_rcu();

	/* Read after both map fd and link fd are closed */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	ASSERT_GE(len, 0, "read_iterator");

	close(iter_fd);
}

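/* Read from fd until EOF or until the buffer is full. Returns the number of
 * bytes read, or a negative value on read() error.
 */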
static int read_fd_into_buffer(int fd, char *buf, int size)
{
	int bufleft = size;
	int len;

	do {
		len = read(fd, buf, bufleft);
		if (len > 0) {
			buf += len;
			bufleft -= len;
		}
	} while (len > 0);

	return len < 0 ? len : size - bufleft;
}

static void test_ipv6_route(void)
{
	struct bpf_iter_ipv6_route *skel;

	skel = bpf_iter_ipv6_route__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_ipv6_route__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_ipv6_route);

	bpf_iter_ipv6_route__destroy(skel);
}

static void test_netlink(void)
{
	struct bpf_iter_netlink *skel;

	skel = bpf_iter_netlink__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_netlink__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_netlink);

	bpf_iter_netlink__destroy(skel);
}

static void test_bpf_map(void)
{
	struct bpf_iter_bpf_map *skel;

	skel = bpf_iter_bpf_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_map__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_bpf_map);

	bpf_iter_bpf_map__destroy(skel);
}

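/* Attach a task iterator parameterized with our tid and verify that
 * bpf_link_get_info_by_fd() reports the same tid back in link_info.
 */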
static void check_bpf_link_info(const struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct bpf_link_info info = {};
	struct bpf_link *link;
	__u32 info_len;
	int err;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	link = bpf_program__attach_iter(prog, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return;

	info_len = sizeof(info);
	err = bpf_link_get_info_by_fd(bpf_link__fd(link), &info, &info_len);
	ASSERT_OK(err, "bpf_link_get_info_by_fd");
	ASSERT_EQ(info.iter.task.tid, getpid(), "check_task_tid");

	bpf_link__destroy(link);
}

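/* Helper thread that parks on do_nothing_mutex: tests lock the mutex, spawn
 * this thread to keep a second task alive while an iterator runs, then
 * unlock and join it.
 */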
static pthread_mutex_t do_nothing_mutex;

static void *do_nothing_wait(void *arg)
{
	pthread_mutex_lock(&do_nothing_mutex);
	pthread_mutex_unlock(&do_nothing_mutex);

	pthread_exit(arg);
}

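/* Run the task dump iterator (optionally filtered via opts) and report how
 * many visited tids differed from ours (num_unknown) and how many matched
 * (num_known).
 */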
static void test_task_common_nocheck(struct bpf_iter_attach_opts *opts,
				     int *num_unknown, int *num_known)
{
	struct bpf_iter_tasks *skel;
	pthread_t thread_id;
	void *ret;

	skel = bpf_iter_tasks__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tasks__open_and_load"))
		return;

	ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");

	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
		  "pthread_create");

	skel->bss->tid = getpid();

	do_dummy_read_opts(skel->progs.dump_task, opts);

	*num_unknown = skel->bss->num_unknown_tid;
	*num_known = skel->bss->num_known_tid;

	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
	ASSERT_FALSE(pthread_join(thread_id, &ret) || ret != NULL,
		     "pthread_join");

	bpf_iter_tasks__destroy(skel);
}

static void test_task_common(struct bpf_iter_attach_opts *opts, int num_unknown, int num_known)
{
	int num_unknown_tid, num_known_tid;

	test_task_common_nocheck(opts, &num_unknown_tid, &num_known_tid);
	ASSERT_EQ(num_unknown_tid, num_unknown, "check_num_unknown_tid");
	ASSERT_EQ(num_known_tid, num_known, "check_num_known_tid");
}

static void test_task_tid(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	int num_unknown_tid, num_known_tid;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	test_task_common(&opts, 0, 1);

	linfo.task.tid = 0;
	linfo.task.pid = getpid();
	test_task_common(&opts, 1, 1);

	test_task_common_nocheck(NULL, &num_unknown_tid, &num_known_tid);
	ASSERT_GT(num_unknown_tid, 1, "check_num_unknown_tid");
	ASSERT_EQ(num_known_tid, 1, "check_num_known_tid");
}

static void test_task_pid(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_common(&opts, 1, 1);
}

static void test_task_pidfd(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	int pidfd;

	pidfd = sys_pidfd_open(getpid(), 0);
	if (!ASSERT_GT(pidfd, 0, "sys_pidfd_open"))
		return;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid_fd = pidfd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_common(&opts, 1, 1);

	close(pidfd);
}

static void test_task_sleepable(void)
{
	struct bpf_iter_tasks *skel;

	skel = bpf_iter_tasks__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tasks__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_task_sleepable);

	ASSERT_GT(skel->bss->num_expected_failure_copy_from_user_task, 0,
		  "num_expected_failure_copy_from_user_task");
	ASSERT_GT(skel->bss->num_success_copy_from_user_task, 0,
		  "num_success_copy_from_user_task");

	bpf_iter_tasks__destroy(skel);
}

static void test_task_stack(void)
{
	struct bpf_iter_task_stack *skel;

	skel = bpf_iter_task_stack__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_stack__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_task_stack);
	do_dummy_read(skel->progs.get_task_user_stacks);

	ASSERT_EQ(skel->bss->num_user_stacks, 1, "num_user_stacks");

	bpf_iter_task_stack__destroy(skel);
}

static void test_task_file(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_task_file *skel;
	union bpf_iter_link_info linfo;
	pthread_t thread_id;
	void *ret;

	skel = bpf_iter_task_file__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_file__open_and_load"))
		return;

	skel->bss->tgid = getpid();

	ASSERT_OK(pthread_mutex_lock(&do_nothing_mutex), "pthread_mutex_lock");

	ASSERT_OK(pthread_create(&thread_id, NULL, &do_nothing_wait, NULL),
		  "pthread_create");

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	do_dummy_read_opts(skel->progs.dump_task_file, &opts);

	ASSERT_EQ(skel->bss->count, 0, "check_count");
	ASSERT_EQ(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");

	skel->bss->last_tgid = 0;
	skel->bss->count = 0;
	skel->bss->unique_tgid_count = 0;

	do_dummy_read(skel->progs.dump_task_file);

	ASSERT_EQ(skel->bss->count, 0, "check_count");
	ASSERT_GT(skel->bss->unique_tgid_count, 1, "check_unique_tgid_count");

	check_bpf_link_info(skel->progs.dump_task_file);

	ASSERT_OK(pthread_mutex_unlock(&do_nothing_mutex), "pthread_mutex_unlock");
	ASSERT_OK(pthread_join(thread_id, &ret), "pthread_join");
	ASSERT_NULL(ret, "pthread_join");

	bpf_iter_task_file__destroy(skel);
}

#define TASKBUFSZ		32768

static char taskbuf[TASKBUFSZ];

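/* Dump the current task_struct via BTF and sanity-check the output. Returns
 * non-zero when the subtest should be skipped (e.g. no
 * __builtin_btf_type_id support).
 */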
static int do_btf_read(struct bpf_iter_task_btf *skel)
{
	struct bpf_program *prog = skel->progs.dump_task_struct;
	struct bpf_iter_task_btf__bss *bss = skel->bss;
	int iter_fd = -1, err;
	struct bpf_link *link;
	char *buf = taskbuf;
	int ret = 0;

	link = bpf_program__attach_iter(prog, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		return ret;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	err = read_fd_into_buffer(iter_fd, buf, TASKBUFSZ);
	if (bss->skip) {
		printf("%s:SKIP:no __builtin_btf_type_id\n", __func__);
		ret = 1;
		test__skip();
		goto free_link;
	}

	if (!ASSERT_GE(err, 0, "read"))
		goto free_link;

	ASSERT_HAS_SUBSTR(taskbuf, "(struct task_struct)",
	      "check for btf representation of task_struct in iter data");
free_link:
	if (iter_fd > 0)
		close(iter_fd);
	bpf_link__destroy(link);
	return ret;
}

static void test_task_btf(void)
{
	struct bpf_iter_task_btf__bss *bss;
	struct bpf_iter_task_btf *skel;
	int ret;

	skel = bpf_iter_task_btf__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_btf__open_and_load"))
		return;

	bss = skel->bss;

	ret = do_btf_read(skel);
	if (ret)
		goto cleanup;

	if (!ASSERT_NEQ(bss->tasks, 0, "no task iteration, did BPF program run?"))
		goto cleanup;

	ASSERT_EQ(bss->seq_err, 0, "check for unexpected err");

cleanup:
	bpf_iter_task_btf__destroy(skel);
}

static void test_tcp4(void)
{
	struct bpf_iter_tcp4 *skel;

	skel = bpf_iter_tcp4__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp4__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_tcp4);

	bpf_iter_tcp4__destroy(skel);
}

static void test_tcp6(void)
{
	struct bpf_iter_tcp6 *skel;

	skel = bpf_iter_tcp6__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_tcp6__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_tcp6);

	bpf_iter_tcp6__destroy(skel);
}

static void test_udp4(void)
{
	struct bpf_iter_udp4 *skel;

	skel = bpf_iter_udp4__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_udp4__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_udp4);

	bpf_iter_udp4__destroy(skel);
}

static void test_udp6(void)
{
	struct bpf_iter_udp6 *skel;

	skel = bpf_iter_udp6__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_udp6__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_udp6);

	bpf_iter_udp6__destroy(skel);
}

static void test_unix(void)
{
	struct bpf_iter_unix *skel;

	skel = bpf_iter_unix__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_unix__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_unix);

	bpf_iter_unix__destroy(skel);
}

/* The expected string is less than 16 bytes */
static int do_read_with_fd(int iter_fd, const char *expected,
			   bool read_one_char)
{
	int len, read_buf_len, start;
	char buf[16] = {};

	read_buf_len = read_one_char ? 1 : 16;
	start = 0;
	while ((len = read(iter_fd, buf + start, read_buf_len)) > 0) {
		start += len;
		if (!ASSERT_LT(start, 16, "read"))
			return -1;
		read_buf_len = read_one_char ? 1 : 16 - start;
	}
	if (!ASSERT_GE(len, 0, "read"))
		return -1;

	if (!ASSERT_STREQ(buf, expected, "read"))
		return -1;

	return 0;
}

static void test_anon_iter(bool read_one_char)
{
	struct bpf_iter_test_kern1 *skel;
	struct bpf_link *link;
	int iter_fd, err;

	skel = bpf_iter_test_kern1__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern1__open_and_load"))
		return;

	err = bpf_iter_test_kern1__attach(skel);
	if (!ASSERT_OK(err, "bpf_iter_test_kern1__attach"))
		goto out;

	link = skel->links.dump_task;
	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto out;

	do_read_with_fd(iter_fd, "abcd", read_one_char);
	close(iter_fd);

out:
	bpf_iter_test_kern1__destroy(skel);
}

568
569static int do_read(const char *path, const char *expected)
570{
571	int err, iter_fd;
572
573	iter_fd = open(path, O_RDONLY);
574	if (!ASSERT_GE(iter_fd, 0, "open"))
575		return -1;
576
577	err = do_read_with_fd(iter_fd, expected, false);
578	close(iter_fd);
579	return err;
580}
581
static void test_file_iter(void)
{
	const char *path = "/sys/fs/bpf/bpf_iter_test1";
	struct bpf_iter_test_kern1 *skel1;
	struct bpf_iter_test_kern2 *skel2;
	struct bpf_link *link;
	int err;

	skel1 = bpf_iter_test_kern1__open_and_load();
	if (!ASSERT_OK_PTR(skel1, "bpf_iter_test_kern1__open_and_load"))
		return;

	link = bpf_program__attach_iter(skel1->progs.dump_task, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	/* unlink this path if it exists. */
	unlink(path);

	err = bpf_link__pin(link, path);
	if (!ASSERT_OK(err, "pin_iter"))
		goto free_link;

	err = do_read(path, "abcd");
	if (err)
		goto unlink_path;

	/* The file-based iterator seems to work fine. Let us do a link
	 * update of the underlying link and `cat` the iterator again; its
	 * content should change.
	 */
	skel2 = bpf_iter_test_kern2__open_and_load();
	if (!ASSERT_OK_PTR(skel2, "bpf_iter_test_kern2__open_and_load"))
		goto unlink_path;

	err = bpf_link__update_program(link, skel2->progs.dump_task);
	if (!ASSERT_OK(err, "update_prog"))
		goto destroy_skel2;

	do_read(path, "ABCD");

destroy_skel2:
	bpf_iter_test_kern2__destroy(skel2);
unlink_path:
	unlink(path);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_test_kern1__destroy(skel1);
}

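/* Exercise seq_file buffer handling in three modes: total output slightly
 * larger than the 8-page buffer (expect read() to fail with E2BIG), slightly
 * smaller (expect an overflow-and-restart that still yields full output), or
 * minimal output with the bpf program exercising the return-1 (retry) path.
 */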
static void test_overflow(bool test_e2big_overflow, bool ret1)
{
	__u32 map_info_len, total_read_len, expected_read_len;
	int err, iter_fd, map1_fd, map2_fd, len;
	struct bpf_map_info map_info = {};
	struct bpf_iter_test_kern4 *skel;
	struct bpf_link *link;
	__u32 iter_size;
	char *buf;

	skel = bpf_iter_test_kern4__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern4__open"))
		return;

	/* Create two maps: the bpf program will only do bpf_seq_write for
	 * these two maps. The goal is for one map's output to almost fill
	 * the seq_file buffer and for the other to then trigger an overflow
	 * and need a restart.
	 */
	map1_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
	if (!ASSERT_GE(map1_fd, 0, "bpf_map_create"))
		goto out;
	map2_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
	if (!ASSERT_GE(map2_fd, 0, "bpf_map_create"))
		goto free_map1;

	/* The bpf_seq_printf kernel buffer is 8 pages, so one map's
	 * bpf_seq_write will mostly fill it, and the other map will
	 * partially fill it and then trigger an overflow, requiring a
	 * bpf_seq_read restart.
	 */
	iter_size = sysconf(_SC_PAGE_SIZE) << 3;

	if (test_e2big_overflow) {
		skel->rodata->print_len = (iter_size + 8) / 8;
		expected_read_len = 2 * (iter_size + 8);
	} else if (!ret1) {
		skel->rodata->print_len = (iter_size - 8) / 8;
		expected_read_len = 2 * (iter_size - 8);
	} else {
		skel->rodata->print_len = 1;
		expected_read_len = 2 * 8;
	}
	skel->rodata->ret1 = ret1;

	if (!ASSERT_OK(bpf_iter_test_kern4__load(skel),
		  "bpf_iter_test_kern4__load"))
		goto free_map2;

	/* setup filtering map_id in bpf program */
	map_info_len = sizeof(map_info);
	err = bpf_map_get_info_by_fd(map1_fd, &map_info, &map_info_len);
	if (!ASSERT_OK(err, "get_map_info"))
		goto free_map2;
	skel->bss->map1_id = map_info.id;

	err = bpf_map_get_info_by_fd(map2_fd, &map_info, &map_info_len);
	if (!ASSERT_OK(err, "get_map_info"))
		goto free_map2;
	skel->bss->map2_id = map_info.id;

	link = bpf_program__attach_iter(skel->progs.dump_bpf_map, NULL);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto free_map2;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	buf = malloc(expected_read_len);
	if (!ASSERT_OK_PTR(buf, "malloc"))
		goto close_iter;

	/* do read */
	total_read_len = 0;
	if (test_e2big_overflow) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		ASSERT_EQ(len, -1, "read");
		ASSERT_EQ(errno, E2BIG, "read");
		goto free_buf;
	} else if (!ret1) {
		while ((len = read(iter_fd, buf, expected_read_len)) > 0)
			total_read_len += len;

		if (!ASSERT_GE(len, 0, "read"))
			goto free_buf;
	} else {
		do {
			len = read(iter_fd, buf, expected_read_len);
			if (len > 0)
				total_read_len += len;
		} while (len > 0 || (len < 0 && errno == EAGAIN));

		if (!ASSERT_GE(len, 0, "read"))
			goto free_buf;
	}

	if (!ASSERT_EQ(total_read_len, expected_read_len, "read"))
		goto free_buf;

	if (!ASSERT_EQ(skel->bss->map1_accessed, 1, "map1_accessed"))
		goto free_buf;

	if (!ASSERT_EQ(skel->bss->map2_accessed, 2, "map2_accessed"))
		goto free_buf;

	ASSERT_EQ(skel->bss->map2_seqnum1, skel->bss->map2_seqnum2, "map2_seqnum");

free_buf:
	free(buf);
close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
free_map2:
	close(map2_fd);
free_map1:
	close(map1_fd);
out:
	bpf_iter_test_kern4__destroy(skel);
}

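/* Populate hashmap1, then verify via an iterator program that the key and
 * value sums observed by the bpf side match what we inserted. Also checks
 * that attaching to mismatched maps or with a sleepable program is rejected.
 */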
static void test_bpf_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_hash_map *skel;
	int err, i, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u64 val, expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];

	skel = bpf_iter_bpf_hash_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_hash_map__open"))
		return;

	skel->bss->in_test_mode = true;

	err = bpf_iter_bpf_hash_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_hash_map__load"))
		goto out;

	/* iterator with hashmap2 and hashmap3 should fail */
	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap2);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap3);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		goto out;

	/* hashmap1 should be good, update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		val = i + 4;
		expected_key_a += key.a;
		expected_key_b += key.b;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	/* Sleepable program is prohibited for hash map iterator */
	linfo.map.map_fd = map_fd;
	link = bpf_program__attach_iter(skel->progs.sleepable_dummy_dump, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_sleepable_prog_to_iter"))
		goto out;

	linfo.map.map_fd = map_fd;
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_hash_map__destroy(skel);
}

static void test_bpf_percpu_hash_map(void)
{
	__u32 expected_key_a = 0, expected_key_b = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_hash_map *skel;
	int err, i, j, len, map_fd, iter_fd;
	union bpf_iter_link_info linfo;
	__u32 expected_val = 0;
	struct bpf_link *link;
	struct key_t {
		int a;
		int b;
		int c;
	} key;
	char buf[64];
	void *val;

	skel = bpf_iter_bpf_percpu_hash_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_hash_map__open"))
		return;

	skel->rodata->num_cpus = bpf_num_possible_cpus();
	val = malloc(8 * bpf_num_possible_cpus());
	if (!ASSERT_OK_PTR(val, "malloc"))
		goto out;

	err = bpf_iter_bpf_percpu_hash_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_percpu_hash_map__load"))
		goto out;

	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.hashmap1); i++) {
		key.a = i + 1;
		key.b = i + 2;
		key.c = i + 3;
		expected_key_a += key.a;
		expected_key_b += key.b;

		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &key, val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_hash_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum_a, expected_key_a, "key_sum_a"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->key_sum_b, expected_key_b, "key_sum_b"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_hash_map__destroy(skel);
	free(val);
}

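/* Fill arraymap1 and read it through an iterator program that
 * bpf_seq_write()s the first key/value pair, sums keys and values, rewrites
 * each element, and mirrors entries into hashmap1; then verify all of the
 * above from user space.
 */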
static void test_bpf_array_map(void)
{
	__u64 val, expected_val = 0, res_first_val, first_val = 0;
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	__u32 key, expected_key = 0, res_first_key;
	int err, i, map_fd, hash_fd, iter_fd;
	struct bpf_iter_bpf_array_map *skel;
	union bpf_iter_link_info linfo;
	struct bpf_link *link;
	char buf[64] = {};
	int len, start;

	skel = bpf_iter_bpf_array_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		val = i + 4;
		expected_key += i;
		expected_val += val;

		if (i == 0)
			first_val = val;

		err = bpf_map_update_elem(map_fd, &i, &val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_array_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	start = 0;
	while ((len = read(iter_fd, buf + start, sizeof(buf) - start)) > 0)
		start += len;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	res_first_key = *(__u32 *)buf;
	res_first_val = *(__u64 *)(buf + sizeof(__u32));
	if (!ASSERT_EQ(res_first_key, 0, "bpf_seq_write") ||
			!ASSERT_EQ(res_first_val, first_val, "bpf_seq_write"))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

	hash_fd = bpf_map__fd(skel->maps.hashmap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		err = bpf_map_lookup_elem(map_fd, &i, &val);
		if (!ASSERT_OK(err, "map_lookup arraymap1"))
			goto close_iter;
		if (!ASSERT_EQ(i, val, "invalid_val arraymap1"))
			goto close_iter;

		val = i + 4;
		err = bpf_map_lookup_elem(hash_fd, &val, &key);
		if (!ASSERT_OK(err, "map_lookup hashmap1"))
			goto close_iter;
		if (!ASSERT_EQ(key, val - 4, "invalid_val hashmap1"))
			goto close_iter;
	}

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_array_map__destroy(skel);
}

static void test_bpf_array_map_iter_fd(void)
{
	struct bpf_iter_bpf_array_map *skel;

	skel = bpf_iter_bpf_array_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_array_map__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.dump_bpf_array_map,
			    skel->maps.arraymap1);

	bpf_iter_bpf_array_map__destroy(skel);
}

static void test_bpf_percpu_array_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_percpu_array_map *skel;
	__u32 expected_key = 0, expected_val = 0;
	union bpf_iter_link_info linfo;
	int err, i, j, map_fd, iter_fd;
	struct bpf_link *link;
	char buf[64];
	void *val;
	int len;

	skel = bpf_iter_bpf_percpu_array_map__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_percpu_array_map__open"))
		return;

	skel->rodata->num_cpus = bpf_num_possible_cpus();
	val = malloc(8 * bpf_num_possible_cpus());
	if (!ASSERT_OK_PTR(val, "malloc"))
		goto out;

	err = bpf_iter_bpf_percpu_array_map__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_bpf_percpu_array_map__load"))
		goto out;

	/* update map values here */
	map_fd = bpf_map__fd(skel->maps.arraymap1);
	for (i = 0; i < bpf_map__max_entries(skel->maps.arraymap1); i++) {
		expected_key += i;

		for (j = 0; j < bpf_num_possible_cpus(); j++) {
			*(__u32 *)(val + j * 8) = i + j;
			expected_val += i + j;
		}

		err = bpf_map_update_elem(map_fd, &i, val, BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_percpu_array_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->key_sum, expected_key, "key_sum"))
		goto close_iter;
	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	bpf_iter_bpf_percpu_array_map__destroy(skel);
	free(val);
}

/* An iterator program deletes all local storage in a map. */
static void test_bpf_sk_storage_delete(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	union bpf_iter_link_info linfo;
	int err, len, map_fd, iter_fd;
	struct bpf_link *link;
	int sock_fd = -1;
	__u32 val = 42;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(sock_fd, 0, "socket"))
		goto out;

	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (!ASSERT_OK(err, "map_update"))
		goto out;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.delete_bpf_sk_storage_map,
					&opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);

	/* Note: the following assertions ensure the value was deleted. They
	 * do so by asserting that bpf_map_lookup_elem has failed, which
	 * might seem counterintuitive at first.
	 */
	ASSERT_ERR(err, "bpf_map_lookup_elem");
	ASSERT_EQ(errno, ENOENT, "bpf_map_lookup_elem");

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	if (sock_fd >= 0)
		close(sock_fd);
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

/* This creates a socket and its local storage. It then runs a task_iter BPF
 * program that replaces the existing socket local storage with the tgid of
 * the only task owning a file descriptor to this socket: this process,
 * prog_tests. It then runs a tcp socket iterator that negates the value in
 * the existing socket local storage; the test verifies that the resulting
 * value is -pid.
 */
static void test_bpf_sk_storage_get(void)
{
	struct bpf_iter_bpf_sk_storage_helpers *skel;
	int err, map_fd, val = -1;
	int sock_fd = -1;

	skel = bpf_iter_bpf_sk_storage_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_helpers__open_and_load"))
		return;

	sock_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(sock_fd, 0, "socket"))
		goto out;

	err = listen(sock_fd, 1);
	if (!ASSERT_OK(err, "listen"))
		goto close_socket;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);

	err = bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update_elem"))
		goto close_socket;

	do_dummy_read(skel->progs.fill_socket_owner);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	if (!ASSERT_OK(err, "bpf_map_lookup_elem") ||
			!ASSERT_EQ(val, getpid(), "bpf_map_lookup_elem"))
		goto close_socket;

	do_dummy_read(skel->progs.negate_socket_local_storage);

	err = bpf_map_lookup_elem(map_fd, &sock_fd, &val);
	ASSERT_OK(err, "bpf_map_lookup_elem");
	ASSERT_EQ(val, -getpid(), "bpf_map_lookup_elem");

close_socket:
	close(sock_fd);
out:
	bpf_iter_bpf_sk_storage_helpers__destroy(skel);
}

static void test_bpf_sk_storage_map_iter_fd(void)
{
	struct bpf_iter_bpf_sk_storage_map *skel;

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.rw_bpf_sk_storage_map,
			    skel->maps.sk_stg_map);

	bpf_iter_bpf_sk_storage_map__destroy(skel);
}

static void test_bpf_sk_storage_map(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, i, len, map_fd, iter_fd, num_sockets;
	struct bpf_iter_bpf_sk_storage_map *skel;
	union bpf_iter_link_info linfo;
	int sock_fd[3] = {-1, -1, -1};
	__u32 val, expected_val = 0;
	struct bpf_link *link;
	char buf[64];

	skel = bpf_iter_bpf_sk_storage_map__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_sk_storage_map__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sk_stg_map);
	num_sockets = ARRAY_SIZE(sock_fd);
	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = socket(AF_INET6, SOCK_STREAM, 0);
		if (!ASSERT_GE(sock_fd[i], 0, "socket"))
			goto out;

		val = i + 1;
		expected_val += val;

		err = bpf_map_update_elem(map_fd, &sock_fd[i], &val,
					  BPF_NOEXIST);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = map_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.oob_write_bpf_sk_storage_map, &opts);
	err = libbpf_get_error(link);
	if (!ASSERT_EQ(err, -EACCES, "attach_oob_write_iter")) {
		if (!err)
			bpf_link__destroy(link);
		goto out;
	}

	link = bpf_program__attach_iter(skel->progs.rw_bpf_sk_storage_map, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	skel->bss->to_add_val = time(NULL);
	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->ipv6_sk_count, num_sockets, "ipv6_sk_count"))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->val_sum, expected_val, "val_sum"))
		goto close_iter;

	for (i = 0; i < num_sockets; i++) {
		err = bpf_map_lookup_elem(map_fd, &sock_fd[i], &val);
		if (!ASSERT_OK(err, "map_lookup") ||
		    !ASSERT_EQ(val, i + 1 + skel->bss->to_add_val, "check_map_value"))
			break;
	}

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	for (i = 0; i < num_sockets; i++) {
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	}
	bpf_iter_bpf_sk_storage_map__destroy(skel);
}

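/* The kern5 program's access to its read-only buffer exceeds the value size
 * of hashmap1, so attaching the iterator to that map must be rejected: a
 * successful attach is a failure here.
 */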
static void test_rdonly_buf_out_of_bound(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	struct bpf_iter_test_kern5 *skel;
	union bpf_iter_link_info linfo;
	struct bpf_link *link;

	skel = bpf_iter_test_kern5__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_test_kern5__open_and_load"))
		return;

	memset(&linfo, 0, sizeof(linfo));
	linfo.map.map_fd = bpf_map__fd(skel->maps.hashmap1);
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.dump_bpf_hash_map, &opts);
	if (!ASSERT_ERR_PTR(link, "attach_iter"))
		bpf_link__destroy(link);

	bpf_iter_test_kern5__destroy(skel);
}

static void test_buf_neg_offset(void)
{
	struct bpf_iter_test_kern6 *skel;

	skel = bpf_iter_test_kern6__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "bpf_iter_test_kern6__open_and_load"))
		bpf_iter_test_kern6__destroy(skel);
}

static void test_link_iter(void)
{
	struct bpf_iter_bpf_link *skel;

	skel = bpf_iter_bpf_link__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_bpf_link__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_bpf_link);

	bpf_iter_bpf_link__destroy(skel);
}

static void test_ksym_iter(void)
{
	struct bpf_iter_ksym *skel;

	skel = bpf_iter_ksym__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_ksym__open_and_load"))
		return;

	do_dummy_read(skel->progs.dump_ksym);

	bpf_iter_ksym__destroy(skel);
}

#define CMP_BUFFER_SIZE 1024
static char task_vma_output[CMP_BUFFER_SIZE];
static char proc_maps_output[CMP_BUFFER_SIZE];

/* remove ' ' and '\t' from str, and only keep the first line */
static void str_strip_first_line(char *str)
{
	char *dst = str, *src = str;

	do {
		if (*src == ' ' || *src == '\t')
			src++;
		else
			*(dst++) = *(src++);

	} while (*src != '\0' && *src != '\n');

	*dst = '\0';
}

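/* Dump this process's VMAs through a task_vma iterator in small chunks and
 * compare the first line against /proc/pid/maps.
 */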
static void test_task_vma_common(struct bpf_iter_attach_opts *opts)
{
	int err, iter_fd = -1, proc_maps_fd = -1;
	struct bpf_iter_task_vmas *skel;
	int len, read_size = 4;
	char maps_path[64];

	skel = bpf_iter_task_vmas__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vmas__open"))
		return;

	skel->bss->pid = getpid();
	skel->bss->one_task = opts ? 1 : 0;

	err = bpf_iter_task_vmas__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_task_vmas__load"))
		goto out;

	skel->links.proc_maps = bpf_program__attach_iter(
		skel->progs.proc_maps, opts);

	if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
		skel->links.proc_maps = NULL;
		goto out;
	}

	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto out;

	/* Read CMP_BUFFER_SIZE (1kB) from bpf_iter. Read in small chunks
	 * to trigger seq_file corner cases.
	 */
	len = 0;
	while (len < CMP_BUFFER_SIZE) {
		err = read_fd_into_buffer(iter_fd, task_vma_output + len,
					  MIN(read_size, CMP_BUFFER_SIZE - len));
		if (!err)
			break;
		if (!ASSERT_GE(err, 0, "read_iter_fd"))
			goto out;
		len += err;
	}
	if (opts)
		ASSERT_EQ(skel->bss->one_task_error, 0, "unexpected task");

	/* read CMP_BUFFER_SIZE (1kB) from /proc/pid/maps */
	snprintf(maps_path, 64, "/proc/%u/maps", skel->bss->pid);
	proc_maps_fd = open(maps_path, O_RDONLY);
	if (!ASSERT_GE(proc_maps_fd, 0, "open_proc_maps"))
		goto out;
	err = read_fd_into_buffer(proc_maps_fd, proc_maps_output, CMP_BUFFER_SIZE);
	if (!ASSERT_GE(err, 0, "read_prog_maps_fd"))
		goto out;

	/* strip and compare the first line of the two files */
	str_strip_first_line(task_vma_output);
	str_strip_first_line(proc_maps_output);

	ASSERT_STREQ(task_vma_output, proc_maps_output, "compare_output");

	check_bpf_link_info(skel->progs.proc_maps);

out:
	close(proc_maps_fd);
	close(iter_fd);
	bpf_iter_task_vmas__destroy(skel);
}

static void test_task_vma_dead_task(void)
{
	struct bpf_iter_task_vmas *skel;
	int wstatus, child_pid = -1;
	time_t start_tm, cur_tm;
	int err, iter_fd = -1;
	int wait_sec = 3;

	skel = bpf_iter_task_vmas__open();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_task_vmas__open"))
		return;

	skel->bss->pid = getpid();

	err = bpf_iter_task_vmas__load(skel);
	if (!ASSERT_OK(err, "bpf_iter_task_vmas__load"))
		goto out;

	skel->links.proc_maps = bpf_program__attach_iter(
		skel->progs.proc_maps, NULL);

	if (!ASSERT_OK_PTR(skel->links.proc_maps, "bpf_program__attach_iter")) {
		skel->links.proc_maps = NULL;
		goto out;
	}

	start_tm = time(NULL);
	cur_tm = start_tm;

	child_pid = fork();
	if (child_pid == 0) {
		/* Fork short-lived processes in the background. */
		while (cur_tm < start_tm + wait_sec) {
			system("echo > /dev/null");
			cur_tm = time(NULL);
		}
		exit(0);
	}

	if (!ASSERT_GE(child_pid, 0, "fork_child"))
		goto out;

	while (cur_tm < start_tm + wait_sec) {
		iter_fd = bpf_iter_create(bpf_link__fd(skel->links.proc_maps));
		if (!ASSERT_GE(iter_fd, 0, "create_iter"))
			goto out;

		/* Drain all data from iter_fd. */
		while (cur_tm < start_tm + wait_sec) {
			err = read_fd_into_buffer(iter_fd, task_vma_output, CMP_BUFFER_SIZE);
			if (!ASSERT_GE(err, 0, "read_iter_fd"))
				goto out;

			cur_tm = time(NULL);

			if (err == 0)
				break;
		}

		close(iter_fd);
		iter_fd = -1;
	}

	check_bpf_link_info(skel->progs.proc_maps);

out:
	waitpid(child_pid, &wstatus, 0);
	close(iter_fd);
	bpf_iter_task_vmas__destroy(skel);
}

static void test_bpf_sockmap_map_iter_fd(void)
{
	struct bpf_iter_sockmap *skel;

	skel = bpf_iter_sockmap__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
		return;

	do_read_map_iter_fd(&skel->skeleton, skel->progs.copy, skel->maps.sockmap);

	bpf_iter_sockmap__destroy(skel);
}

static void test_task_vma(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.tid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_vma_common(&opts);
	test_task_vma_common(NULL);
}

/* uprobe attach point */
static noinline int trigger_func(int arg)
{
	asm volatile ("");
	return arg + 1;
}

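/* Resolve trigger_func's file offset through a task_vma iterator, compare it
 * with get_uprobe_offset(), and count the distinct tgids visited.
 */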
static void test_task_vma_offset_common(struct bpf_iter_attach_opts *opts, bool one_proc)
{
	struct bpf_iter_vma_offset *skel;
	char buf[16] = {};
	int iter_fd, len;
	int pgsz, shift;

	skel = bpf_iter_vma_offset__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_vma_offset__open_and_load"))
		return;

	skel->bss->pid = getpid();
	skel->bss->address = (uintptr_t)trigger_func;
	for (pgsz = getpagesize(), shift = 0; pgsz > 1; pgsz >>= 1, shift++)
		;
	skel->bss->page_shift = shift;

	skel->links.get_vma_offset = bpf_program__attach_iter(skel->progs.get_vma_offset, opts);
	if (!ASSERT_OK_PTR(skel->links.get_vma_offset, "attach_iter"))
		goto exit;

	iter_fd = bpf_iter_create(bpf_link__fd(skel->links.get_vma_offset));
	if (!ASSERT_GT(iter_fd, 0, "create_iter"))
		goto exit;

	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	buf[15] = 0;
	ASSERT_EQ(strcmp(buf, "OK\n"), 0, "strcmp");

	ASSERT_EQ(skel->bss->offset, get_uprobe_offset(trigger_func), "offset");
	if (one_proc)
		ASSERT_EQ(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");
	else
		ASSERT_GT(skel->bss->unique_tgid_cnt, 1, "unique_tgid_count");

	close(iter_fd);

exit:
	bpf_iter_vma_offset__destroy(skel);
}


static void test_task_vma_offset(void)
{
	LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;

	memset(&linfo, 0, sizeof(linfo));
	linfo.task.pid = getpid();
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	test_task_vma_offset_common(&opts, true);

	linfo.task.pid = 0;
	linfo.task.tid = getpid();
	test_task_vma_offset_common(&opts, true);

	test_task_vma_offset_common(NULL, false);
}

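/* Entry point: register all bpf_iter subtests with the test_progs
 * framework.
 */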
void test_bpf_iter(void)
{
	ASSERT_OK(pthread_mutex_init(&do_nothing_mutex, NULL), "pthread_mutex_init");

	if (test__start_subtest("btf_id_or_null"))
		test_btf_id_or_null();
	if (test__start_subtest("ipv6_route"))
		test_ipv6_route();
	if (test__start_subtest("netlink"))
		test_netlink();
	if (test__start_subtest("bpf_map"))
		test_bpf_map();
	if (test__start_subtest("task_tid"))
		test_task_tid();
	if (test__start_subtest("task_pid"))
		test_task_pid();
	if (test__start_subtest("task_pidfd"))
		test_task_pidfd();
	if (test__start_subtest("task_sleepable"))
		test_task_sleepable();
	if (test__start_subtest("task_stack"))
		test_task_stack();
	if (test__start_subtest("task_file"))
		test_task_file();
	if (test__start_subtest("task_vma"))
		test_task_vma();
	if (test__start_subtest("task_vma_dead_task"))
		test_task_vma_dead_task();
	if (test__start_subtest("task_btf"))
		test_task_btf();
	if (test__start_subtest("tcp4"))
		test_tcp4();
	if (test__start_subtest("tcp6"))
		test_tcp6();
	if (test__start_subtest("udp4"))
		test_udp4();
	if (test__start_subtest("udp6"))
		test_udp6();
	if (test__start_subtest("unix"))
		test_unix();
	if (test__start_subtest("anon"))
		test_anon_iter(false);
	if (test__start_subtest("anon-read-one-char"))
		test_anon_iter(true);
	if (test__start_subtest("file"))
		test_file_iter();
	if (test__start_subtest("overflow"))
		test_overflow(false, false);
	if (test__start_subtest("overflow-e2big"))
		test_overflow(true, false);
	if (test__start_subtest("prog-ret-1"))
		test_overflow(false, true);
	if (test__start_subtest("bpf_hash_map"))
		test_bpf_hash_map();
	if (test__start_subtest("bpf_percpu_hash_map"))
		test_bpf_percpu_hash_map();
	if (test__start_subtest("bpf_array_map"))
		test_bpf_array_map();
	if (test__start_subtest("bpf_array_map_iter_fd"))
		test_bpf_array_map_iter_fd();
	if (test__start_subtest("bpf_percpu_array_map"))
		test_bpf_percpu_array_map();
	if (test__start_subtest("bpf_sk_storage_map"))
		test_bpf_sk_storage_map();
	if (test__start_subtest("bpf_sk_storage_map_iter_fd"))
		test_bpf_sk_storage_map_iter_fd();
	if (test__start_subtest("bpf_sk_storage_delete"))
		test_bpf_sk_storage_delete();
	if (test__start_subtest("bpf_sk_storage_get"))
		test_bpf_sk_storage_get();
	if (test__start_subtest("rdonly-buf-out-of-bound"))
		test_rdonly_buf_out_of_bound();
	if (test__start_subtest("buf-neg-offset"))
		test_buf_neg_offset();
	if (test__start_subtest("link-iter"))
		test_link_iter();
	if (test__start_subtest("ksym"))
		test_ksym_iter();
	if (test__start_subtest("bpf_sockmap_map_iter_fd"))
		test_bpf_sockmap_map_iter_fd();
	if (test__start_subtest("vma_offset"))
		test_task_vma_offset();
}