// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <test_progs.h>
#include "cgrp_ls_tp_btf.skel.h"
#include "cgrp_ls_recursion.skel.h"
#include "cgrp_ls_attach_cgroup.skel.h"
#include "cgrp_ls_negative.skel.h"
#include "cgrp_ls_sleepable.skel.h"
#include "network_helpers.h"
#include "cgroup_helpers.h"

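/* Userspace mirror of the value type stored in the socket_cookies map
 * by the cgrp_ls_attach_cgroup BPF programs.
 */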
struct socket_cookie {
	__u64 cookie_key;
	__u64 cookie_value;
};

static bool is_cgroup1;
static int target_hid;

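/* Propagate the selected cgroup mode (v1 vs. v2) and the target hierarchy
 * id into the skeleton's globals before the programs run.
 */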
#define CGROUP_MODE_SET(skel)			\
do {						\
	skel->bss->is_cgroup1 = is_cgroup1;	\
	skel->bss->target_hid = target_hid;	\
} while (0)

static void cgroup_mode_value_init(bool cgroup, int hid)
{
	is_cgroup1 = cgroup;
	target_hid = hid;
}

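/* Exercise cgroup local storage from tp_btf programs: seed map_b from
 * user space, verify lookup and delete, then count the sys_enter/sys_exit
 * events seen while this thread is the target_pid.
 */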
static void test_tp_btf(int cgroup_fd)
{
	struct cgrp_ls_tp_btf *skel;
	long val1 = 1, val2 = 0;
	int err;

	skel = cgrp_ls_tp_btf__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
		return;

	CGROUP_MODE_SET(skel);

	/* populate a value in map_b */
	err = bpf_map_update_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd, &val1, BPF_ANY);
	if (!ASSERT_OK(err, "map_update_elem"))
		goto out;

	/* check value */
	err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd, &val2);
	if (!ASSERT_OK(err, "map_lookup_elem"))
		goto out;
	if (!ASSERT_EQ(val2, 1, "map_lookup_elem, invalid val"))
		goto out;

	/* delete value */
	err = bpf_map_delete_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd);
	if (!ASSERT_OK(err, "map_delete_elem"))
		goto out;

	skel->bss->target_pid = syscall(SYS_gettid);

	err = cgrp_ls_tp_btf__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto out;

	syscall(SYS_gettid);
	syscall(SYS_gettid);

	skel->bss->target_pid = 0;

	/* 3x syscalls: 1x attach and 2x gettid */
	ASSERT_EQ(skel->bss->enter_cnt, 3, "enter_cnt");
	ASSERT_EQ(skel->bss->exit_cnt, 3, "exit_cnt");
	ASSERT_EQ(skel->bss->mismatch_cnt, 0, "mismatch_cnt");
out:
	cgrp_ls_tp_btf__destroy(skel);
}

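/* Attach set_cookie and update_cookie_sockops to the cgroup plus the
 * update_cookie_tracing program, connect a TCP socket over ::1, then check
 * the cookie recorded in socket_cookies: the expected value is the client's
 * source port (host order) shifted left by 8, with 0xFF in the low byte.
 */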
static void test_attach_cgroup(int cgroup_fd)
{
	int server_fd = 0, client_fd = 0, err = 0;
	socklen_t addr_len = sizeof(struct sockaddr_in6);
	struct cgrp_ls_attach_cgroup *skel;
	__u32 cookie_expected_value;
	struct sockaddr_in6 addr;
	struct socket_cookie val;

	skel = cgrp_ls_attach_cgroup__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	skel->links.set_cookie = bpf_program__attach_cgroup(
		skel->progs.set_cookie, cgroup_fd);
	if (!ASSERT_OK_PTR(skel->links.set_cookie, "prog_attach"))
		goto out;

	skel->links.update_cookie_sockops = bpf_program__attach_cgroup(
		skel->progs.update_cookie_sockops, cgroup_fd);
	if (!ASSERT_OK_PTR(skel->links.update_cookie_sockops, "prog_attach"))
		goto out;

	skel->links.update_cookie_tracing = bpf_program__attach(
		skel->progs.update_cookie_tracing);
	if (!ASSERT_OK_PTR(skel->links.update_cookie_tracing, "prog_attach"))
		goto out;

	server_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
	if (!ASSERT_GE(server_fd, 0, "start_server"))
		goto out;

	client_fd = connect_to_fd(server_fd, 0);
	if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
		goto close_server_fd;

	err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.socket_cookies),
				  &cgroup_fd, &val);
	if (!ASSERT_OK(err, "map_lookup(socket_cookies)"))
		goto close_client_fd;

	err = getsockname(client_fd, (struct sockaddr *)&addr, &addr_len);
	if (!ASSERT_OK(err, "getsockname"))
		goto close_client_fd;

	cookie_expected_value = (ntohs(addr.sin6_port) << 8) | 0xFF;
	ASSERT_EQ(val.cookie_value, cookie_expected_value, "cookie_value");

close_client_fd:
	close(client_fd);
close_server_fd:
	close(server_fd);
out:
	cgrp_ls_attach_cgroup__destroy(skel);
}

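/* Re-entrant cgroup storage accesses from the attached programs must not
 * deadlock; a single gettid() is enough to trigger sys_enter.
 */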
static void test_recursion(int cgroup_fd)
{
	struct cgrp_ls_recursion *skel;
	int err;

	skel = cgrp_ls_recursion__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
		return;

	CGROUP_MODE_SET(skel);

	err = cgrp_ls_recursion__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto out;

	/* trigger sys_enter, make sure it does not cause deadlock */
	syscall(SYS_gettid);

out:
	cgrp_ls_recursion__destroy(skel);
}

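/* Negative test: cgrp_ls_negative is expected to be rejected, so
 * open_and_load must fail.
 */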
static void test_negative(void)
{
	struct cgrp_ls_negative *skel;

	skel = cgrp_ls_negative__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "skel_open_and_load")) {
		cgrp_ls_negative__destroy(skel);
		return;
	}
}

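/* Attach the sleepable cgroup_iter program to the given cgroup with
 * BPF_CGROUP_ITER_SELF_ONLY ordering; a single read() on the iterator fd
 * runs the program, which should report the matching cgroup id.
 */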
static void test_cgroup_iter_sleepable(int cgroup_fd, __u64 cgroup_id)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct cgrp_ls_sleepable *skel;
	struct bpf_link *link;
	int err, iter_fd;
	char buf[16];

	skel = cgrp_ls_sleepable__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	CGROUP_MODE_SET(skel);

	bpf_program__set_autoload(skel->progs.cgroup_iter, true);
	err = cgrp_ls_sleepable__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto out;

	memset(&linfo, 0, sizeof(linfo));
	linfo.cgroup.cgroup_fd = cgroup_fd;
	linfo.cgroup.order = BPF_CGROUP_ITER_SELF_ONLY;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.cgroup_iter, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "iter_create"))
		goto out;

	/* trigger the program run */
	(void)read(iter_fd, buf, sizeof(buf));

	ASSERT_EQ(skel->bss->cgroup_id, cgroup_id, "cgroup_id");

	close(iter_fd);
out:
	cgrp_ls_sleepable__destroy(skel);
}

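/* The yes_rcu_lock variant (sleepable, with RCU protection) should load and
 * attach; the getpgid() call below triggers it, and it should record this
 * thread's cgroup id.
 */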
static void test_yes_rcu_lock(__u64 cgroup_id)
{
	struct cgrp_ls_sleepable *skel;
	int err;

	skel = cgrp_ls_sleepable__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	CGROUP_MODE_SET(skel);
	skel->bss->target_pid = syscall(SYS_gettid);

	bpf_program__set_autoload(skel->progs.yes_rcu_lock, true);
	err = cgrp_ls_sleepable__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto out;

	err = cgrp_ls_sleepable__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto out;

	syscall(SYS_getpgid);

	ASSERT_EQ(skel->bss->cgroup_id, cgroup_id, "cgroup_id");
out:
	cgrp_ls_sleepable__destroy(skel);
}

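/* The no_rcu_lock variant is expected to fail to load. */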
static void test_no_rcu_lock(void)
{
	struct cgrp_ls_sleepable *skel;
	int err;

	skel = cgrp_ls_sleepable__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	CGROUP_MODE_SET(skel);

	bpf_program__set_autoload(skel->progs.no_rcu_lock, true);
	err = cgrp_ls_sleepable__load(skel);
	ASSERT_ERR(err, "skel_load");

	cgrp_ls_sleepable__destroy(skel);
}

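/* Unlike no_rcu_lock, the cgroup1 counterpart (cgrp1_no_rcu_lock) is
 * expected to load successfully.
 */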
static void test_cgrp1_no_rcu_lock(void)
{
	struct cgrp_ls_sleepable *skel;
	int err;

	skel = cgrp_ls_sleepable__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	CGROUP_MODE_SET(skel);

	bpf_program__set_autoload(skel->progs.cgrp1_no_rcu_lock, true);
	err = cgrp_ls_sleepable__load(skel);
	ASSERT_OK(err, "skel_load");

	cgrp_ls_sleepable__destroy(skel);
}

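/* Run the cgroup v2 subtests against a fresh /cgrp_local_storage cgroup. */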
static void cgrp2_local_storage(void)
{
	__u64 cgroup_id;
	int cgroup_fd;

	cgroup_fd = test__join_cgroup("/cgrp_local_storage");
	if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /cgrp_local_storage"))
		return;

	cgroup_mode_value_init(0, -1);

	cgroup_id = get_cgroup_id("/cgrp_local_storage");
	if (test__start_subtest("tp_btf"))
		test_tp_btf(cgroup_fd);
	if (test__start_subtest("attach_cgroup"))
		test_attach_cgroup(cgroup_fd);
	if (test__start_subtest("recursion"))
		test_recursion(cgroup_fd);
	if (test__start_subtest("negative"))
		test_negative();
	if (test__start_subtest("cgroup_iter_sleepable"))
		test_cgroup_iter_sleepable(cgroup_fd, cgroup_id);
	if (test__start_subtest("yes_rcu_lock"))
		test_yes_rcu_lock(cgroup_id);
	if (test__start_subtest("no_rcu_lock"))
		test_no_rcu_lock();

	close(cgroup_fd);
}

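/* Run the cgroup v1 subtests against the net_cls classid hierarchy. */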
static void cgrp1_local_storage(void)
{
	int cgrp1_fd, cgrp1_hid, cgrp1_id, err;

	/* Setup cgroup1 hierarchy */
	err = setup_classid_environment();
	if (!ASSERT_OK(err, "setup_classid_environment"))
		return;

	err = join_classid();
	if (!ASSERT_OK(err, "join_cgroup1"))
		goto cleanup;

	cgrp1_fd = open_classid();
	if (!ASSERT_GE(cgrp1_fd, 0, "cgroup1 fd"))
		goto cleanup;

	cgrp1_id = get_classid_cgroup_id();
	if (!ASSERT_GE(cgrp1_id, 0, "cgroup1 id"))
		goto close_fd;

	cgrp1_hid = get_cgroup1_hierarchy_id("net_cls");
	if (!ASSERT_GE(cgrp1_hid, 0, "cgroup1 hid"))
		goto close_fd;

	cgroup_mode_value_init(1, cgrp1_hid);

	if (test__start_subtest("cgrp1_tp_btf"))
		test_tp_btf(cgrp1_fd);
	if (test__start_subtest("cgrp1_recursion"))
		test_recursion(cgrp1_fd);
	if (test__start_subtest("cgrp1_negative"))
		test_negative();
	if (test__start_subtest("cgrp1_iter_sleepable"))
		test_cgroup_iter_sleepable(cgrp1_fd, cgrp1_id);
	if (test__start_subtest("cgrp1_yes_rcu_lock"))
		test_yes_rcu_lock(cgrp1_id);
	if (test__start_subtest("cgrp1_no_rcu_lock"))
		test_cgrp1_no_rcu_lock();

close_fd:
	close(cgrp1_fd);
cleanup:
	cleanup_classid_environment();
}

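/* Entry point: run the cgroup v2 subtests first, then the cgroup v1 ones. */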
void test_cgrp_local_storage(void)
{
	cgrp2_local_storage();
	cgrp1_local_storage();
}