// SPDX-License-Identifier: GPL-2.0

#include <sys/types.h>
#include <sys/socket.h>
#include <test_progs.h>
#include <bpf/btf.h>

#include "lsm_cgroup.skel.h"
#include "lsm_cgroup_nonvoid.skel.h"
#include "cgroup_helpers.h"
#include "network_helpers.h"

#ifndef ENOTSUPP
#define ENOTSUPP 524
#endif

static struct btf *btf;

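/* Count the BPF_LSM_CGROUP programs attached to cgroup_fd. With a NULL
 * attach_func, the total reported by BPF_PROG_QUERY is returned as-is;
 * otherwise only programs whose attach BTF ID matches that LSM hook are
 * counted.
 */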
static __u32 query_prog_cnt(int cgroup_fd, const char *attach_func)
{
	LIBBPF_OPTS(bpf_prog_query_opts, p);
	int cnt = 0;
	int i;

	ASSERT_OK(bpf_prog_query_opts(cgroup_fd, BPF_LSM_CGROUP, &p), "prog_query");

	if (!attach_func)
		return p.prog_cnt;

	/* When attach_func is provided, count the number of progs that
	 * attach to the given symbol.
	 */

	if (!btf)
		btf = btf__load_vmlinux_btf();
	if (!ASSERT_OK(libbpf_get_error(btf), "btf_vmlinux"))
		return -1;

	p.prog_ids = malloc(sizeof(__u32) * p.prog_cnt);
	p.prog_attach_flags = malloc(sizeof(__u32) * p.prog_cnt);
	ASSERT_OK(bpf_prog_query_opts(cgroup_fd, BPF_LSM_CGROUP, &p), "prog_query");

	for (i = 0; i < p.prog_cnt; i++) {
		struct bpf_prog_info info = {};
		__u32 info_len = sizeof(info);
		int fd;

		fd = bpf_prog_get_fd_by_id(p.prog_ids[i]);
		ASSERT_GE(fd, 0, "prog_get_fd_by_id");
		ASSERT_OK(bpf_prog_get_info_by_fd(fd, &info, &info_len),
			  "prog_info_by_fd");
		close(fd);

		if (info.attach_btf_id ==
		    btf__find_by_name_kind(btf, attach_func, BTF_KIND_FUNC))
			cnt++;
	}

	free(p.prog_ids);
	free(p.prog_attach_flags);

	return cnt;
}

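/* Functional test: attach LSM_CGROUP programs to four LSM hooks
 * (socket_post_create, socket_bind, sk_alloc_security and inet_csk_clone),
 * exercise attach/replace through both the legacy BPF_PROG_ATTACH API and
 * BPF links, and then verify the resulting per-hook policies by creating,
 * binding and connecting sockets from within the test cgroups.
 */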
static void test_lsm_cgroup_functional(void)
{
	DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, attach_opts);
	DECLARE_LIBBPF_OPTS(bpf_link_update_opts, update_opts);
	int cgroup_fd = -1, cgroup_fd2 = -1, cgroup_fd3 = -1;
	int listen_fd, client_fd, accepted_fd;
	struct lsm_cgroup *skel = NULL;
	int post_create_prog_fd2 = -1;
	int post_create_prog_fd = -1;
	int bind_link_fd2 = -1;
	int bind_prog_fd2 = -1;
	int alloc_prog_fd = -1;
	int bind_prog_fd = -1;
	int bind_link_fd = -1;
	int clone_prog_fd = -1;
	int err, fd, prio;
	socklen_t socklen;

	cgroup_fd3 = test__join_cgroup("/sock_policy_empty");
	if (!ASSERT_GE(cgroup_fd3, 0, "create empty cgroup"))
		goto close_cgroup;

	cgroup_fd2 = test__join_cgroup("/sock_policy_reuse");
	if (!ASSERT_GE(cgroup_fd2, 0, "create cgroup for reuse"))
		goto close_cgroup;

	cgroup_fd = test__join_cgroup("/sock_policy");
	if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup"))
		goto close_cgroup;

	skel = lsm_cgroup__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		goto close_cgroup;

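	/* Grab raw program fds from the skeleton; the legacy attach and
	 * link create/update APIs used below operate on fds rather than on
	 * bpf_program pointers.
	 */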
	post_create_prog_fd = bpf_program__fd(skel->progs.socket_post_create);
	post_create_prog_fd2 = bpf_program__fd(skel->progs.socket_post_create2);
	bind_prog_fd = bpf_program__fd(skel->progs.socket_bind);
	bind_prog_fd2 = bpf_program__fd(skel->progs.socket_bind2);
	alloc_prog_fd = bpf_program__fd(skel->progs.socket_alloc);
	clone_prog_fd = bpf_program__fd(skel->progs.socket_clone);

	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_sk_alloc_security"), 0, "prog count");
	ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 0, "total prog count");
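	/* The first attachment goes through the legacy BPF_PROG_ATTACH API.
	 * -ENOTSUPP most likely means this kernel/arch cannot set up the
	 * LSM cgroup trampoline shims, so skip the test instead of failing.
	 */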
	err = bpf_prog_attach(alloc_prog_fd, cgroup_fd, BPF_LSM_CGROUP, 0);
	if (err == -ENOTSUPP) {
		test__skip();
		goto close_cgroup;
	}
	if (!ASSERT_OK(err, "attach alloc_prog_fd"))
		goto detach_cgroup;
	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_sk_alloc_security"), 1, "prog count");
	ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 1, "total prog count");

	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_inet_csk_clone"), 0, "prog count");
	err = bpf_prog_attach(clone_prog_fd, cgroup_fd, BPF_LSM_CGROUP, 0);
	if (!ASSERT_OK(err, "attach clone_prog_fd"))
		goto detach_cgroup;
	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_inet_csk_clone"), 1, "prog count");
	ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 2, "total prog count");

	/* Make sure replacing works. */

	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_post_create"), 0, "prog count");
	err = bpf_prog_attach(post_create_prog_fd, cgroup_fd,
			      BPF_LSM_CGROUP, 0);
	if (!ASSERT_OK(err, "attach post_create_prog_fd"))
		goto detach_cgroup;
	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_post_create"), 1, "prog count");
	ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 3, "total prog count");

	attach_opts.replace_prog_fd = post_create_prog_fd;
	err = bpf_prog_attach_opts(post_create_prog_fd2, cgroup_fd,
				   BPF_LSM_CGROUP, &attach_opts);
	if (!ASSERT_OK(err, "prog replace post_create_prog_fd"))
		goto detach_cgroup;
	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_post_create"), 1, "prog count");
	ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 3, "total prog count");

	/* Try the same attach/replace via link API. */

	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 0, "prog count");
	bind_link_fd = bpf_link_create(bind_prog_fd, cgroup_fd,
				       BPF_LSM_CGROUP, NULL);
	if (!ASSERT_GE(bind_link_fd, 0, "link create bind_prog_fd"))
		goto detach_cgroup;
	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 1, "prog count");
	ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count");

	update_opts.old_prog_fd = bind_prog_fd;
	update_opts.flags = BPF_F_REPLACE;

	err = bpf_link_update(bind_link_fd, bind_prog_fd2, &update_opts);
	if (!ASSERT_OK(err, "link update bind_prog_fd"))
		goto detach_cgroup;
	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 1, "prog count");
	ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count");

	/* Attach another instance of the bind program to another cgroup.
	 * This should trigger the reuse of the trampoline shim (two
	 * programs attaching to the same btf_id).
	 */

	ASSERT_EQ(query_prog_cnt(cgroup_fd, "bpf_lsm_socket_bind"), 1, "prog count");
	ASSERT_EQ(query_prog_cnt(cgroup_fd2, "bpf_lsm_socket_bind"), 0, "prog count");
	bind_link_fd2 = bpf_link_create(bind_prog_fd2, cgroup_fd2,
					BPF_LSM_CGROUP, NULL);
	if (!ASSERT_GE(bind_link_fd2, 0, "link create bind_prog_fd2"))
		goto detach_cgroup;
	ASSERT_EQ(query_prog_cnt(cgroup_fd2, "bpf_lsm_socket_bind"), 1, "prog count");
	ASSERT_EQ(query_prog_cnt(cgroup_fd, NULL), 4, "total prog count");
	ASSERT_EQ(query_prog_cnt(cgroup_fd2, NULL), 1, "total prog count");

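	/* AF_UNIX socket creation is expected to fail under the attached
	 * policy, but only when no other major LSM (AppArmor/SELinux/Smack)
	 * is built in; the called_socket_post_create2 counters further below
	 * show that the socket is created in the stacked-LSM case.
	 */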
	fd = socket(AF_UNIX, SOCK_STREAM, 0);
	if (!(skel->kconfig->CONFIG_SECURITY_APPARMOR
	    || skel->kconfig->CONFIG_SECURITY_SELINUX
	    || skel->kconfig->CONFIG_SECURITY_SMACK))
		/* AF_UNIX is prohibited. */
		ASSERT_LT(fd, 0, "socket(AF_UNIX)");
	close(fd);

	/* AF_INET6 gets the default policy: sk_priority 123. */

	fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(fd, 0, "socket(SOCK_STREAM)"))
		goto detach_cgroup;

	prio = 0;
	socklen = sizeof(prio);
	ASSERT_GE(getsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, &socklen), 0,
		  "getsockopt");
	ASSERT_EQ(prio, 123, "sk_priority");

	close(fd);

	/* Only TX-only AF_PACKET is allowed: ETH_P_ALL is rejected,
	 * protocol 0 is accepted.
	 */

	ASSERT_LT(socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL)), 0,
		  "socket(AF_PACKET, ..., ETH_P_ALL)");

	fd = socket(AF_PACKET, SOCK_RAW, 0);
	ASSERT_GE(fd, 0, "socket(AF_PACKET, ..., 0)");

	/* A TX-only AF_PACKET socket cannot be rebound to ETH_P_ALL. */

	struct sockaddr_ll sa = {
		.sll_family = AF_PACKET,
		.sll_protocol = htons(ETH_P_ALL),
	};
	ASSERT_LT(bind(fd, (struct sockaddr *)&sa, sizeof(sa)), 0,
		  "bind(ETH_P_ALL)");

	close(fd);

	/* Trigger passive open. */

	listen_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
	ASSERT_GE(listen_fd, 0, "start_server");
	client_fd = connect_to_fd(listen_fd, 0);
	ASSERT_GE(client_fd, 0, "connect_to_fd");
	accepted_fd = accept(listen_fd, NULL, NULL);
	ASSERT_GE(accepted_fd, 0, "accept");

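	/* The accepted socket is the one the inet_csk_clone program acted
	 * on: it should carry sk_priority 234 rather than the post_create
	 * default of 123 set on actively created sockets.
	 */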
	prio = 0;
	socklen = sizeof(prio);
	ASSERT_GE(getsockopt(accepted_fd, SOL_SOCKET, SO_PRIORITY, &prio, &socklen), 0,
		  "getsockopt");
	ASSERT_EQ(prio, 234, "sk_priority");

	/* These programs were replaced above and are never called. */
	ASSERT_EQ(skel->bss->called_socket_post_create, 0, "called_create");
	ASSERT_EQ(skel->bss->called_socket_bind, 0, "called_bind");

	/* AF_INET6+SOCK_STREAM
	 * AF_PACKET+SOCK_RAW
	 * AF_UNIX+SOCK_STREAM, only counted when a non-BPF LSM is installed
	 * listen_fd
	 * client_fd
	 * accepted_fd
	 */
	if (skel->kconfig->CONFIG_SECURITY_APPARMOR
	    || skel->kconfig->CONFIG_SECURITY_SELINUX
	    || skel->kconfig->CONFIG_SECURITY_SMACK)
		/* The AF_UNIX socket only counts when a non-BPF LSM is installed. */
		ASSERT_EQ(skel->bss->called_socket_post_create2, 6, "called_create2");
	else
		ASSERT_EQ(skel->bss->called_socket_post_create2, 5, "called_create2");

	/* start_server
	 * bind(ETH_P_ALL)
	 */
	ASSERT_EQ(skel->bss->called_socket_bind2, 2, "called_bind2");
	/* Single accept(). */
	ASSERT_EQ(skel->bss->called_socket_clone, 1, "called_clone");

	/* AF_UNIX+SOCK_STREAM (failed)
	 * AF_INET6+SOCK_STREAM
	 * AF_PACKET+SOCK_RAW (failed)
	 * AF_PACKET+SOCK_RAW
	 * listen_fd
	 * client_fd
	 * accepted_fd
	 */
	ASSERT_EQ(skel->bss->called_socket_alloc, 7, "called_alloc");

	close(listen_fd);
	close(client_fd);
	close(accepted_fd);

	/* Make sure a cgroup without attached programs doesn't trigger them. */

	if (!ASSERT_OK(join_cgroup("/sock_policy_empty"), "join empty cgroup"))
		goto detach_cgroup;

	fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (!ASSERT_GE(fd, 0, "socket(SOCK_STREAM)"))
		goto detach_cgroup;

	prio = 0;
	socklen = sizeof(prio);
	ASSERT_GE(getsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, &socklen), 0,
		  "getsockopt");
	ASSERT_EQ(prio, 0, "sk_priority");

	close(fd);

detach_cgroup:
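	/* post_create_prog_fd2 replaced post_create_prog_fd earlier, so it
	 * is the program that has to be detached here. The bind programs
	 * were attached via links: bind_link_fd is simply closed below,
	 * while bind_link_fd2 is left open on purpose.
	 */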
	ASSERT_GE(bpf_prog_detach2(post_create_prog_fd2, cgroup_fd,
				   BPF_LSM_CGROUP), 0, "detach_create");
	close(bind_link_fd);
	/* Don't close bind_link_fd2; this exercises the cleanup path when
	 * the cgroup is released with a link still attached.
	 */
	ASSERT_GE(bpf_prog_detach2(alloc_prog_fd, cgroup_fd,
				   BPF_LSM_CGROUP), 0, "detach_alloc");
	ASSERT_GE(bpf_prog_detach2(clone_prog_fd, cgroup_fd,
				   BPF_LSM_CGROUP), 0, "detach_clone");

close_cgroup:
	close(cgroup_fd);
	close(cgroup_fd2);
	close(cgroup_fd3);
	lsm_cgroup__destroy(skel);
}

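/* lsm_cgroup_nonvoid returns a value from an LSM hook that does not allow
 * one (hence "nonvoid"), so loading is expected to be rejected; a NULL
 * skeleton from open_and_load() is the passing outcome.
 */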
static void test_lsm_cgroup_nonvoid(void)
{
	struct lsm_cgroup_nonvoid *skel = NULL;

	skel = lsm_cgroup_nonvoid__open_and_load();
	ASSERT_NULL(skel, "open succeeds");
	lsm_cgroup_nonvoid__destroy(skel);
}

void test_lsm_cgroup(void)
{
	if (test__start_subtest("functional"))
		test_lsm_cgroup_functional();
	if (test__start_subtest("nonvoid"))
		test_lsm_cgroup_nonvoid();
	btf__free(btf);
}