// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022, Oracle and/or its affiliates. */

#include <test_progs.h>
#include <bpf/btf.h>

#include "test_unpriv_bpf_disabled.skel.h"

#include "cap_helpers.h"

/* Using CAP_LAST_CAP is risky here, since it can get pulled in from
 * an old /usr/include/linux/capability.h and be < CAP_BPF; as a result
 * CAP_BPF would not be included in ALL_CAPS.  Instead use CAP_BPF as
 * we know its value is correct since it is explicitly defined in
 * cap_helpers.h.
 */
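/* 2ULL << CAP_BPF == 1ULL << (CAP_BPF + 1), so subtracting one below
 * yields a mask with bits 0 through CAP_BPF set, i.e. every capability
 * from CAP_CHOWN (0) up to and including CAP_BPF.
 */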
#define ALL_CAPS	((2ULL << CAP_BPF) - 1)

#define PINPATH		"/sys/fs/bpf/unpriv_bpf_disabled_"
#define NUM_MAPS	7

static __u32 got_perfbuf_val;
static __u32 got_ringbuf_val;

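/* Buffer callbacks stash the received sample so the test can compare it
 * against the value the BPF program wrote.
 */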
static int process_ringbuf(void *ctx, void *data, size_t len)
{
	if (ASSERT_EQ(len, sizeof(__u32), "ringbuf_size_valid"))
		got_ringbuf_val = *(__u32 *)data;
	return 0;
}

static void process_perfbuf(void *ctx, int cpu, void *data, __u32 len)
{
	if (ASSERT_EQ(len, sizeof(__u32), "perfbuf_size_valid"))
		got_perfbuf_val = *(__u32 *)data;
}

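/* Read the current value of @sysctl_path into @old_val (if non-NULL), then
 * write @new_val unless it already matches.  Returns 0 on success or a
 * negative errno on failure.
 */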
static int sysctl_set(const char *sysctl_path, char *old_val, const char *new_val)
{
	int ret = 0;
	FILE *fp;

	fp = fopen(sysctl_path, "r+");
	if (!fp)
		return -errno;
	/* bound the read; callers pass 32-byte buffers for old_val */
	if (old_val && fscanf(fp, "%31s", old_val) <= 0) {
		ret = -ENOENT;
	} else if (!old_val || strcmp(old_val, new_val) != 0) {
		fseek(fp, 0, SEEK_SET);
		if (fprintf(fp, "%s", new_val) < 0)
			ret = -errno;
	}
	fclose(fp);

	return ret;
}

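/* Runs with all effective capabilities dropped; @map_paths/@map_fds refer to
 * maps that were pinned while the test still held privileges.
 */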
static void test_unpriv_bpf_disabled_positive(struct test_unpriv_bpf_disabled *skel,
					      __u32 prog_id, int prog_fd, int perf_fd,
					      char **map_paths, int *map_fds)
{
	struct perf_buffer *perfbuf = NULL;
	struct ring_buffer *ringbuf = NULL;
	int i, nr_cpus, link_fd = -1;

	nr_cpus = bpf_num_possible_cpus();

	skel->bss->perfbuf_val = 1;
	skel->bss->ringbuf_val = 2;

	/* Positive tests for unprivileged BPF disabled. Verify we can
	 * - retrieve and interact with pinned maps;
	 * - set up and interact with perf buffer;
	 * - set up and interact with ring buffer;
	 * - create a link
	 */
	perfbuf = perf_buffer__new(bpf_map__fd(skel->maps.perfbuf), 8, process_perfbuf, NULL, NULL,
				   NULL);
	if (!ASSERT_OK_PTR(perfbuf, "perf_buffer__new"))
		goto cleanup;

	ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf), process_ringbuf, NULL, NULL);
	if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new"))
		goto cleanup;

	/* usleep() enters the nanosleep syscall, triggering the attached
	 * sys_nanosleep_enter program; validate the perf event and ringbuf
	 * output it emits.
	 */
	usleep(1);

	ASSERT_GT(perf_buffer__poll(perfbuf, 100), -1, "perf_buffer__poll");
	ASSERT_EQ(got_perfbuf_val, skel->bss->perfbuf_val, "check_perfbuf_val");
	ASSERT_EQ(ring_buffer__consume(ringbuf), 1, "ring_buffer__consume");
	ASSERT_EQ(got_ringbuf_val, skel->bss->ringbuf_val, "check_ringbuf_val");

	for (i = 0; i < NUM_MAPS; i++) {
		map_fds[i] = bpf_obj_get(map_paths[i]);
		if (!ASSERT_GT(map_fds[i], -1, "obj_get"))
			goto cleanup;
	}

	for (i = 0; i < NUM_MAPS; i++) {
		bool prog_array = strstr(map_paths[i], "prog_array") != NULL;
		bool array = strstr(map_paths[i], "array") != NULL;
		bool buf = strstr(map_paths[i], "buf") != NULL;
		__u32 key = 0, vals[nr_cpus], lookup_vals[nr_cpus];
		__u32 expected_val = 1;
		int j;

		/* skip ringbuf, perfbuf */
		if (buf)
			continue;

		for (j = 0; j < nr_cpus; j++)
			vals[j] = expected_val;

		if (prog_array) {
			/* need valid prog array value */
			vals[0] = prog_fd;
			/* prog array lookup returns prog id, not fd */
			expected_val = prog_id;
		}
		ASSERT_OK(bpf_map_update_elem(map_fds[i], &key, vals, 0), "map_update_elem");
		ASSERT_OK(bpf_map_lookup_elem(map_fds[i], &key, lookup_vals), "map_lookup_elem");
		ASSERT_EQ(lookup_vals[0], expected_val, "map_lookup_elem_values");
		if (!array)
			ASSERT_OK(bpf_map_delete_elem(map_fds[i], &key), "map_delete_elem");
	}

	link_fd = bpf_link_create(bpf_program__fd(skel->progs.handle_perf_event), perf_fd,
				  BPF_PERF_EVENT, NULL);
	ASSERT_GT(link_fd, 0, "link_create");

cleanup:
	/* bpf_link_create() returns a negative errno on failure, so only
	 * close() a valid fd
	 */
	if (link_fd >= 0)
		close(link_fd);
	if (perfbuf)
		perf_buffer__free(perfbuf);
	if (ringbuf)
		ring_buffer__free(ringbuf);
}

static void test_unpriv_bpf_disabled_negative(struct test_unpriv_bpf_disabled *skel,
					      __u32 prog_id, int prog_fd, int perf_fd,
					      char **map_paths, int *map_fds)
{
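	/* minimal "r0 = 0; exit" program; even this must be refused */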
	const struct bpf_insn prog_insns[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};
	const size_t prog_insn_cnt = sizeof(prog_insns) / sizeof(struct bpf_insn);
	LIBBPF_OPTS(bpf_prog_load_opts, load_opts);
	struct bpf_map_info map_info = {};
	__u32 map_info_len = sizeof(map_info);
	struct bpf_link_info link_info = {};
	__u32 link_info_len = sizeof(link_info);
	struct btf *btf = NULL;
	__u32 attach_flags = 0;
	__u32 prog_ids[3] = {};
	__u32 prog_cnt = 3;
	__u32 next;
	int i;

	/* Negative tests for unprivileged BPF disabled.  Verify we cannot
	 * - load BPF programs;
	 * - create BPF maps;
	 * - get a prog/map/link fd by id;
	 * - get next prog/map/link id;
	 * - query attached programs;
	 * - load BTF.
	 */
	ASSERT_EQ(bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, "simple_prog", "GPL",
				prog_insns, prog_insn_cnt, &load_opts),
		  -EPERM, "prog_load_fails");

	/* Some map types require specific parameters that may be sanity-checked
	 * before -EPERM is enforced, so only validate that the simple HASH and
	 * ARRAY map types fail with -EPERM.
	 */
	for (i = BPF_MAP_TYPE_HASH; i <= BPF_MAP_TYPE_ARRAY; i++)
		ASSERT_EQ(bpf_map_create(i, NULL, sizeof(int), sizeof(int), 1, NULL),
			  -EPERM, "map_create_fails");

	ASSERT_EQ(bpf_prog_get_fd_by_id(prog_id), -EPERM, "prog_get_fd_by_id_fails");
	ASSERT_EQ(bpf_prog_get_next_id(prog_id, &next), -EPERM, "prog_get_next_id_fails");
	ASSERT_EQ(bpf_prog_get_next_id(0, &next), -EPERM, "prog_get_next_id_fails");

	if (ASSERT_OK(bpf_map_get_info_by_fd(map_fds[0], &map_info, &map_info_len),
		      "obj_get_info_by_fd")) {
		ASSERT_EQ(bpf_map_get_fd_by_id(map_info.id), -EPERM, "map_get_fd_by_id_fails");
		ASSERT_EQ(bpf_map_get_next_id(map_info.id, &next), -EPERM,
			  "map_get_next_id_fails");
	}
	ASSERT_EQ(bpf_map_get_next_id(0, &next), -EPERM, "map_get_next_id_fails");

	if (ASSERT_OK(bpf_link_get_info_by_fd(bpf_link__fd(skel->links.sys_nanosleep_enter),
					      &link_info, &link_info_len),
		      "obj_get_info_by_fd")) {
		ASSERT_EQ(bpf_link_get_fd_by_id(link_info.id), -EPERM, "link_get_fd_by_id_fails");
		ASSERT_EQ(bpf_link_get_next_id(link_info.id, &next), -EPERM,
			  "link_get_next_id_fails");
	}
	ASSERT_EQ(bpf_link_get_next_id(0, &next), -EPERM, "link_get_next_id_fails");

	ASSERT_EQ(bpf_prog_query(prog_fd, BPF_TRACE_FENTRY, 0, &attach_flags, prog_ids,
				 &prog_cnt), -EPERM, "prog_query_fails");

	btf = btf__new_empty();
	if (ASSERT_OK_PTR(btf, "empty_btf") &&
	    ASSERT_GT(btf__add_int(btf, "int", 4, 0), 0, "unpriv_int_type")) {
		const void *raw_btf_data;
		__u32 raw_btf_size;

		raw_btf_data = btf__raw_data(btf, &raw_btf_size);
		if (ASSERT_OK_PTR(raw_btf_data, "raw_btf_data_good"))
			ASSERT_EQ(bpf_btf_load(raw_btf_data, raw_btf_size, NULL), -EPERM,
				  "bpf_btf_load_fails");
	}
	btf__free(btf);
}

void test_unpriv_bpf_disabled(void)
{
	char *map_paths[NUM_MAPS] = {	PINPATH	"array",
					PINPATH "percpu_array",
					PINPATH "hash",
					PINPATH "percpu_hash",
					PINPATH "perfbuf",
					PINPATH "ringbuf",
					PINPATH "prog_array" };
	int map_fds[NUM_MAPS];
	struct test_unpriv_bpf_disabled *skel;
	char unprivileged_bpf_disabled_orig[32] = {};
	char perf_event_paranoid_orig[32] = {};
	struct bpf_prog_info prog_info = {};
	__u32 prog_info_len = sizeof(prog_info);
	struct perf_event_attr attr = {};
	int prog_fd, perf_fd = -1, i, ret;
	__u64 save_caps = 0;
	__u32 prog_id;

	skel = test_unpriv_bpf_disabled__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	skel->bss->test_pid = getpid();

	map_fds[0] = bpf_map__fd(skel->maps.array);
	map_fds[1] = bpf_map__fd(skel->maps.percpu_array);
	map_fds[2] = bpf_map__fd(skel->maps.hash);
	map_fds[3] = bpf_map__fd(skel->maps.percpu_hash);
	map_fds[4] = bpf_map__fd(skel->maps.perfbuf);
	map_fds[5] = bpf_map__fd(skel->maps.ringbuf);
	map_fds[6] = bpf_map__fd(skel->maps.prog_array);

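	/* Pin every map so the positive subtest can re-acquire them via
	 * bpf_obj_get() after capabilities are dropped.
	 */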
	for (i = 0; i < NUM_MAPS; i++)
		ASSERT_OK(bpf_obj_pin(map_fds[i], map_paths[i]), "pin map_fd");

	/* allow user without caps to use perf events */
	if (!ASSERT_OK(sysctl_set("/proc/sys/kernel/perf_event_paranoid", perf_event_paranoid_orig,
				  "-1"),
		       "set_perf_event_paranoid"))
		goto cleanup;
	/* ensure unprivileged BPF is disabled; "2" disables it but, unlike
	 * "1", can later be changed back by a privileged user
	 */
	ret = sysctl_set("/proc/sys/kernel/unprivileged_bpf_disabled",
			 unprivileged_bpf_disabled_orig, "2");
	if (ret == -EPERM) {
		/* if unprivileged_bpf_disabled=1, we get -EPERM back; that's okay. */
		if (!ASSERT_OK(strcmp(unprivileged_bpf_disabled_orig, "1"),
			       "unprivileged_bpf_disabled_on"))
			goto cleanup;
	} else {
		if (!ASSERT_OK(ret, "set unprivileged_bpf_disabled"))
			goto cleanup;
	}

	prog_fd = bpf_program__fd(skel->progs.sys_nanosleep_enter);
	ASSERT_OK(bpf_prog_get_info_by_fd(prog_fd, &prog_info, &prog_info_len),
		  "obj_get_info_by_fd");
	prog_id = prog_info.id;
	ASSERT_GT(prog_id, 0, "valid_prog_id");

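	/* Open a software CPU-clock perf event for the handle_perf_event
	 * program to attach to in the positive subtest; sampling at 1000Hz
	 * keeps the event firing promptly.
	 */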
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_CLOCK;
	attr.freq = 1;
	attr.sample_freq = 1000;
	perf_fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);
	if (!ASSERT_GE(perf_fd, 0, "perf_fd"))
		goto cleanup;

	if (!ASSERT_OK(test_unpriv_bpf_disabled__attach(skel), "skel_attach"))
		goto cleanup;

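	/* Drop all effective capabilities; both subtests below run as an
	 * unprivileged caller would.
	 */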
	if (!ASSERT_OK(cap_disable_effective(ALL_CAPS, &save_caps), "disable caps"))
		goto cleanup;

	if (test__start_subtest("unpriv_bpf_disabled_positive"))
		test_unpriv_bpf_disabled_positive(skel, prog_id, prog_fd, perf_fd, map_paths,
						  map_fds);

	if (test__start_subtest("unpriv_bpf_disabled_negative"))
		test_unpriv_bpf_disabled_negative(skel, prog_id, prog_fd, perf_fd, map_paths,
						  map_fds);

cleanup:
	/* perf_fd stays -1 if perf_event_open() was never reached */
	if (perf_fd >= 0)
		close(perf_fd);
	if (save_caps)
		cap_enable_effective(save_caps, NULL);
	if (strlen(perf_event_paranoid_orig) > 0)
		sysctl_set("/proc/sys/kernel/perf_event_paranoid", NULL, perf_event_paranoid_orig);
	if (strlen(unprivileged_bpf_disabled_orig) > 0)
		sysctl_set("/proc/sys/kernel/unprivileged_bpf_disabled", NULL,
			   unprivileged_bpf_disabled_orig);
	for (i = 0; i < NUM_MAPS; i++)
		unlink(map_paths[i]);
	test_unpriv_bpf_disabled__destroy(skel);
}