// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020 Google */

#include <test_progs.h>
#include <bpf/libbpf.h>
#include <bpf/btf.h>
#include "test_ksyms_btf.skel.h"
#include "test_ksyms_btf_null_check.skel.h"
#include "test_ksyms_weak.skel.h"
#include "test_ksyms_weak.lskel.h"
#include "test_ksyms_btf_write_check.skel.h"

static int duration;

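/*
 * Compare the addresses of the 'runqueues' and 'bpf_prog_active' ksyms
 * reported by kallsyms with the addresses captured by the BPF program,
 * and sanity-check the per-CPU values read through them.
 */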
static void test_basic(void)
{
	__u64 runqueues_addr, bpf_prog_active_addr;
	__u32 this_rq_cpu;
	int this_bpf_prog_active;
	struct test_ksyms_btf *skel = NULL;
	struct test_ksyms_btf__data *data;
	int err;

	err = kallsyms_find("runqueues", &runqueues_addr);
	if (CHECK(err == -EINVAL, "kallsyms_fopen", "failed to open: %d\n", errno))
		return;
	if (CHECK(err == -ENOENT, "ksym_find", "symbol 'runqueues' not found\n"))
		return;

	err = kallsyms_find("bpf_prog_active", &bpf_prog_active_addr);
	if (CHECK(err == -EINVAL, "kallsyms_fopen", "failed to open: %d\n", errno))
		return;
	if (CHECK(err == -ENOENT, "ksym_find", "symbol 'bpf_prog_active' not found\n"))
		return;

	skel = test_ksyms_btf__open_and_load();
	if (CHECK(!skel, "skel_open", "failed to open and load skeleton\n"))
		goto cleanup;

	err = test_ksyms_btf__attach(skel);
	if (CHECK(err, "skel_attach", "skeleton attach failed: %d\n", err))
		goto cleanup;

	/* trigger tracepoint */
	usleep(1);

	data = skel->data;
	CHECK(data->out__runqueues_addr != runqueues_addr, "runqueues_addr",
	      "got %llu, exp %llu\n",
	      (unsigned long long)data->out__runqueues_addr,
	      (unsigned long long)runqueues_addr);
	CHECK(data->out__bpf_prog_active_addr != bpf_prog_active_addr, "bpf_prog_active_addr",
	      "got %llu, exp %llu\n",
	      (unsigned long long)data->out__bpf_prog_active_addr,
	      (unsigned long long)bpf_prog_active_addr);

	CHECK(data->out__rq_cpu == -1, "rq_cpu",
	      "got %u, exp != -1\n", data->out__rq_cpu);
	CHECK(data->out__bpf_prog_active < 0, "bpf_prog_active",
	      "got %d, exp >= 0\n", data->out__bpf_prog_active);
	CHECK(data->out__cpu_0_rq_cpu != 0, "cpu_rq(0)->cpu",
	      "got %u, exp 0\n", data->out__cpu_0_rq_cpu);

	this_rq_cpu = data->out__this_rq_cpu;
	CHECK(this_rq_cpu != data->out__rq_cpu, "this_rq_cpu",
	      "got %u, exp %u\n", this_rq_cpu, data->out__rq_cpu);

	this_bpf_prog_active = data->out__this_bpf_prog_active;
	CHECK(this_bpf_prog_active != data->out__bpf_prog_active, "this_bpf_prog_active",
	      "got %d, exp %d\n", this_bpf_prog_active,
	      data->out__bpf_prog_active);

cleanup:
	test_ksyms_btf__destroy(skel);
}

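/*
 * A program that dereferences a per-CPU ksym pointer without a NULL
 * check must be rejected by the verifier, so loading is expected to fail.
 */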
static void test_null_check(void)
{
	struct test_ksyms_btf_null_check *skel;

	skel = test_ksyms_btf_null_check__open_and_load();
	CHECK(skel, "skel_open", "unexpected load of a prog missing null check\n");

	test_ksyms_btf_null_check__destroy(skel);
}

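/*
 * Weak ksyms: symbols that exist in the kernel should resolve and yield
 * the expected values, while references to non-existent symbols should
 * read back as zero.
 */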
static void test_weak_syms(void)
{
	struct test_ksyms_weak *skel;
	struct test_ksyms_weak__data *data;
	int err;

	skel = test_ksyms_weak__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_ksyms_weak__open_and_load"))
		return;

	err = test_ksyms_weak__attach(skel);
	if (!ASSERT_OK(err, "test_ksyms_weak__attach"))
		goto cleanup;

	/* trigger tracepoint */
	usleep(1);

	data = skel->data;
	ASSERT_EQ(data->out__existing_typed, 0, "existing typed ksym");
	ASSERT_NEQ(data->out__existing_typeless, -1, "existing typeless ksym");
	ASSERT_EQ(data->out__non_existent_typeless, 0, "nonexistent typeless ksym");
	ASSERT_EQ(data->out__non_existent_typed, 0, "nonexistent typed ksym");

cleanup:
	test_ksyms_weak__destroy(skel);
}

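/* Same expectations as test_weak_syms(), but using the light skeleton. */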
static void test_weak_syms_lskel(void)
{
	struct test_ksyms_weak_lskel *skel;
	struct test_ksyms_weak_lskel__data *data;
	int err;

	skel = test_ksyms_weak_lskel__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_ksyms_weak_lskel__open_and_load"))
		return;

	err = test_ksyms_weak_lskel__attach(skel);
	if (!ASSERT_OK(err, "test_ksyms_weak_lskel__attach"))
		goto cleanup;

	/* trigger tracepoint */
	usleep(1);

	data = skel->data;
	ASSERT_EQ(data->out__existing_typed, 0, "existing typed ksym");
	ASSERT_NEQ(data->out__existing_typeless, -1, "existing typeless ksym");
	ASSERT_EQ(data->out__non_existent_typeless, 0, "nonexistent typeless ksym");
	ASSERT_EQ(data->out__non_existent_typed, 0, "nonexistent typed ksym");

cleanup:
	test_ksyms_weak_lskel__destroy(skel);
}

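/*
 * Programs that write to ksym memory must be rejected at load time.
 * Only one of the two handlers is auto-loaded per subtest, so each
 * write path is verified separately.
 */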
static void test_write_check(bool test_handler1)
{
	struct test_ksyms_btf_write_check *skel;

	skel = test_ksyms_btf_write_check__open();
	if (!ASSERT_OK_PTR(skel, "test_ksyms_btf_write_check__open"))
		return;
	bpf_program__set_autoload(test_handler1 ? skel->progs.handler2 : skel->progs.handler1, false);
	ASSERT_ERR(test_ksyms_btf_write_check__load(skel),
		   "unexpected load of a prog writing to ksym memory\n");

	test_ksyms_btf_write_check__destroy(skel);
}

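/* The subtests rely on per-CPU ksyms, so skip everything if the kernel
 * BTF has no .data..percpu DATASEC.
 */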
void test_ksyms_btf(void)
{
	int percpu_datasec;
	struct btf *btf;

	btf = libbpf_find_kernel_btf();
	if (!ASSERT_OK_PTR(btf, "btf_exists"))
		return;

	percpu_datasec = btf__find_by_name_kind(btf, ".data..percpu",
						BTF_KIND_DATASEC);
	btf__free(btf);
	if (percpu_datasec < 0) {
		printf("%s:SKIP:no PERCPU DATASEC in kernel btf\n",
		       __func__);
		test__skip();
		return;
	}

	if (test__start_subtest("basic"))
		test_basic();

	if (test__start_subtest("null_check"))
		test_null_check();

	if (test__start_subtest("weak_ksyms"))
		test_weak_syms();

	if (test__start_subtest("weak_ksyms_lskel"))
		test_weak_syms_lskel();

	if (test__start_subtest("write_check1"))
		test_write_check(true);

	if (test__start_subtest("write_check2"))
		test_write_check(false);
}