// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * test_kprobes.c - simple sanity test for *probes
 *
 * Copyright IBM Corp. 2008
 */

#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/random.h>
#include <kunit/test.h>

#define div_factor 3

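/*
 * The kprobe targets below all divide their argument by div_factor, and
 * rand1 is picked above div_factor in kprobes_test_init(), so the quotient
 * is never zero and the handlers' side effects (preh_val, posth_val,
 * krph_val) can be checked against non-zero expectations. The targets are
 * invoked through the function pointers below, which are also assigned in
 * kprobes_test_init().
 */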
static u32 rand1, preh_val, posth_val;
static u32 (*target)(u32 value);
static u32 (*recursed_target)(u32 value);
static u32 (*target2)(u32 value);
static struct kunit *current_test;

static unsigned long (*internal_target)(void);
static unsigned long (*stacktrace_target)(void);
static unsigned long (*stacktrace_driver)(void);
static unsigned long target_return_address[2];

static noinline u32 kprobe_target(u32 value)
{
	return (value / div_factor);
}

static noinline u32 kprobe_recursed_target(u32 value)
{
	return (value / div_factor);
}

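/*
 * Both handlers call recursed_target(), so when these same handlers are
 * attached to kprobe_recursed_target() itself (see test_kprobe_missed()),
 * each handler triggers one recursive probe hit that is counted in nmissed.
 */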
static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	KUNIT_EXPECT_FALSE(current_test, preemptible());

	preh_val = recursed_target(rand1);
	return 0;
}

static void kp_post_handler(struct kprobe *p, struct pt_regs *regs,
		unsigned long flags)
{
	u32 expval = recursed_target(rand1);

	KUNIT_EXPECT_FALSE(current_test, preemptible());
	KUNIT_EXPECT_EQ(current_test, preh_val, expval);

	posth_val = preh_val + div_factor;
}

static struct kprobe kp = {
	.symbol_name = "kprobe_target",
	.pre_handler = kp_pre_handler,
	.post_handler = kp_post_handler
};

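/*
 * Register a single kprobe on kprobe_target() and verify that both the
 * pre- and post-handler ran by checking their non-zero side effects.
 */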
static void test_kprobe(struct kunit *test)
{
	current_test = test;
	KUNIT_EXPECT_EQ(test, 0, register_kprobe(&kp));
	target(rand1);
	unregister_kprobe(&kp);
	KUNIT_EXPECT_NE(test, 0, preh_val);
	KUNIT_EXPECT_NE(test, 0, posth_val);
}

static noinline u32 kprobe_target2(u32 value)
{
	return (value / div_factor) + 1;
}

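/*
 * Call chain used by the stacktrace tests: stacktrace_driver() calls
 * stacktrace_target(), which in turn calls internal_target(). Each callee
 * records its own return address in target_return_address[] on the first
 * call, so stack traces taken later from the kretprobe handlers can be
 * compared against the real return addresses.
 */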
static noinline unsigned long kprobe_stacktrace_internal_target(void)
{
	if (!target_return_address[0])
		target_return_address[0] = (unsigned long)__builtin_return_address(0);
	return target_return_address[0];
}

static noinline unsigned long kprobe_stacktrace_target(void)
{
	if (!target_return_address[1])
		target_return_address[1] = (unsigned long)__builtin_return_address(0);

	if (internal_target)
		internal_target();

	return target_return_address[1];
}

static noinline unsigned long kprobe_stacktrace_driver(void)
{
	if (stacktrace_target)
		stacktrace_target();

	/* Return our own return address so the caller can verify this call was not inlined */
	return (unsigned long)__builtin_return_address(0);
}

static int kp_pre_handler2(struct kprobe *p, struct pt_regs *regs)
{
	preh_val = (rand1 / div_factor) + 1;
	return 0;
}

static void kp_post_handler2(struct kprobe *p, struct pt_regs *regs,
		unsigned long flags)
{
	KUNIT_EXPECT_EQ(current_test, preh_val, (rand1 / div_factor) + 1);
	posth_val = preh_val + div_factor;
}

static struct kprobe kp2 = {
	.symbol_name = "kprobe_target2",
	.pre_handler = kp_pre_handler2,
	.post_handler = kp_post_handler2
};

static void test_kprobes(struct kunit *test)
{
	struct kprobe *kps[2] = {&kp, &kp2};

	current_test = test;

	/* addr and flags must be cleared before reusing the kprobe. */
	kp.addr = NULL;
	kp.flags = 0;

	KUNIT_EXPECT_EQ(test, 0, register_kprobes(kps, 2));
	preh_val = 0;
	posth_val = 0;
	target(rand1);

	KUNIT_EXPECT_NE(test, 0, preh_val);
	KUNIT_EXPECT_NE(test, 0, posth_val);

	preh_val = 0;
	posth_val = 0;
	target2(rand1);

	KUNIT_EXPECT_NE(test, 0, preh_val);
	KUNIT_EXPECT_NE(test, 0, posth_val);
	unregister_kprobes(kps, 2);
}

static struct kprobe kp_missed = {
	.symbol_name = "kprobe_recursed_target",
	.pre_handler = kp_pre_handler,
	.post_handler = kp_post_handler,
};

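/*
 * The probe sits on kprobe_recursed_target() and both handlers call that
 * function recursively, so exactly two recursive hits are expected to be
 * accounted in kp_missed.nmissed.
 */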
static void test_kprobe_missed(struct kunit *test)
{
	current_test = test;
	preh_val = 0;
	posth_val = 0;

	KUNIT_EXPECT_EQ(test, 0, register_kprobe(&kp_missed));

	recursed_target(rand1);

	KUNIT_EXPECT_EQ(test, 2, kp_missed.nmissed);
	KUNIT_EXPECT_NE(test, 0, preh_val);
	KUNIT_EXPECT_NE(test, 0, posth_val);

	unregister_kprobe(&kp_missed);
}

#ifdef CONFIG_KRETPROBES
static u32 krph_val;

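/*
 * The entry handler stores rand1 / div_factor in krph_val; the return
 * handler checks that value and then overwrites krph_val with rand1,
 * which the kretprobe test cases verify after the target returns.
 */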
static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	KUNIT_EXPECT_FALSE(current_test, preemptible());
	krph_val = (rand1 / div_factor);
	return 0;
}

static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long ret = regs_return_value(regs);

	KUNIT_EXPECT_FALSE(current_test, preemptible());
	KUNIT_EXPECT_EQ(current_test, ret, rand1 / div_factor);
	KUNIT_EXPECT_NE(current_test, krph_val, 0);
	krph_val = rand1;
	return 0;
}

static struct kretprobe rp = {
	.handler	= return_handler,
	.entry_handler  = entry_handler,
	.kp.symbol_name = "kprobe_target"
};

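/*
 * Register a kretprobe on kprobe_target() and verify that the return
 * handler ran and saw the expected return value.
 */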
static void test_kretprobe(struct kunit *test)
{
	current_test = test;
	KUNIT_EXPECT_EQ(test, 0, register_kretprobe(&rp));
	target(rand1);
	unregister_kretprobe(&rp);
	KUNIT_EXPECT_EQ(test, krph_val, rand1);
}

static int return_handler2(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long ret = regs_return_value(regs);

	KUNIT_EXPECT_EQ(current_test, ret, (rand1 / div_factor) + 1);
	KUNIT_EXPECT_NE(current_test, krph_val, 0);
	krph_val = rand1;
	return 0;
}

static struct kretprobe rp2 = {
	.handler	= return_handler2,
	.entry_handler  = entry_handler,
	.kp.symbol_name = "kprobe_target2"
};

static void test_kretprobes(struct kunit *test)
{
	struct kretprobe *rps[2] = {&rp, &rp2};

	current_test = test;
	/* addr and flags must be cleared before reusing the kprobe. */
	rp.kp.addr = NULL;
	rp.kp.flags = 0;
	KUNIT_EXPECT_EQ(test, 0, register_kretprobes(rps, 2));

	krph_val = 0;
	target(rand1);
	KUNIT_EXPECT_EQ(test, krph_val, rand1);

	krph_val = 0;
	target2(rand1);
	KUNIT_EXPECT_EQ(test, krph_val, rand1);
	unregister_kretprobes(rps, 2);
}

#ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
#define STACK_BUF_SIZE 16
static unsigned long stack_buf[STACK_BUF_SIZE];

static int stacktrace_return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long retval = regs_return_value(regs);
	int i, ret;

	KUNIT_EXPECT_FALSE(current_test, preemptible());
	KUNIT_EXPECT_EQ(current_test, retval, target_return_address[1]);

	/*
	 * Test the stacktrace taken inside the kretprobe handler. It goes
	 * through the kretprobe trampoline, but it must still include the
	 * correct return address of the target function.
	 */
	ret = stack_trace_save(stack_buf, STACK_BUF_SIZE, 0);
	KUNIT_EXPECT_NE(current_test, ret, 0);

	for (i = 0; i < ret; i++) {
		if (stack_buf[i] == target_return_address[1])
			break;
	}
	KUNIT_EXPECT_NE(current_test, i, ret);

#if !IS_MODULE(CONFIG_KPROBES_SANITY_TEST)
	/*
	 * Test the stacktrace taken from pt_regs at the return address. This
	 * stack trace must start from the target's return address.
	 */
	ret = stack_trace_save_regs(regs, stack_buf, STACK_BUF_SIZE, 0);
	KUNIT_EXPECT_NE(current_test, ret, 0);
	KUNIT_EXPECT_EQ(current_test, stack_buf[0], target_return_address[1]);
#endif

	return 0;
}

static struct kretprobe rp3 = {
	.handler	= stacktrace_return_handler,
	.kp.symbol_name = "kprobe_stacktrace_target"
};

static void test_stacktrace_on_kretprobe(struct kunit *test)
{
	unsigned long myretaddr = (unsigned long)__builtin_return_address(0);

	current_test = test;
	rp3.kp.addr = NULL;
	rp3.kp.flags = 0;

	/*
	 * Run stacktrace_driver() once to record the correct return address
	 * in stacktrace_target(), and verify that the stacktrace_driver()
	 * call is not inlined by checking that its return address differs
	 * from the return address of this function.
	 */
	KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());

	KUNIT_ASSERT_EQ(test, 0, register_kretprobe(&rp3));
	KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
	unregister_kretprobe(&rp3);
}

static int stacktrace_internal_return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	unsigned long retval = regs_return_value(regs);
	int i, ret;

	KUNIT_EXPECT_FALSE(current_test, preemptible());
	KUNIT_EXPECT_EQ(current_test, retval, target_return_address[0]);

	/*
	 * Test the stacktrace inside the kretprobe handler for the nested
	 * case. The unwinder will find the kretprobe_trampoline address in
	 * place of the outer return address, and kretprobe must resolve it
	 * back to the real return address.
	 */
	ret = stack_trace_save(stack_buf, STACK_BUF_SIZE, 0);
	KUNIT_EXPECT_NE(current_test, ret, 0);

	for (i = 0; i < ret - 1; i++) {
		if (stack_buf[i] == target_return_address[0]) {
			KUNIT_EXPECT_EQ(current_test, stack_buf[i + 1], target_return_address[1]);
			break;
		}
	}
	KUNIT_EXPECT_NE(current_test, i, ret);

#if !IS_MODULE(CONFIG_KPROBES_SANITY_TEST)
	/* Ditto for the regs version. */
	ret = stack_trace_save_regs(regs, stack_buf, STACK_BUF_SIZE, 0);
	KUNIT_EXPECT_NE(current_test, ret, 0);
	KUNIT_EXPECT_EQ(current_test, stack_buf[0], target_return_address[0]);
	KUNIT_EXPECT_EQ(current_test, stack_buf[1], target_return_address[1]);
#endif

	return 0;
}

static struct kretprobe rp4 = {
	.handler	= stacktrace_internal_return_handler,
	.kp.symbol_name = "kprobe_stacktrace_internal_target"
};

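/*
 * Nested case: rp3 probes the outer stacktrace target and rp4 probes the
 * internal target it calls, so the inner return handler unwinds through a
 * stack that already carries a kretprobe trampoline for the outer call.
 */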
static void test_stacktrace_on_nested_kretprobe(struct kunit *test)
{
	unsigned long myretaddr = (unsigned long)__builtin_return_address(0);
	struct kretprobe *rps[2] = {&rp3, &rp4};

	current_test = test;
	rp3.kp.addr = NULL;
	rp3.kp.flags = 0;

	//KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());

	KUNIT_ASSERT_EQ(test, 0, register_kretprobes(rps, 2));
	KUNIT_ASSERT_NE(test, myretaddr, stacktrace_driver());
	unregister_kretprobes(rps, 2);
}
#endif /* CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE */

#endif /* CONFIG_KRETPROBES */

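/* Wire up the indirect-call targets and pick a non-trivial random input. */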
static int kprobes_test_init(struct kunit *test)
{
	target = kprobe_target;
	target2 = kprobe_target2;
	recursed_target = kprobe_recursed_target;
	stacktrace_target = kprobe_stacktrace_target;
	internal_target = kprobe_stacktrace_internal_target;
	stacktrace_driver = kprobe_stacktrace_driver;
	rand1 = get_random_u32_above(div_factor);
	return 0;
}

static struct kunit_case kprobes_testcases[] = {
	KUNIT_CASE(test_kprobe),
	KUNIT_CASE(test_kprobes),
	KUNIT_CASE(test_kprobe_missed),
#ifdef CONFIG_KRETPROBES
	KUNIT_CASE(test_kretprobe),
	KUNIT_CASE(test_kretprobes),
#ifdef CONFIG_ARCH_CORRECT_STACKTRACE_ON_KRETPROBE
	KUNIT_CASE(test_stacktrace_on_kretprobe),
	KUNIT_CASE(test_stacktrace_on_nested_kretprobe),
#endif
#endif
	{}
};

static struct kunit_suite kprobes_test_suite = {
	.name = "kprobes_test",
	.init = kprobes_test_init,
	.test_cases = kprobes_testcases,
};

kunit_test_suites(&kprobes_test_suite);

MODULE_LICENSE("GPL");