// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit test for hw_breakpoint constraints accounting logic.
 *
 * Copyright (C) 2022, Google LLC.
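 *
 * One way to run this suite is via the KUnit wrapper (assuming the test is
 * enabled with CONFIG_HW_BREAKPOINT_KUNIT_TEST on an architecture with
 * hardware breakpoint support):
 *
 *	./tools/testing/kunit/kunit.py run hw_breakpoint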
 */

#include <kunit/test.h>
#include <linux/cpumask.h>
#include <linux/hw_breakpoint.h>
#include <linux/kthread.h>
#include <linux/perf_event.h>
#include <asm/hw_breakpoint.h>

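/* Skip the running test case if fewer than @slots breakpoint slots exist. */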
#define TEST_REQUIRES_BP_SLOTS(test, slots)						\
	do {										\
		if ((slots) > get_test_bp_slots()) {					\
			kunit_skip((test), "Requires breakpoint slots: %d > %d", slots,	\
				   get_test_bp_slots());				\
		}									\
	} while (0)

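/* Expect that @expr returns ERR_PTR(-ENOSPC), i.e. no breakpoint slot was free. */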
#define TEST_EXPECT_NOSPC(expr) KUNIT_EXPECT_EQ(test, -ENOSPC, PTR_ERR(expr))

#define MAX_TEST_BREAKPOINTS 512

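/*
 * Each test breakpoint watches its own byte in break_vars[]; created events
 * are kept in test_bps[] so that test_exit() can clean up, and __other_task
 * is a second task target lazily created by get_other_task().
 */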
static char break_vars[MAX_TEST_BREAKPOINTS];
static struct perf_event *test_bps[MAX_TEST_BREAKPOINTS];
static struct task_struct *__other_task;

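/*
 * Create a 1-byte read/write data breakpoint on break_vars[idx], bound to
 * @cpu (-1 for all CPUs) and/or @tsk (NULL for a CPU-wide breakpoint).
 * Returns an ERR_PTR() such as ERR_PTR(-ENOSPC) if no slot can be reserved.
 */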
static struct perf_event *register_test_bp(int cpu, struct task_struct *tsk, int idx)
{
	struct perf_event_attr attr = {};

	if (WARN_ON(idx < 0 || idx >= MAX_TEST_BREAKPOINTS))
		return NULL;

	hw_breakpoint_init(&attr);
	attr.bp_addr = (unsigned long)&break_vars[idx];
	attr.bp_len = HW_BREAKPOINT_LEN_1;
	attr.bp_type = HW_BREAKPOINT_RW;
	return perf_event_create_kernel_counter(&attr, cpu, tsk, NULL, NULL);
}

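/* Unregister a test breakpoint and NULL the caller's pointer to it. */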
static void unregister_test_bp(struct perf_event **bp)
{
	if (WARN_ON(IS_ERR(*bp)))
		return;
	if (WARN_ON(!*bp))
		return;
	unregister_hw_breakpoint(*bp);
	*bp = NULL;
}

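/* Number of TYPE_DATA breakpoint slots per CPU; queried once and cached. */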
static int get_test_bp_slots(void)
{
	static int slots;

	if (!slots)
		slots = hw_breakpoint_slots(TYPE_DATA);

	return slots;
}

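/* Register one breakpoint at *id for @cpu/@tsk, assert success, advance *id. */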
static void fill_one_bp_slot(struct kunit *test, int *id, int cpu, struct task_struct *tsk)
{
	struct perf_event *bp = register_test_bp(cpu, tsk, *id);

	KUNIT_ASSERT_NOT_NULL(test, bp);
	KUNIT_ASSERT_FALSE(test, IS_ERR(bp));
	KUNIT_ASSERT_NULL(test, test_bps[*id]);
	test_bps[(*id)++] = bp;
}

/*
 * Fills up the given @cpu/@tsk with breakpoints, only leaving @skip slots free.
 *
 * Returns true if this can be called again, continuing at @id.
 */
static bool fill_bp_slots(struct kunit *test, int *id, int cpu, struct task_struct *tsk, int skip)
{
	for (int i = 0; i < get_test_bp_slots() - skip; ++i)
		fill_one_bp_slot(test, id, cpu, tsk);

	return *id + get_test_bp_slots() <= MAX_TEST_BREAKPOINTS;
}

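/* Does nothing; the kthread only serves as an alternative task target. */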
static int dummy_kthread(void *arg)
{
	return 0;
}

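/* Return a task distinct from current, lazily created on first use. */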
static struct task_struct *get_other_task(struct kunit *test)
{
	struct task_struct *tsk;

	if (__other_task)
		return __other_task;

	tsk = kthread_create(dummy_kthread, NULL, "hw_breakpoint_dummy_task");
	KUNIT_ASSERT_FALSE(test, IS_ERR(tsk));
	__other_task = tsk;
	return __other_task;
}

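/* Return the @num-th online CPU (num == 0 gives the first online CPU). */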
static int get_test_cpu(int num)
{
	int cpu;

	WARN_ON(num < 0);

	for_each_online_cpu(cpu) {
		if (num-- <= 0)
			break;
	}

	return cpu;
}

/* ===== Test cases ===== */

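/* Exhausting one CPU's slots blocks both task and CPU targets on that CPU. */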
static void test_one_cpu(struct kunit *test)
{
	int idx = 0;

	fill_bp_slots(test, &idx, get_test_cpu(0), NULL, 0);
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
}

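/* Each online CPU's slots must fill up independently of the others. */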
static void test_many_cpus(struct kunit *test)
{
	int idx = 0;
	int cpu;

	/* Test that CPUs are independent. */
	for_each_online_cpu(cpu) {
		bool do_continue = fill_bp_slots(test, &idx, cpu, NULL, 0);

		TEST_EXPECT_NOSPC(register_test_bp(cpu, NULL, idx));
		if (!do_continue)
			break;
	}
}

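/* A task target on all CPUs reserves a slot on every CPU. */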
static void test_one_task_on_all_cpus(struct kunit *test)
{
	int idx = 0;

	fill_bp_slots(test, &idx, -1, current, 0);
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	/* Removing one and adding a CPU-target breakpoint back should work. */
	unregister_test_bp(&test_bps[0]);
	fill_one_bp_slot(test, &idx, get_test_cpu(0), NULL);
}

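/* Distinct tasks can each use a full set of slots on all CPUs. */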
static void test_two_tasks_on_all_cpus(struct kunit *test)
{
	int idx = 0;

	/* Test that tasks are independent. */
	fill_bp_slots(test, &idx, -1, current, 0);
	fill_bp_slots(test, &idx, -1, get_other_task(test), 0);

	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(-1, get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	/* Removing one from the first task must not make room for a CPU target. */
	unregister_test_bp(&test_bps[0]);
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
}

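/* A task target bound to one CPU competes only for that CPU's slots. */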
static void test_one_task_on_one_cpu(struct kunit *test)
{
	int idx = 0;

	fill_bp_slots(test, &idx, get_test_cpu(0), current, 0);
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	/*
	 * Removing one and adding a CPU-target breakpoint back should work;
	 * this case differs from the one above because the task's constraints
	 * are CPU-dependent.
	 */
	unregister_test_bp(&test_bps[0]);
	fill_one_bp_slot(test, &idx, get_test_cpu(0), NULL);
}

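/* Mix single-CPU and all-CPU breakpoints on the same task. */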
static void test_one_task_mixed(struct kunit *test)
{
	int idx = 0;

	TEST_REQUIRES_BP_SLOTS(test, 3);

	fill_one_bp_slot(test, &idx, get_test_cpu(0), current);
	fill_bp_slots(test, &idx, -1, current, 1);
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));

	/* Transition from CPU-dependent pinned count to CPU-independent. */
	unregister_test_bp(&test_bps[0]);
	unregister_test_bp(&test_bps[1]);
	fill_one_bp_slot(test, &idx, get_test_cpu(0), NULL);
	fill_one_bp_slot(test, &idx, get_test_cpu(0), NULL);
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
}

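/* Two tasks pinned to one CPU leave other CPUs' slots untouched. */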
static void test_two_tasks_on_one_cpu(struct kunit *test)
{
	int idx = 0;

	fill_bp_slots(test, &idx, get_test_cpu(0), current, 0);
	fill_bp_slots(test, &idx, get_test_cpu(0), get_other_task(test), 0);

	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(-1, get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	/* Can still create breakpoints on some other CPU. */
	fill_bp_slots(test, &idx, get_test_cpu(1), NULL, 0);
}

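/* One task on a single CPU plus another on all CPUs exhausts every CPU. */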
static void test_two_tasks_on_one_all_cpus(struct kunit *test)
{
	int idx = 0;

	fill_bp_slots(test, &idx, get_test_cpu(0), current, 0);
	fill_bp_slots(test, &idx, -1, get_other_task(test), 0);

	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(-1, get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), get_other_task(test), idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	/* Cannot create breakpoints on some other CPU either. */
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(1), NULL, idx));
}

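/* Exercise transitions between all-CPU and single-CPU targets for one task. */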
static void test_task_on_all_and_one_cpu(struct kunit *test)
{
	int tsk_on_cpu_idx, cpu_idx;
	int idx = 0;

	TEST_REQUIRES_BP_SLOTS(test, 3);

	fill_bp_slots(test, &idx, -1, current, 2);
	/* Transition from only all-CPU breakpoints to a mixed configuration. */
	tsk_on_cpu_idx = idx;
	fill_one_bp_slot(test, &idx, get_test_cpu(0), current);
	fill_one_bp_slot(test, &idx, -1, current);

	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));

	/* We should still be able to use up another CPU's slots. */
	cpu_idx = idx;
	fill_one_bp_slot(test, &idx, get_test_cpu(1), NULL);
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(1), NULL, idx));

	/* Transitioning back to task target on all CPUs. */
	unregister_test_bp(&test_bps[tsk_on_cpu_idx]);
	/* Still have a CPU target breakpoint on get_test_cpu(1). */
	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	/* Remove it and try again. */
	unregister_test_bp(&test_bps[cpu_idx]);
	fill_one_bp_slot(test, &idx, -1, current);

	TEST_EXPECT_NOSPC(register_test_bp(-1, current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), current, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(0), NULL, idx));
	TEST_EXPECT_NOSPC(register_test_bp(get_test_cpu(1), NULL, idx));
}

static struct kunit_case hw_breakpoint_test_cases[] = {
	KUNIT_CASE(test_one_cpu),
	KUNIT_CASE(test_many_cpus),
	KUNIT_CASE(test_one_task_on_all_cpus),
	KUNIT_CASE(test_two_tasks_on_all_cpus),
	KUNIT_CASE(test_one_task_on_one_cpu),
	KUNIT_CASE(test_one_task_mixed),
	KUNIT_CASE(test_two_tasks_on_one_cpu),
	KUNIT_CASE(test_two_tasks_on_one_all_cpus),
	KUNIT_CASE(test_task_on_all_and_one_cpu),
	{},
};

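/* Preconditions for all cases: two or more CPUs, no other breakpoint users. */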
static int test_init(struct kunit *test)
{
	/* Most test cases want 2 distinct CPUs. */
	if (num_online_cpus() < 2)
		kunit_skip(test, "not enough cpus");

	/* Want the system to not use breakpoints elsewhere. */
	if (hw_breakpoint_is_used())
		kunit_skip(test, "hw breakpoint already in use");

	return 0;
}

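/* Unregister any leftover breakpoints and stop the dummy task. */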
static void test_exit(struct kunit *test)
{
	for (int i = 0; i < MAX_TEST_BREAKPOINTS; ++i) {
		if (test_bps[i])
			unregister_test_bp(&test_bps[i]);
	}

	if (__other_task) {
		kthread_stop(__other_task);
		__other_task = NULL;
	}

	/* Verify that internal state agrees that no breakpoints are in use. */
	KUNIT_EXPECT_FALSE(test, hw_breakpoint_is_used());
}

static struct kunit_suite hw_breakpoint_test_suite = {
	.name = "hw_breakpoint",
	.test_cases = hw_breakpoint_test_cases,
	.init = test_init,
	.exit = test_exit,
};

kunit_test_suites(&hw_breakpoint_test_suite);

MODULE_AUTHOR("Marco Elver <elver@google.com>");
MODULE_LICENSE("GPL");