// SPDX-License-Identifier: GPL-2.0
/*
 * Runtime test cases for CONFIG_FORTIFY_SOURCE. For testing memcpy(),
 * see FORTIFY_MEM_* tests in LKDTM (drivers/misc/lkdtm/fortify.c).
 *
 * For corner cases with UBSAN, try testing with:
 *
 * ./tools/testing/kunit/kunit.py run --arch=x86_64 \
 *	--kconfig_add CONFIG_FORTIFY_SOURCE=y \
 *	--kconfig_add CONFIG_UBSAN=y \
 *	--kconfig_add CONFIG_UBSAN_TRAP=y \
 *	--kconfig_add CONFIG_UBSAN_BOUNDS=y \
 *	--kconfig_add CONFIG_UBSAN_LOCAL_BOUNDS=y \
 *	--make_options LLVM=1 fortify
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/* Redefine fortify_panic() to track failures. */
void fortify_add_kunit_error(int write);
#define fortify_panic(func, write, avail, size, retfail) do {		\
	__fortify_report(FORTIFY_REASON(func, write), avail, size);	\
	fortify_add_kunit_error(write);					\
	return (retfail);						\
} while (0)
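/*
 * Assumption about header ordering: this override is defined before
 * <linux/string.h> (and the fortified string inlines it pulls in) is
 * included below, so the report-and-count path above is what the
 * fortified helpers hit instead of the default fortify_panic().
 */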

#include <kunit/device.h>
#include <kunit/test.h>
#include <kunit/test-bug.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Handle being built without CONFIG_FORTIFY_SOURCE */
#ifndef __compiletime_strlen
# define __compiletime_strlen __builtin_strlen
#endif
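/*
 * Note: without CONFIG_FORTIFY_SOURCE the fallback above would make the
 * SIZE_MAX expectations in known_sizes_test() meaningless, but
 * fortify_test_init() below skips every test case in that configuration.
 */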

static struct kunit_resource read_resource;
static struct kunit_resource write_resource;
static int fortify_read_overflows;
static int fortify_write_overflows;

static const char array_of_10[] = "this is 10";
static const char *ptr_of_11 = "this is 11!";
static char array_unknown[] = "compiler thinks I might change";

void fortify_add_kunit_error(int write)
{
	struct kunit_resource *resource;
	struct kunit *current_test;

	current_test = kunit_get_current_test();
	if (!current_test)
		return;

	resource = kunit_find_named_resource(current_test,
			write ? "fortify_write_overflows"
			      : "fortify_read_overflows");
	if (!resource)
		return;

	(*(int *)resource->data)++;
	kunit_put_resource(resource);
}
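/*
 * The named resources looked up above are registered by fortify_test_init()
 * with their data pointing at fortify_read_overflows/fortify_write_overflows,
 * so each reported overflow increments the matching per-test counter.
 */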

static void known_sizes_test(struct kunit *test)
{
	KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8);
	KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_of_10), 10);
	KUNIT_EXPECT_EQ(test, __compiletime_strlen(ptr_of_11), 11);

	KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_unknown), SIZE_MAX);
	/* Externally defined and dynamically sized string pointer: */
	KUNIT_EXPECT_EQ(test, __compiletime_strlen(test->name), SIZE_MAX);
}

/* This is volatile so the optimizer can't perform DCE below. */
static volatile int pick;

/* Not inline to keep optimizer from figuring out which string we want. */
static noinline size_t want_minus_one(int pick)
{
	const char *str;

	switch (pick) {
	case 1:
		str = "4444";
		break;
	case 2:
		str = "333";
		break;
	default:
		str = "1";
		break;
	}
	return __compiletime_strlen(str);
}

static void control_flow_split_test(struct kunit *test)
{
	KUNIT_EXPECT_EQ(test, want_minus_one(pick), SIZE_MAX);
}

#define KUNIT_EXPECT_BOS(test, p, expected, name)			\
	KUNIT_EXPECT_EQ_MSG(test, __builtin_object_size(p, 1),		\
		expected,						\
		"__alloc_size() not working with __bos on " name "\n")

#if !__has_builtin(__builtin_dynamic_object_size)
#define KUNIT_EXPECT_BDOS(test, p, expected, name)			\
	/* Silence "unused variable 'expected'" warning. */		\
	KUNIT_EXPECT_EQ(test, expected, expected)
#else
#define KUNIT_EXPECT_BDOS(test, p, expected, name)			\
	KUNIT_EXPECT_EQ_MSG(test, __builtin_dynamic_object_size(p, 1),	\
		expected,						\
		"__alloc_size() not working with __bdos on " name "\n")
#endif

/* If the expected size is a constant value, __bos can see it. */
#define check_const(_expected, alloc, free)		do {		\
	size_t expected = (_expected);					\
	void *p = alloc;						\
	KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n");	\
	KUNIT_EXPECT_BOS(test, p, expected, #alloc);			\
	KUNIT_EXPECT_BDOS(test, p, expected, #alloc);			\
	free;								\
} while (0)
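/*
 * Illustrative use (not one of the generated test cases):
 * check_const(16, kmalloc(16, gfp), kfree(p)) allocates, verifies that
 * both __bos and __bdos report 16 bytes for the returned pointer, and
 * then frees it; the "free" argument may reference "p", the local
 * declared inside the macro.
 */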

/* If the expected size is NOT a constant value, __bos CANNOT see it. */
#define check_dynamic(_expected, alloc, free)		do {		\
	size_t expected = (_expected);					\
	void *p = alloc;						\
	KUNIT_EXPECT_TRUE_MSG(test, p != NULL, #alloc " failed?!\n");	\
	KUNIT_EXPECT_BOS(test, p, SIZE_MAX, #alloc);			\
	KUNIT_EXPECT_BDOS(test, p, expected, #alloc);			\
	free;								\
} while (0)

/* Assortment of constant-value kinda-edge cases. */
#define CONST_TEST_BODY(TEST_alloc)	do {				\
	/* Special-case vmalloc()-family to skip 0-sized allocs. */	\
	if (strcmp(#TEST_alloc, "TEST_vmalloc") != 0)			\
		TEST_alloc(check_const, 0, 0);				\
	TEST_alloc(check_const, 1, 1);					\
	TEST_alloc(check_const, 128, 128);				\
	TEST_alloc(check_const, 1023, 1023);				\
	TEST_alloc(check_const, 1025, 1025);				\
	TEST_alloc(check_const, 4096, 4096);				\
	TEST_alloc(check_const, 4097, 4097);				\
} while (0)

static volatile size_t zero_size;
static volatile size_t unknown_size = 50;

#if !__has_builtin(__builtin_dynamic_object_size)
#define DYNAMIC_TEST_BODY(TEST_alloc)					\
	kunit_skip(test, "Compiler is missing __builtin_dynamic_object_size() support\n")
#else
#define DYNAMIC_TEST_BODY(TEST_alloc)	do {				\
	size_t size = unknown_size;					\
									\
	/*								\
	 * Expected size is "size" in each test, before it is then	\
	 * internally incremented in each test.	Requires we disable	\
	 * -Wunsequenced.						\
	 */								\
	TEST_alloc(check_dynamic, size, size++);			\
	/* Make sure incrementing actually happened. */			\
	KUNIT_EXPECT_NE(test, size, unknown_size);			\
} while (0)
#endif

#define DEFINE_ALLOC_SIZE_TEST_PAIR(allocator)				\
static void alloc_size_##allocator##_const_test(struct kunit *test)	\
{									\
	CONST_TEST_BODY(TEST_##allocator);				\
}									\
static void alloc_size_##allocator##_dynamic_test(struct kunit *test)	\
{									\
	DYNAMIC_TEST_BODY(TEST_##allocator);				\
}
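/*
 * For example, DEFINE_ALLOC_SIZE_TEST_PAIR(kmalloc) below produces
 * alloc_size_kmalloc_const_test() and alloc_size_kmalloc_dynamic_test(),
 * the names registered in fortify_test_cases[] at the end of this file.
 */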

#define TEST_kmalloc(checker, expected_size, alloc_size)	do {	\
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;				\
	void *orig;							\
	size_t len;							\
									\
	checker(expected_size, kmalloc(alloc_size, gfp),		\
		kfree(p));						\
	checker(expected_size,						\
		kmalloc_node(alloc_size, gfp, NUMA_NO_NODE),		\
		kfree(p));						\
	checker(expected_size, kzalloc(alloc_size, gfp),		\
		kfree(p));						\
	checker(expected_size,						\
		kzalloc_node(alloc_size, gfp, NUMA_NO_NODE),		\
		kfree(p));						\
	checker(expected_size, kcalloc(1, alloc_size, gfp),		\
		kfree(p));						\
	checker(expected_size, kcalloc(alloc_size, 1, gfp),		\
		kfree(p));						\
	checker(expected_size,						\
		kcalloc_node(1, alloc_size, gfp, NUMA_NO_NODE),		\
		kfree(p));						\
	checker(expected_size,						\
		kcalloc_node(alloc_size, 1, gfp, NUMA_NO_NODE),		\
		kfree(p));						\
	checker(expected_size, kmalloc_array(1, alloc_size, gfp),	\
		kfree(p));						\
	checker(expected_size, kmalloc_array(alloc_size, 1, gfp),	\
		kfree(p));						\
	checker(expected_size,						\
		kmalloc_array_node(1, alloc_size, gfp, NUMA_NO_NODE),	\
		kfree(p));						\
	checker(expected_size,						\
		kmalloc_array_node(alloc_size, 1, gfp, NUMA_NO_NODE),	\
		kfree(p));						\
	checker(expected_size, __kmalloc(alloc_size, gfp),		\
		kfree(p));						\
	checker(expected_size,						\
		__kmalloc_node(alloc_size, gfp, NUMA_NO_NODE),		\
		kfree(p));						\
									\
	orig = kmalloc(alloc_size, gfp);				\
	KUNIT_EXPECT_TRUE(test, orig != NULL);				\
	checker((expected_size) * 2,					\
		krealloc(orig, (alloc_size) * 2, gfp),			\
		kfree(p));						\
	orig = kmalloc(alloc_size, gfp);				\
	KUNIT_EXPECT_TRUE(test, orig != NULL);				\
	checker((expected_size) * 2,					\
		krealloc_array(orig, 1, (alloc_size) * 2, gfp),		\
		kfree(p));						\
	orig = kmalloc(alloc_size, gfp);				\
	KUNIT_EXPECT_TRUE(test, orig != NULL);				\
	checker((expected_size) * 2,					\
		krealloc_array(orig, (alloc_size) * 2, 1, gfp),		\
		kfree(p));						\
									\
	len = 11;							\
	/* Using memdup() with fixed size, so force unknown length. */	\
	if (!__builtin_constant_p(expected_size))			\
		len += zero_size;					\
	checker(len, kmemdup("hello there", len, gfp), kfree(p));	\
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(kmalloc)

/* Sizes are in pages, not bytes. */
#define TEST_vmalloc(checker, expected_pages, alloc_pages)	do {	\
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;				\
	checker((expected_pages) * PAGE_SIZE,				\
		vmalloc((alloc_pages) * PAGE_SIZE),	   vfree(p));	\
	checker((expected_pages) * PAGE_SIZE,				\
		vzalloc((alloc_pages) * PAGE_SIZE),	   vfree(p));	\
	checker((expected_pages) * PAGE_SIZE,				\
		__vmalloc((alloc_pages) * PAGE_SIZE, gfp), vfree(p));	\
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(vmalloc)

/* Sizes are in pages (and open-coded for side-effects), not bytes. */
#define TEST_kvmalloc(checker, expected_pages, alloc_pages)	do {	\
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;				\
	size_t prev_size;						\
	void *orig;							\
									\
	checker((expected_pages) * PAGE_SIZE,				\
		kvmalloc((alloc_pages) * PAGE_SIZE, gfp),		\
		vfree(p));						\
	checker((expected_pages) * PAGE_SIZE,				\
		kvmalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
		vfree(p));						\
	checker((expected_pages) * PAGE_SIZE,				\
		kvzalloc((alloc_pages) * PAGE_SIZE, gfp),		\
		vfree(p));						\
	checker((expected_pages) * PAGE_SIZE,				\
		kvzalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
		vfree(p));						\
	checker((expected_pages) * PAGE_SIZE,				\
		kvcalloc(1, (alloc_pages) * PAGE_SIZE, gfp),		\
		vfree(p));						\
	checker((expected_pages) * PAGE_SIZE,				\
		kvcalloc((alloc_pages) * PAGE_SIZE, 1, gfp),		\
		vfree(p));						\
	checker((expected_pages) * PAGE_SIZE,				\
		kvmalloc_array(1, (alloc_pages) * PAGE_SIZE, gfp),	\
		vfree(p));						\
	checker((expected_pages) * PAGE_SIZE,				\
		kvmalloc_array((alloc_pages) * PAGE_SIZE, 1, gfp),	\
		vfree(p));						\
									\
	prev_size = (expected_pages) * PAGE_SIZE;			\
	orig = kvmalloc(prev_size, gfp);				\
	KUNIT_EXPECT_TRUE(test, orig != NULL);				\
	checker(((expected_pages) * PAGE_SIZE) * 2,			\
		kvrealloc(orig, prev_size,				\
			  ((alloc_pages) * PAGE_SIZE) * 2, gfp),	\
		kvfree(p));						\
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)

#define TEST_devm_kmalloc(checker, expected_size, alloc_size)	do {	\
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;				\
	const char dev_name[] = "fortify-test";				\
	struct device *dev;						\
	void *orig;							\
	size_t len;							\
									\
	/* Create dummy device for devm_kmalloc()-family tests. */	\
	dev = kunit_device_register(test, dev_name);			\
	KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev),			\
			       "Cannot register test device\n");	\
									\
	checker(expected_size, devm_kmalloc(dev, alloc_size, gfp),	\
		devm_kfree(dev, p));					\
	checker(expected_size, devm_kzalloc(dev, alloc_size, gfp),	\
		devm_kfree(dev, p));					\
	checker(expected_size,						\
		devm_kmalloc_array(dev, 1, alloc_size, gfp),		\
		devm_kfree(dev, p));					\
	checker(expected_size,						\
		devm_kmalloc_array(dev, alloc_size, 1, gfp),		\
		devm_kfree(dev, p));					\
	checker(expected_size,						\
		devm_kcalloc(dev, 1, alloc_size, gfp),			\
		devm_kfree(dev, p));					\
	checker(expected_size,						\
		devm_kcalloc(dev, alloc_size, 1, gfp),			\
		devm_kfree(dev, p));					\
									\
	orig = devm_kmalloc(dev, alloc_size, gfp);			\
	KUNIT_EXPECT_TRUE(test, orig != NULL);				\
	checker((expected_size) * 2,					\
		devm_krealloc(dev, orig, (alloc_size) * 2, gfp),	\
		devm_kfree(dev, p));					\
									\
	len = 4;							\
	/* Using memdup() with fixed size, so force unknown length. */	\
	if (!__builtin_constant_p(expected_size))			\
		len += zero_size;					\
	checker(len, devm_kmemdup(dev, "Ohai", len, gfp),		\
		devm_kfree(dev, p));					\
									\
	kunit_device_unregister(test, dev);				\
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc)

/*
 * We can't have an array at the end of the structure, or else builds
 * without -fstrict-flex-arrays=3 will report it as having an unknown
 * length. Additionally, add bytes before and after the string to catch
 * over/underflows if tests fail.
 */
struct fortify_padding {
	unsigned long bytes_before;
	char buf[32];
	unsigned long bytes_after;
};
/* Force compiler into not being able to resolve size at compile-time. */
static volatile int unconst;
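/*
 * Adding "unconst" (which always reads as 0) to an otherwise constant
 * size keeps the expression from being a compile-time constant, so the
 * run-time bounds checks get exercised rather than being folded away.
 */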

static void strlen_test(struct kunit *test)
{
	struct fortify_padding pad = { };
	int i, end = sizeof(pad.buf) - 1;

	/* Fill 31 bytes with valid characters. */
	for (i = 0; i < sizeof(pad.buf) - 1; i++)
		pad.buf[i] = i + '0';
	/* Trailing bytes are still %NUL. */
	KUNIT_EXPECT_EQ(test, pad.buf[end], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* String is terminated, so strlen() is valid. */
	KUNIT_EXPECT_EQ(test, strlen(pad.buf), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);

	/* Make string unterminated, and recount. */
	pad.buf[end] = 'A';
	end = sizeof(pad.buf);
	KUNIT_EXPECT_EQ(test, strlen(pad.buf), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
}

static void strnlen_test(struct kunit *test)
{
	struct fortify_padding pad = { };
	int i, end = sizeof(pad.buf) - 1;

	/* Fill 31 bytes with valid characters. */
	for (i = 0; i < sizeof(pad.buf) - 1; i++)
		pad.buf[i] = i + '0';
	/* Trailing bytes are still %NUL. */
	KUNIT_EXPECT_EQ(test, pad.buf[end], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* String is terminated, so strnlen() is valid. */
	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf)), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	/* A truncated strnlen() will be safe, too. */
	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, sizeof(pad.buf) / 2),
					sizeof(pad.buf) / 2);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);

	/* Make string unterminated, and recount. */
	pad.buf[end] = 'A';
	end = sizeof(pad.buf);
	/* Reading beyond with strnlen() will fail. */
	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 1), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end + 2), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);

	/* Early-truncated is safe still, though. */
	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);

	end = sizeof(pad.buf) / 2;
	KUNIT_EXPECT_EQ(test, strnlen(pad.buf, end), end);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}

static void strcpy_test(struct kunit *test)
{
	struct fortify_padding pad = { };
	char src[sizeof(pad.buf) + 1] = { };
	int i;

	/* Fill 31 bytes with valid characters. */
	for (i = 0; i < sizeof(src) - 2; i++)
		src[i] = i + '0';

	/* Destination is %NUL-filled to start with. */
	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Legitimate strcpy() 1 less than max size. */
	KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
				== pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Only last byte should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	src[sizeof(src) - 2] = 'A';
	/* But now we trip the overflow checking. */
	KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
				== pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
	/* Trailing %NUL -- thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	/* And we will not have gone beyond. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	src[sizeof(src) - 1] = 'A';
	/* And for sure now, two bytes past. */
	KUNIT_ASSERT_TRUE(test, strcpy(pad.buf, src)
				== pad.buf);
	/*
	 * Which trips both the strlen() on the unterminated src,
	 * and the resulting copy attempt.
	 */
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
	/* Trailing %NUL -- thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	/* And we will not have gone beyond. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}

static void strncpy_test(struct kunit *test)
{
	struct fortify_padding pad = { };
	char src[] = "Copy me fully into a small buffer and I will overflow!";

	/* Destination is %NUL-filled to start with. */
	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Legitimate strncpy() 1 less than max size. */
	KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
					sizeof(pad.buf) + unconst - 1)
				== pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Only last byte should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* Legitimate (though unterminated) max-size strncpy. */
	KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
					sizeof(pad.buf) + unconst)
				== pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* No trailing %NUL -- thanks strncpy API. */
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	/* But we will not have gone beyond. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Now verify that FORTIFY is working... */
	KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
					sizeof(pad.buf) + unconst + 1)
				== pad.buf);
	/* Should catch the overflow. */
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	/* And we will not have gone beyond. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* And further... */
	KUNIT_ASSERT_TRUE(test, strncpy(pad.buf, src,
					sizeof(pad.buf) + unconst + 2)
				== pad.buf);
	/* Should catch the overflow. */
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	/* And we will not have gone beyond. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}

static void strscpy_test(struct kunit *test)
{
	struct fortify_padding pad = { };
	char src[] = "Copy me fully into a small buffer and I will overflow!";

	/* Destination is %NUL-filled to start with. */
	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Legitimate strscpy() 1 less than max size. */
	KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
				      sizeof(pad.buf) + unconst - 1),
			-E2BIG);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Keeping space for %NUL, last two bytes should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* Legitimate max-size strscpy. */
	KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
				      sizeof(pad.buf) + unconst),
			-E2BIG);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* A trailing %NUL will exist. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* Now verify that FORTIFY is working... */
	KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
				      sizeof(pad.buf) + unconst + 1),
			-E2BIG);
	/* Should catch the overflow. */
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	/* And we will not have gone beyond. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* And much further... */
	KUNIT_ASSERT_EQ(test, strscpy(pad.buf, src,
				      sizeof(src) * 2 + unconst),
			-E2BIG);
	/* Should catch the overflow. */
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	/* And we will not have gone beyond. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}

static void strcat_test(struct kunit *test)
{
	struct fortify_padding pad = { };
	char src[sizeof(pad.buf) / 2] = { };
	char one[] = "A";
	char two[] = "BC";
	int i;

	/* Fill 15 bytes with valid characters. */
	for (i = 0; i < sizeof(src) - 1; i++)
		src[i] = i + 'A';

	/* Destination is %NUL-filled to start with. */
	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Legitimate strcat() using less than half max size. */
	KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Legitimate strcat() now 2 bytes shy of end. */
	KUNIT_ASSERT_TRUE(test, strcat(pad.buf, src) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Last two bytes should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* Add one more character to the end. */
	KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Last byte should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* And this one char will overflow. */
	KUNIT_ASSERT_TRUE(test, strcat(pad.buf, one) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
	/* Last byte should be %NUL thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* And adding two will overflow more. */
	KUNIT_ASSERT_TRUE(test, strcat(pad.buf, two) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
	/* Last byte should be %NUL thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}

static void strncat_test(struct kunit *test)
{
	struct fortify_padding pad = { };
	char src[sizeof(pad.buf)] = { };
	int i, partial;

	/* Fill 15 bytes with valid characters. */
	partial = sizeof(src) / 2 - 1;
	for (i = 0; i < partial; i++)
		src[i] = i + 'A';

	/* Destination is %NUL-filled to start with. */
	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Legitimate strncat() using less than half max size. */
	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Legitimate strncat() now 2 bytes shy of end. */
	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, partial) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Last two bytes should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* Add one more character to the end. */
	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Last byte should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* And this one char will overflow. */
	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
	/* Last byte should be %NUL thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* And adding two will overflow more. */
	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 2) == pad.buf);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
	/* Last byte should be %NUL thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Force an unterminated destination, and overflow. */
	pad.buf[sizeof(pad.buf) - 1] = 'A';
	KUNIT_ASSERT_TRUE(test, strncat(pad.buf, src, 1) == pad.buf);
	/* This will have tripped both strlen() and strcat(). */
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3);
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	/* But we should not go beyond the end. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}

static void strlcat_test(struct kunit *test)
{
	struct fortify_padding pad = { };
	char src[sizeof(pad.buf)] = { };
	int i, partial;
	int len = sizeof(pad.buf) + unconst;

	/* Fill 15 bytes with valid characters. */
	partial = sizeof(src) / 2 - 1;
	for (i = 0; i < partial; i++)
		src[i] = i + 'A';

	/* Destination is %NUL-filled to start with. */
	KUNIT_EXPECT_EQ(test, pad.bytes_before, 0);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Legitimate strlcat() using less than half max size. */
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Legitimate strlcat() now 2 bytes shy of end. */
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len), partial * 2);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Last two bytes should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* Add one more character to the end. */
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "Q", len), partial * 2 + 1);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0);
	/* Last byte should be %NUL */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');

	/* And this one char will overflow. */
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "V", len * 2), len);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1);
	/* Last byte should be %NUL thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* And adding two will overflow more. */
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "QQ", len * 2), len + 1);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
	/* Last byte should be %NUL thanks to FORTIFY. */
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Force an unterminated destination, and overflow. */
	pad.buf[sizeof(pad.buf) - 1] = 'A';
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, "TT", len * 2), len + 2);
	/* This will have tripped both strlen() and strlcat(). */
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2);
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 2], '\0');
	KUNIT_EXPECT_NE(test, pad.buf[sizeof(pad.buf) - 3], '\0');
	/* But we should not go beyond the end. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);

	/* Force an unterminated source, and overflow. */
	memset(src, 'B', sizeof(src));
	pad.buf[sizeof(pad.buf) - 1] = '\0';
	KUNIT_ASSERT_EQ(test, strlcat(pad.buf, src, len * 3), len - 1 + sizeof(src));
	/* This will have tripped both strlen() and strlcat(). */
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
	KUNIT_EXPECT_EQ(test, fortify_write_overflows, 3);
	KUNIT_EXPECT_EQ(test, pad.buf[sizeof(pad.buf) - 1], '\0');
	/* But we should not go beyond the end. */
	KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}

static void memscan_test(struct kunit *test)
{
	char haystack[] = "Where oh where is my memory range?";
	char *mem = haystack + strlen("Where oh where is ");
	char needle = 'm';
	size_t len = sizeof(haystack) + unconst;

	KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len),
				  mem);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	/* Catch too-large range. */
	KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len + 1),
				  NULL);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
	KUNIT_ASSERT_PTR_EQ(test, memscan(haystack, needle, len * 2),
				  NULL);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}

static void memchr_test(struct kunit *test)
{
	char haystack[] = "Where oh where is my memory range?";
	char *mem = haystack + strlen("Where oh where is ");
	char needle = 'm';
	size_t len = sizeof(haystack) + unconst;

	KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len),
				  mem);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	/* Catch too-large range. */
	KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len + 1),
				  NULL);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
	KUNIT_ASSERT_PTR_EQ(test, memchr(haystack, needle, len * 2),
				  NULL);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}

static void memchr_inv_test(struct kunit *test)
{
	char haystack[] = "Where oh where is my memory range?";
	char *mem = haystack + 1;
	char needle = 'W';
	size_t len = sizeof(haystack) + unconst;

	/* Normal search is okay. */
	KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len),
				  mem);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	/* Catch too-large range. */
	KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len + 1),
				  NULL);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
	KUNIT_ASSERT_PTR_EQ(test, memchr_inv(haystack, needle, len * 2),
				  NULL);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}

static void memcmp_test(struct kunit *test)
{
	char one[] = "My mind is going ...";
	char two[] = "My mind is going ... I can feel it.";
	size_t one_len = sizeof(one) + unconst - 1;
	size_t two_len = sizeof(two) + unconst - 1;

	/* We match the first string (ignoring the %NUL). */
	KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len), 0);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	/* Still in bounds, but no longer matching. */
	KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len + 1), -32);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);

	/* Catch too-large ranges. */
	KUNIT_ASSERT_EQ(test, memcmp(one, two, one_len + 2), INT_MIN);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);

	KUNIT_ASSERT_EQ(test, memcmp(two, one, two_len + 2), INT_MIN);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}

static void kmemdup_test(struct kunit *test)
{
	char src[] = "I got Doom running on it!";
	char *copy;
	size_t len = sizeof(src) + unconst;

	/* Copy is within bounds. */
	copy = kmemdup(src, len, GFP_KERNEL);
	KUNIT_EXPECT_NOT_NULL(test, copy);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	kfree(copy);

	/* Without %NUL. */
	copy = kmemdup(src, len - 1, GFP_KERNEL);
	KUNIT_EXPECT_NOT_NULL(test, copy);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	kfree(copy);

	/* Tiny bounds. */
	copy = kmemdup(src, 1, GFP_KERNEL);
	KUNIT_EXPECT_NOT_NULL(test, copy);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0);
	kfree(copy);

	/* Out of bounds by 1 byte. */
	copy = kmemdup(src, len + 1, GFP_KERNEL);
	KUNIT_EXPECT_NULL(test, copy);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
	kfree(copy);

	/* Way out of bounds. */
	copy = kmemdup(src, len * 2, GFP_KERNEL);
	KUNIT_EXPECT_NULL(test, copy);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
	kfree(copy);

	/* Starting offset causing out of bounds. */
	copy = kmemdup(src + 1, len, GFP_KERNEL);
	KUNIT_EXPECT_NULL(test, copy);
	KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
	kfree(copy);
}

static int fortify_test_init(struct kunit *test)
{
	if (!IS_ENABLED(CONFIG_FORTIFY_SOURCE))
		kunit_skip(test, "Not built with CONFIG_FORTIFY_SOURCE=y");

	fortify_read_overflows = 0;
	kunit_add_named_resource(test, NULL, NULL, &read_resource,
				 "fortify_read_overflows",
				 &fortify_read_overflows);
	fortify_write_overflows = 0;
	kunit_add_named_resource(test, NULL, NULL, &write_resource,
				 "fortify_write_overflows",
				 &fortify_write_overflows);
	return 0;
}

static struct kunit_case fortify_test_cases[] = {
	KUNIT_CASE(known_sizes_test),
	KUNIT_CASE(control_flow_split_test),
	KUNIT_CASE(alloc_size_kmalloc_const_test),
	KUNIT_CASE(alloc_size_kmalloc_dynamic_test),
	KUNIT_CASE(alloc_size_vmalloc_const_test),
	KUNIT_CASE(alloc_size_vmalloc_dynamic_test),
	KUNIT_CASE(alloc_size_kvmalloc_const_test),
	KUNIT_CASE(alloc_size_kvmalloc_dynamic_test),
	KUNIT_CASE(alloc_size_devm_kmalloc_const_test),
	KUNIT_CASE(alloc_size_devm_kmalloc_dynamic_test),
	KUNIT_CASE(strlen_test),
	KUNIT_CASE(strnlen_test),
	KUNIT_CASE(strcpy_test),
	KUNIT_CASE(strncpy_test),
	KUNIT_CASE(strscpy_test),
	KUNIT_CASE(strcat_test),
	KUNIT_CASE(strncat_test),
	KUNIT_CASE(strlcat_test),
	/* skip memset: performs bounds checking on whole structs */
	/* skip memcpy: still using warn-and-overwrite instead of hard-fail */
	KUNIT_CASE(memscan_test),
	KUNIT_CASE(memchr_test),
	KUNIT_CASE(memchr_inv_test),
	KUNIT_CASE(memcmp_test),
	KUNIT_CASE(kmemdup_test),
	{}
};

static struct kunit_suite fortify_test_suite = {
	.name = "fortify",
	.init = fortify_test_init,
	.test_cases = fortify_test_cases,
};

kunit_test_suite(fortify_test_suite);

MODULE_LICENSE("GPL");