// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests relating directly to heap memory, including
 * page allocation and slab allocations.
 */
#include "lkdtm.h"
#include <linux/kfence.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched.h>

static struct kmem_cache *double_free_cache;
static struct kmem_cache *a_cache;
static struct kmem_cache *b_cache;

/*
 * Using volatile here means the compiler cannot ever make assumptions
 * about this value. This means compile-time length checks involving
 * this variable cannot be performed; only run-time checks.
 */
static volatile int __offset = 1;
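
/*
 * A minimal sketch (illustrative, not part of lkdtm) of the contrast
 * described above: when the length is a compile-time constant, a
 * fortified build (CONFIG_FORTIFY_SOURCE) can flag the overflow before
 * the test ever runs, which is exactly what reading the length through
 * the volatile __offset above prevents.
 */
static void __maybe_unused sketch_compile_time_check(void)
{
	char buf[16];

	/*
	 * With a constant oversized length, e.g.:
	 *
	 *	memset(buf, 0xAA, sizeof(buf) + 1);
	 *
	 * the compiler knows the destination size and can reject the
	 * call at build time, so only the in-bounds form below is left.
	 */
	memset(buf, 0xAA, sizeof(buf));
}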

/*
 * If there aren't guard pages, it's likely that a consecutive allocation will
 * let us overflow into the second allocation without overwriting something real.
 *
 * This should always be caught because there is an unconditional unmapped
 * page after vmap allocations.
 */
static void lkdtm_VMALLOC_LINEAR_OVERFLOW(void)
{
	char *one, *two;

	one = vzalloc(PAGE_SIZE);
	OPTIMIZER_HIDE_VAR(one);
	two = vzalloc(PAGE_SIZE);

	pr_info("Attempting vmalloc linear overflow ...\n");
	memset(one, 0xAA, PAGE_SIZE + __offset);

	vfree(two);
	vfree(one);
}
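
/*
 * A minimal sketch (illustrative only) of why the overflow above is always
 * caught: the vm area backing a vmalloc allocation is sized one extra,
 * never-mapped guard page beyond the request, which find_vm_area() makes
 * visible via the vm_struct size field.
 */
static void __maybe_unused sketch_show_vmalloc_guard(void)
{
	void *p = vmalloc(PAGE_SIZE);
	struct vm_struct *area;

	if (!p)
		return;
	area = find_vm_area(p);
	if (area)
		/* Expect one PAGE_SIZE of data plus one guard page. */
		pr_info("%lu-byte request backed by %lu-byte vm area\n",
			PAGE_SIZE, area->size);
	vfree(p);
}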

/*
 * This tries to stay within the next largest power-of-2 kmalloc cache
 * to avoid actually overwriting anything important if it's not detected
 * correctly.
 *
 * This should get caught by either memory tagging, KASan, or by using
 * CONFIG_SLUB_DEBUG=y and slab_debug=ZF (or CONFIG_SLUB_DEBUG_ON=y).
 */
static void lkdtm_SLAB_LINEAR_OVERFLOW(void)
{
	size_t len = 1020;
	u32 *data = kmalloc(len, GFP_KERNEL);

	if (!data)
		return;

	pr_info("Attempting slab linear overflow ...\n");
	OPTIMIZER_HIDE_VAR(data);
	/*
	 * Write the first u32 past the requested 1020 bytes: this lands
	 * in the slack of the backing 1024-byte kmalloc cache object
	 * rather than in a neighboring allocation.
	 */
	data[len / sizeof(u32)] = 0x12345678;
	kfree(data);
}
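
/*
 * A minimal sketch (illustrative only) of the bucketing the test above
 * relies on: ksize() reports the usable size of the backing cache object,
 * so a 1020-byte request typically reports 1024 bytes, and those last
 * four bytes are the slack that redzoning or KASAN instruments.
 */
static void __maybe_unused sketch_show_kmalloc_bucket(void)
{
	void *p = kmalloc(1020, GFP_KERNEL);

	if (!p)
		return;
	pr_info("1020-byte request landed in a %zu-byte object\n", ksize(p));
	kfree(p);
}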

static void lkdtm_WRITE_AFTER_FREE(void)
{
	int *base, *again;
	size_t len = 1024;
	/*
	 * The slub allocator uses the first word to store the free
	 * pointer in some configurations. Use the middle of the
	 * allocation to avoid running into the freelist.
	 */
	size_t offset = (len / sizeof(*base)) / 2;

	base = kmalloc(len, GFP_KERNEL);
	if (!base)
		return;
	pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
	pr_info("Attempting bad write to freed memory at %p\n",
		&base[offset]);
	kfree(base);
	base[offset] = 0x0abcdef0;
	/* Attempt to notice the overwrite. */
	again = kmalloc(len, GFP_KERNEL);
	kfree(again);
	if (again != base)
		pr_info("Hmm, didn't get the same memory range.\n");
}
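
/*
 * A minimal sketch of the offset arithmetic above (sketch_poke_offset() is
 * a made-up name for illustration): since SLUB may keep its freelist
 * pointer in either the first word or the middle of a free object, an
 * offset a quarter of the way in steers clear of both candidates.
 */
static size_t __maybe_unused sketch_poke_offset(size_t len)
{
	/* Skip both the first word and the midpoint of the object. */
	return (len / sizeof(int)) / 4;
}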

static void lkdtm_READ_AFTER_FREE(void)
{
	int *base, *val, saw;
	size_t len = 1024;
	/*
	 * The slub allocator will use either the first word or
	 * the middle of the allocation to store the free pointer,
	 * depending on configuration. Store a few words into the
	 * allocation to avoid running into the freelist.
	 */
	size_t offset = sizeof(*base);

	base = kmalloc(len, GFP_KERNEL);
	if (!base) {
		pr_info("Unable to allocate base memory.\n");
		return;
	}

	val = kmalloc(len, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate val memory.\n");
		kfree(base);
		return;
	}

	*val = 0x12345678;
	base[offset] = *val;
	pr_info("Value in memory before free: %x\n", base[offset]);

	kfree(base);

	pr_info("Attempting bad read from freed memory\n");
	saw = base[offset];
	if (saw != *val) {
		/* Good! Poisoning happened, so declare a win. */
		pr_info("Memory correctly poisoned (%x)\n", saw);
	} else {
		pr_err("FAIL: Memory was not poisoned!\n");
		pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
	}

	kfree(val);
}

static void lkdtm_KFENCE_READ_AFTER_FREE(void)
{
	int *base, val, saw;
	unsigned long timeout, resched_after;
	size_t len = 1024;
	/*
	 * The slub allocator will use either the first word or
	 * the middle of the allocation to store the free pointer,
	 * depending on configuration. Store a few words into the
	 * allocation to avoid running into the freelist.
	 */
	size_t offset = sizeof(*base);

	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * kfence_sample_interval);
	/*
	 * Especially for non-preemption kernels, ensure the allocation-gate
	 * timer can catch up: after @resched_after, every failed allocation
	 * attempt yields, to ensure the allocation-gate timer is scheduled.
	 */
	resched_after = jiffies + msecs_to_jiffies(kfence_sample_interval);
	do {
		base = kmalloc(len, GFP_KERNEL);
		if (!base) {
			pr_err("FAIL: Unable to allocate kfence memory!\n");
			return;
		}

		if (is_kfence_address(base)) {
			val = 0x12345678;
			base[offset] = val;
			pr_info("Value in memory before free: %x\n", base[offset]);

			kfree(base);

			pr_info("Attempting bad read from freed memory\n");
			saw = base[offset];
			if (saw != val) {
				/* Good! Poisoning happened, so declare a win. */
				pr_info("Memory correctly poisoned (%x)\n", saw);
			} else {
				pr_err("FAIL: Memory was not poisoned!\n");
				pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
			}
			return;
		}

		kfree(base);
		if (time_after(jiffies, resched_after))
			cond_resched();
	} while (time_before(jiffies, timeout));

	pr_err("FAIL: kfence memory never allocated!\n");
}
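
/*
 * A minimal sketch (hypothetical helper, not lkdtm API) factoring out the
 * retry loop above: keep allocating until KFENCE happens to service the
 * request or many sample intervals pass, yielding once the allocation-gate
 * timer is due so it can fire even on non-preemptible kernels.
 */
static void * __maybe_unused sketch_kfence_alloc(size_t len)
{
	unsigned long timeout = jiffies +
		msecs_to_jiffies(100 * kfence_sample_interval);
	unsigned long resched_after = jiffies +
		msecs_to_jiffies(kfence_sample_interval);
	void *p;

	do {
		p = kmalloc(len, GFP_KERNEL);
		if (!p)
			return NULL;
		if (is_kfence_address(p))
			return p;	/* Caller kfree()s when done. */
		kfree(p);
		if (time_after(jiffies, resched_after))
			cond_resched();
	} while (time_before(jiffies, timeout));

	return NULL;
}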

static void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);

	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	pr_info("Writing to the buddy page before free\n");
	memset((void *)p, 0x3, PAGE_SIZE);
	free_page(p);
	schedule();
	pr_info("Attempting bad write to the buddy page after free\n");
	memset((void *)p, 0x78, PAGE_SIZE);
	/* Attempt to notice the overwrite. */
	p = __get_free_page(GFP_KERNEL);
	free_page(p);
	schedule();
}

static void lkdtm_READ_BUDDY_AFTER_FREE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);
	int saw, *val;
	int *base;

	if (!p) {
		pr_info("Unable to allocate free page\n");
		return;
	}

	val = kmalloc(1024, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate val memory.\n");
		free_page(p);
		return;
	}

	base = (int *)p;

	*val = 0x12345678;
	base[0] = *val;
	pr_info("Value in memory before free: %x\n", base[0]);
	free_page(p);
	pr_info("Attempting to read from freed memory\n");
	saw = base[0];
	if (saw != *val) {
		/* Good! Poisoning happened, so declare a win. */
		pr_info("Memory correctly poisoned (%x)\n", saw);
	} else {
		pr_err("FAIL: Buddy page was not poisoned!\n");
		pr_expected_config_param(CONFIG_INIT_ON_FREE_DEFAULT_ON, "init_on_free");
	}

	kfree(val);
}

static void lkdtm_SLAB_INIT_ON_ALLOC(void)
{
	u8 *first;
	u8 *val;

	first = kmalloc(512, GFP_KERNEL);
	if (!first) {
		pr_info("Unable to allocate 512 bytes the first time.\n");
		return;
	}

	memset(first, 0xAB, 512);
	kfree(first);

	val = kmalloc(512, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate 512 bytes the second time.\n");
		return;
	}
	if (val != first) {
		pr_warn("Reallocation missed clobbered memory.\n");
	}

	if (memchr(val, 0xAB, 512) == NULL) {
		pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
	} else {
		pr_err("FAIL: Slab was not initialized\n");
		pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
	}
	kfree(val);
}

static void lkdtm_BUDDY_INIT_ON_ALLOC(void)
{
	u8 *first;
	u8 *val;

	first = (u8 *)__get_free_page(GFP_KERNEL);
	if (!first) {
		pr_info("Unable to allocate first free page\n");
		return;
	}

	memset(first, 0xAB, PAGE_SIZE);
	free_page((unsigned long)first);

	val = (u8 *)__get_free_page(GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate second free page\n");
		return;
	}

	if (val != first) {
		pr_warn("Reallocation missed clobbered memory.\n");
	}

	if (memchr(val, 0xAB, PAGE_SIZE) == NULL) {
		pr_info("Memory appears initialized (%x, no earlier values)\n", *val);
	} else {
		pr_err("FAIL: Page was not initialized\n");
		pr_expected_config_param(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, "init_on_alloc");
	}
	free_page((unsigned long)val);
}
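
/*
 * A minimal sketch (illustrative only; assumes linux/mm.h is reachable via
 * this file's includes) of querying the auto-init state the two tests above
 * probe indirectly: want_init_on_alloc() and want_init_on_free() report
 * whether zero-initialization is active, whether from the CONFIG defaults
 * or the init_on_alloc=/init_on_free= boot parameters.
 */
static void __maybe_unused sketch_report_autoinit(void)
{
	pr_info("init_on_alloc: %d, init_on_free: %d\n",
		want_init_on_alloc(GFP_KERNEL), want_init_on_free());
}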

static void lkdtm_SLAB_FREE_DOUBLE(void)
{
	int *val;

	val = kmem_cache_alloc(double_free_cache, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate double_free_cache memory.\n");
		return;
	}

	/* Just make sure we got real memory. */
	*val = 0x12345678;
	pr_info("Attempting double slab free ...\n");
	kmem_cache_free(double_free_cache, val);
	kmem_cache_free(double_free_cache, val);
}

static void lkdtm_SLAB_FREE_CROSS(void)
{
	int *val;

	val = kmem_cache_alloc(a_cache, GFP_KERNEL);
	if (!val) {
		pr_info("Unable to allocate a_cache memory.\n");
		return;
	}

	/* Just make sure we got real memory. */
	*val = 0x12345679;
	pr_info("Attempting cross-cache slab free ...\n");
	kmem_cache_free(b_cache, val);
}

static void lkdtm_SLAB_FREE_PAGE(void)
{
	unsigned long p = __get_free_page(GFP_KERNEL);

	pr_info("Attempting non-Slab slab free ...\n");
	kmem_cache_free(NULL, (void *)p);
	free_page(p);
}

/*
 * We have constructors to keep the caches distinctly separated without
 * needing to boot with "slab_nomerge".
 */
static void ctor_double_free(void *region)
{ }
static void ctor_a(void *region)
{ }
static void ctor_b(void *region)
{ }

void __init lkdtm_heap_init(void)
{
	double_free_cache = kmem_cache_create("lkdtm-heap-double_free",
					      64, 0, 0, ctor_double_free);
	a_cache = kmem_cache_create("lkdtm-heap-a", 64, 0, 0, ctor_a);
	b_cache = kmem_cache_create("lkdtm-heap-b", 64, 0, 0, ctor_b);
}
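
/*
 * A minimal sketch of the modern alternative to the empty constructors
 * above, assuming a kernel new enough to provide SLAB_NO_MERGE: the flag
 * opts a cache out of merging explicitly, with no dummy ctor needed.
 */
static struct kmem_cache * __maybe_unused
sketch_unmergeable_cache(const char *name)
{
	return kmem_cache_create(name, 64, 0, SLAB_NO_MERGE, NULL);
}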

void __exit lkdtm_heap_exit(void)
{
	kmem_cache_destroy(double_free_cache);
	kmem_cache_destroy(a_cache);
	kmem_cache_destroy(b_cache);
}

static struct crashtype crashtypes[] = {
	CRASHTYPE(SLAB_LINEAR_OVERFLOW),
	CRASHTYPE(VMALLOC_LINEAR_OVERFLOW),
	CRASHTYPE(WRITE_AFTER_FREE),
	CRASHTYPE(READ_AFTER_FREE),
	CRASHTYPE(KFENCE_READ_AFTER_FREE),
	CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
	CRASHTYPE(READ_BUDDY_AFTER_FREE),
	CRASHTYPE(SLAB_INIT_ON_ALLOC),
	CRASHTYPE(BUDDY_INIT_ON_ALLOC),
	CRASHTYPE(SLAB_FREE_DOUBLE),
	CRASHTYPE(SLAB_FREE_CROSS),
	CRASHTYPE(SLAB_FREE_PAGE),
};

struct crashtype_category heap_crashtypes = {
	.crashtypes = crashtypes,
	.len	    = ARRAY_SIZE(crashtypes),
};