// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains core generic KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

/*
 * All functions below are always inlined so that the compiler can
 * perform better optimizations in each of __asan_loadX/__asan_storeX
 * depending on the memory access size X.
 */

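/*
 * Generic KASAN maps each 8-byte (KASAN_GRANULE_SIZE) granule of memory to
 * one shadow byte: 0 means the whole granule is accessible, values 1-7 mean
 * that only that many leading bytes are accessible, and negative values
 * encode why the granule is poisoned (redzone, freed object, etc.). The
 * helpers below compare the offset of the last accessed byte within its
 * granule against the shadow value.
 */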
static __always_inline bool memory_is_poisoned_1(const void *addr)
{
	s8 shadow_value = *(s8 *)kasan_mem_to_shadow(addr);

	if (unlikely(shadow_value)) {
		s8 last_accessible_byte = (unsigned long)addr & KASAN_GRANULE_MASK;
		return unlikely(last_accessible_byte >= shadow_value);
	}

	return false;
}

static __always_inline bool memory_is_poisoned_2_4_8(const void *addr,
						unsigned long size)
{
	u8 *shadow_addr = (u8 *)kasan_mem_to_shadow(addr);

	/*
	 * The access crosses an 8-byte (shadow granule) boundary and thus
	 * maps into two shadow bytes, so both of them need to be checked.
	 */
	if (unlikely((((unsigned long)addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);

	return memory_is_poisoned_1(addr + size - 1);
}

static __always_inline bool memory_is_poisoned_16(const void *addr)
{
	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow(addr);

	/* An unaligned 16-byte access maps into 3 shadow bytes. */
	if (unlikely(!IS_ALIGNED((unsigned long)addr, KASAN_GRANULE_SIZE)))
		return *shadow_addr || memory_is_poisoned_1(addr + 15);

	return *shadow_addr;
}

static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
					size_t size)
{
	while (size) {
		if (unlikely(*start))
			return (unsigned long)start;
		start++;
		size--;
	}

	return 0;
}

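/*
 * Scan a shadow region for a non-zero byte: check the unaligned prefix byte
 * by byte, then whole 8-byte words, then the remaining tail. Returns the
 * address of the first non-zero byte found, or 0 if the region is clear.
 */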
static __always_inline unsigned long memory_is_nonzero(const void *start,
						const void *end)
{
	unsigned int words;
	unsigned long ret;
	unsigned int prefix = (unsigned long)start % 8;

	if (end - start <= 16)
		return bytes_is_nonzero(start, end - start);

	if (prefix) {
		prefix = 8 - prefix;
		ret = bytes_is_nonzero(start, prefix);
		if (unlikely(ret))
			return ret;
		start += prefix;
	}

	words = (end - start) / 8;
	while (words) {
		if (unlikely(*(u64 *)start))
			return bytes_is_nonzero(start, 8);
		start += 8;
		words--;
	}

	return bytes_is_nonzero(start, (end - start) % 8);
}

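/*
 * Check an access of arbitrary size: scan the whole shadow range and, if a
 * non-zero shadow byte is found, report a bug unless it is the shadow byte
 * of the last (possibly partial) granule and the access still fits within
 * the accessible part of that granule.
 */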
static __always_inline bool memory_is_poisoned_n(const void *addr, size_t size)
{
	unsigned long ret;

	ret = memory_is_nonzero(kasan_mem_to_shadow(addr),
			kasan_mem_to_shadow(addr + size - 1) + 1);

	if (unlikely(ret)) {
		const void *last_byte = addr + size - 1;
		s8 *last_shadow = (s8 *)kasan_mem_to_shadow(last_byte);
		s8 last_accessible_byte = (unsigned long)last_byte & KASAN_GRANULE_MASK;

		if (unlikely(ret != (unsigned long)last_shadow ||
			     last_accessible_byte >= *last_shadow))
			return true;
	}
	return false;
}

static __always_inline bool memory_is_poisoned(const void *addr, size_t size)
{
	if (__builtin_constant_p(size)) {
		switch (size) {
		case 1:
			return memory_is_poisoned_1(addr);
		case 2:
		case 4:
		case 8:
			return memory_is_poisoned_2_4_8(addr, size);
		case 16:
			return memory_is_poisoned_16(addr);
		default:
			BUILD_BUG();
		}
	}

	return memory_is_poisoned_n(addr, size);
}

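/*
 * Common check for all instrumented accesses: skip the check until the arch
 * has set up the shadow, allow zero-size accesses, and report wild accesses
 * (pointer overflow or an address without shadow) as well as accesses to
 * poisoned memory. Returns true if the access is valid.
 */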
static __always_inline bool check_region_inline(const void *addr,
						size_t size, bool write,
						unsigned long ret_ip)
{
	if (!kasan_arch_is_ready())
		return true;

	if (unlikely(size == 0))
		return true;

	if (unlikely(addr + size < addr))
		return !kasan_report(addr, size, write, ret_ip);

	if (unlikely(!addr_has_metadata(addr)))
		return !kasan_report(addr, size, write, ret_ip);

	if (likely(!memory_is_poisoned(addr, size)))
		return true;

	return !kasan_report(addr, size, write, ret_ip);
}

bool kasan_check_range(const void *addr, size_t size, bool write,
					unsigned long ret_ip)
{
	return check_region_inline(addr, size, write, ret_ip);
}

bool kasan_byte_accessible(const void *addr)
{
	s8 shadow_byte;

	if (!kasan_arch_is_ready())
		return true;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(addr));

	return shadow_byte >= 0 && shadow_byte < KASAN_GRANULE_SIZE;
}

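/*
 * Shrinking or destroying a cache must flush its objects out of the KASAN
 * quarantine; otherwise the quarantine would keep holding on to freed
 * objects that belong to the cache.
 */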
void kasan_cache_shrink(struct kmem_cache *cache)
{
	kasan_quarantine_remove_cache(cache);
}

void kasan_cache_shutdown(struct kmem_cache *cache)
{
	if (!__kmem_cache_empty(cache))
		kasan_quarantine_remove_cache(cache);
}

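/*
 * The compiler reserves size_with_redzone bytes for each instrumented global
 * variable. Unpoison the variable itself and poison the trailing redzone so
 * that out-of-bounds accesses beyond the global are detected.
 */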
static void register_global(struct kasan_global *global)
{
	size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);

	kasan_unpoison(global->beg, global->size, false);

	kasan_poison(global->beg + aligned_size,
		     global->size_with_redzone - aligned_size,
		     KASAN_GLOBAL_REDZONE, false);
}

void __asan_register_globals(void *ptr, ssize_t size)
{
	int i;
	struct kasan_global *globals = ptr;

	for (i = 0; i < size; i++)
		register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(void *ptr, ssize_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

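/*
 * With outline instrumentation, the compiler emits a call to a
 * __asan_loadX/__asan_storeX function for each memory access of size X.
 * The *_noabort variants are the symbols compilers emit for the kernel,
 * where reports must not abort; they are plain aliases of the regular ones.
 */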
#define DEFINE_ASAN_LOAD_STORE(size)					\
	void __asan_load##size(void *addr)				\
	{								\
		check_region_inline(addr, size, false, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_load##size);				\
	__alias(__asan_load##size)					\
	void __asan_load##size##_noabort(void *);			\
	EXPORT_SYMBOL(__asan_load##size##_noabort);			\
	void __asan_store##size(void *addr)				\
	{								\
		check_region_inline(addr, size, true, _RET_IP_);	\
	}								\
	EXPORT_SYMBOL(__asan_store##size);				\
	__alias(__asan_store##size)					\
	void __asan_store##size##_noabort(void *);			\
	EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);

void __asan_loadN(void *addr, ssize_t size)
{
	kasan_check_range(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(void *, ssize_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(void *addr, ssize_t size)
{
	kasan_check_range(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(void *, ssize_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* Emitted by the compiler before noreturn calls; a no-op in the kernel. */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

/* Emitted by compiler to poison alloca()ed objects. */
void __asan_alloca_poison(void *addr, ssize_t size)
{
	size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
			rounded_up_size;
	size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);

	const void *left_redzone = (const void *)(addr -
			KASAN_ALLOCA_REDZONE_SIZE);
	const void *right_redzone = (const void *)(addr + rounded_up_size);

	WARN_ON(!IS_ALIGNED((unsigned long)addr, KASAN_ALLOCA_REDZONE_SIZE));

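	/*
	 * Layout: [left redzone][object rounded up to granule size]
	 * [padding + right redzone]. Unpoison the partially used last
	 * granule of the object, then poison both redzones.
	 */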
	kasan_unpoison((const void *)(addr + rounded_down_size),
			size - rounded_down_size, false);
	kasan_poison(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
		     KASAN_ALLOCA_LEFT, false);
	kasan_poison(right_redzone, padding_size + KASAN_ALLOCA_REDZONE_SIZE,
		     KASAN_ALLOCA_RIGHT, false);
}
EXPORT_SYMBOL(__asan_alloca_poison);

/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
void __asan_allocas_unpoison(void *stack_top, ssize_t stack_bottom)
{
	if (unlikely(!stack_top || stack_top > (void *)stack_bottom))
		return;

	kasan_unpoison(stack_top, (void *)stack_bottom - stack_top, false);
}
EXPORT_SYMBOL(__asan_allocas_unpoison);

/* Emitted by the compiler to [un]poison local variables. */
#define DEFINE_ASAN_SET_SHADOW(byte) \
	void __asan_set_shadow_##byte(const void *addr, ssize_t size)	\
	{								\
		__memset((void *)addr, 0x##byte, size);			\
	}								\
	EXPORT_SYMBOL(__asan_set_shadow_##byte)

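/*
 * 0x00 marks shadow memory as accessible; the 0xf1-0xf8 values are the
 * compiler's stack redzone and scope markers (left/middle/right redzones
 * and the like), written directly into shadow memory for local variables.
 */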
DEFINE_ASAN_SET_SHADOW(00);
DEFINE_ASAN_SET_SHADOW(f1);
DEFINE_ASAN_SET_SHADOW(f2);
DEFINE_ASAN_SET_SHADOW(f3);
DEFINE_ASAN_SET_SHADOW(f5);
DEFINE_ASAN_SET_SHADOW(f8);

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
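/*
 * For example, a 32-byte object gets a 16-byte redzone, while a 1000-byte
 * object gets a 128-byte one.
 */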
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			  slab_flags_t *flags)
{
	unsigned int ok_size;
	unsigned int optimal_size;
	unsigned int rem_free_meta_size;
	unsigned int orig_alloc_meta_offset;

	if (!kasan_requires_meta())
		return;

	/*
	 * SLAB_KASAN is used to mark caches that are sanitized by KASAN and
	 * that thus have per-object metadata. Currently, this flag is used in
	 * slab_ksize() to account for per-object metadata when calculating the
	 * size of the accessible memory within the object. Additionally, we use
	 * SLAB_NO_MERGE to prevent merging of caches with per-object metadata.
	 */
	*flags |= SLAB_KASAN | SLAB_NO_MERGE;

	ok_size = *size;

	/* Add alloc meta into the redzone. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* If alloc meta doesn't fit, don't add it. */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.alloc_meta_offset = 0;
		*size = ok_size;
		/* Continue, since free meta might still fit. */
	}

	ok_size = *size;
	orig_alloc_meta_offset = cache->kasan_info.alloc_meta_offset;

	/*
	 * Store free meta in the redzone when it's not possible to store
	 * it in the object. This is the case when:
	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
	 *    be touched after it was freed, or
	 * 2. Object has a constructor, which means it's expected to
	 *    retain its content until the next allocation.
	 */
	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
		goto free_meta_added;
	}

	/*
	 * Otherwise, if the object is large enough to contain free meta,
	 * store it within the object.
	 */
	if (sizeof(struct kasan_free_meta) <= cache->object_size) {
		/* cache->kasan_info.free_meta_offset = 0 is implied. */
		goto free_meta_added;
	}

	/*
	 * For smaller objects, store the beginning of free meta within the
	 * object and the end in the redzone. And thus shift the location of
	 * alloc meta to free up space for free meta.
	 * This is only possible when slub_debug is disabled, as otherwise
	 * the end of free meta will overlap with slub_debug metadata.
	 */
	if (!__slub_debug_enabled()) {
		rem_free_meta_size = sizeof(struct kasan_free_meta) -
							cache->object_size;
		*size += rem_free_meta_size;
		if (cache->kasan_info.alloc_meta_offset != 0)
			cache->kasan_info.alloc_meta_offset += rem_free_meta_size;
		goto free_meta_added;
	}

	/*
	 * If the object is small and slub_debug is enabled, store free meta
	 * in the redzone after alloc meta.
	 */
	cache->kasan_info.free_meta_offset = *size;
	*size += sizeof(struct kasan_free_meta);

free_meta_added:
	/* If free meta doesn't fit, don't add it. */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
		cache->kasan_info.alloc_meta_offset = orig_alloc_meta_offset;
		*size = ok_size;
	}

	/* Calculate size with optimal redzone. */
	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
	/* Limit it with KMALLOC_MAX_SIZE. */
	if (optimal_size > KMALLOC_MAX_SIZE)
		optimal_size = KMALLOC_MAX_SIZE;
	/* Use optimal size if the size with added metas is not large enough. */
	if (*size < optimal_size)
		*size = optimal_size;
}

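/*
 * An alloc_meta_offset of 0 means the cache has no alloc metadata: alloc
 * meta is only ever placed in the redzone, never at the start of the object.
 */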
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	if (!cache->kasan_info.alloc_meta_offset)
		return NULL;
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
					    const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
		return NULL;
	return (void *)object + cache->kasan_info.free_meta_offset;
}

void kasan_init_object_meta(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta) {
		/* Zero out alloc meta to mark it as invalid. */
		__memset(alloc_meta, 0, sizeof(*alloc_meta));
	}

	/*
	 * Explicitly marking free meta as invalid is not required: the shadow
	 * value for the first 8 bytes of a newly allocated object is not
	 * KASAN_SLAB_FREE_META.
	 */
}

static void release_alloc_meta(struct kasan_alloc_meta *meta)
{
	/* Zero out alloc meta to mark it as invalid. */
	__memset(meta, 0, sizeof(*meta));
}

static void release_free_meta(const void *object, struct kasan_free_meta *meta)
{
	if (!kasan_arch_is_ready())
		return;

	/* Check if free meta is valid. */
	if (*(u8 *)kasan_mem_to_shadow(object) != KASAN_SLAB_FREE_META)
		return;

	/* Mark free meta as invalid. */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE;
}

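/*
 * Report how many bytes of KASAN metadata are stored inside the object
 * (in_object == true) or in the redzone following it, so that the slab
 * code can account for them.
 */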
size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object)
{
	struct kasan_cache *info = &cache->kasan_info;

	if (!kasan_requires_meta())
		return 0;

	if (in_object)
		return (info->free_meta_offset ?
			0 : sizeof(struct kasan_free_meta));
	else
		return (info->alloc_meta_offset ?
			sizeof(struct kasan_alloc_meta) : 0) +
			((info->free_meta_offset &&
			info->free_meta_offset != KASAN_NO_FREE_META) ?
			sizeof(struct kasan_free_meta) : 0);
}

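/*
 * Record an auxiliary stack trace in the object's alloc meta, e.g. the point
 * where a work item or RCU callback referencing the object was queued. The
 * two most recent auxiliary stacks are kept and shown in reports.
 */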
static void __kasan_record_aux_stack(void *addr, depot_flags_t depot_flags)
{
	struct slab *slab = kasan_addr_to_slab(addr);
	struct kmem_cache *cache;
	struct kasan_alloc_meta *alloc_meta;
	void *object;

	if (is_kfence_address(addr) || !slab)
		return;

	cache = slab->slab_cache;
	object = nearest_obj(cache, slab, addr);
	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (!alloc_meta)
		return;

	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
	alloc_meta->aux_stack[0] = kasan_save_stack(0, depot_flags);
}

void kasan_record_aux_stack(void *addr)
{
	return __kasan_record_aux_stack(addr, STACK_DEPOT_FLAG_CAN_ALLOC);
}

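/*
 * Same as kasan_record_aux_stack(), but does not allow stack depot to
 * allocate new pools, for contexts where allocation is not allowed.
 */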
void kasan_record_aux_stack_noalloc(void *addr)
{
	return __kasan_record_aux_stack(addr, 0);
}

void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags)
{
	struct kasan_alloc_meta *alloc_meta;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (!alloc_meta)
		return;

	/* Invalidate previous stack traces (might exist for krealloc or mempool). */
	release_alloc_meta(alloc_meta);

	kasan_save_track(&alloc_meta->alloc_track, flags);
}

void kasan_save_free_info(struct kmem_cache *cache, void *object)
{
	struct kasan_free_meta *free_meta;

	free_meta = kasan_get_free_meta(cache, object);
	if (!free_meta)
		return;

	/* Invalidate previous stack trace (might exist for mempool). */
	release_free_meta(object, free_meta);

	kasan_save_track(&free_meta->free_track, 0);

	/* Mark free meta as valid. */
	*(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREE_META;
}