/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Functions used by the KMSAN runtime.
 *
 * Copyright (C) 2017-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#ifndef __MM_KMSAN_KMSAN_H
#define __MM_KMSAN_KMSAN_H

#include <asm/pgtable_64_types.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/printk.h>

#define KMSAN_ALLOCA_MAGIC_ORIGIN 0xabcd0100
#define KMSAN_CHAIN_MAGIC_ORIGIN 0xabcd0200

#define KMSAN_POISON_NOCHECK 0x0
#define KMSAN_POISON_CHECK 0x1
#define KMSAN_POISON_FREE 0x2

#define KMSAN_ORIGIN_SIZE 4
#define KMSAN_MAX_ORIGIN_DEPTH 7

#define KMSAN_STACK_DEPTH 64

#define KMSAN_META_SHADOW (false)
#define KMSAN_META_ORIGIN (true)

extern bool kmsan_enabled;
extern int panic_on_kmsan;

/*
 * KMSAN performs a lot of consistency checks that are currently enabled by
 * default. BUG_ON is normally discouraged in the kernel, unless used for
 * debugging, but KMSAN itself is a debugging tool, so it makes little sense to
 * recover if something goes wrong.
 */
#define KMSAN_WARN_ON(cond)                                           \
	({                                                            \
		const bool __cond = WARN_ON(cond);                    \
		if (unlikely(__cond)) {                               \
			WRITE_ONCE(kmsan_enabled, false);             \
			if (panic_on_kmsan) {                         \
				/* Can't call panic() here because */ \
				/* of uaccess checks. */              \
				BUG();                                \
			}                                             \
		}                                                     \
		__cond;                                               \
	})

/*
 * A pair of metadata pointers to be returned by the instrumentation functions.
 */
struct shadow_origin_ptr {
	void *shadow, *origin;
};

struct shadow_origin_ptr kmsan_get_shadow_origin_ptr(void *addr, u64 size,
						     bool store);
void *kmsan_get_metadata(void *addr, bool is_origin);
void __init kmsan_init_alloc_meta_for_range(void *start, void *end);
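
/*
 * Illustrative sketch (not part of the runtime, simplified): an instrumented
 * 4-byte store would typically fetch both metadata pointers at once and copy
 * the shadow and origin of the stored value into them. The helper below is
 * hypothetical.
 *
 *	void example_store_u32(u32 *addr, u32 shadow, u32 origin)
 *	{
 *		struct shadow_origin_ptr ptr =
 *			kmsan_get_shadow_origin_ptr(addr, sizeof(u32), true);
 *
 *		*(u32 *)ptr.shadow = shadow;
 *		*(u32 *)ptr.origin = origin;
 *	}
 *
 * kmsan_get_metadata() can likewise be called with %KMSAN_META_SHADOW or
 * %KMSAN_META_ORIGIN as @is_origin to fetch a single metadata pointer.
 */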

enum kmsan_bug_reason {
	REASON_ANY,
	REASON_COPY_TO_USER,
	REASON_SUBMIT_URB,
};

void kmsan_print_origin(depot_stack_handle_t origin);

/**
 * kmsan_report() - Report a use of uninitialized value.
 * @origin:    Stack ID of the uninitialized value.
 * @address:   Address at which the memory access happens.
 * @size:      Memory access size.
 * @off_first: Offset (from @address) of the first byte to be reported.
 * @off_last:  Offset (from @address) of the last byte to be reported.
 * @user_addr: When non-NULL, denotes the userspace address to which the kernel
 *             is leaking data.
 * @reason:    Error type from enum kmsan_bug_reason.
 *
 * kmsan_report() prints an error message for a consecutive group of bytes
 * sharing the same origin. If an uninitialized value is used in a comparison,
 * this function is called once without specifying the addresses. When checking
 * a memory range, KMSAN may call kmsan_report() multiple times with the same
 * @address, @size, @user_addr and @reason, but different @off_first and
 * @off_last corresponding to different @origin values.
 */
void kmsan_report(depot_stack_handle_t origin, void *address, int size,
		  int off_first, int off_last, const void *user_addr,
		  enum kmsan_bug_reason reason);
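
/*
 * For example (illustrative values only): checking an 8-byte range at @addr
 * whose halves were poisoned at two different call sites may produce two
 * reports, one per origin:
 *
 *	kmsan_report(origin1, addr, 8, 0, 3, NULL, REASON_ANY);
 *	kmsan_report(origin2, addr, 8, 4, 7, NULL, REASON_ANY);
 */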

DECLARE_PER_CPU(struct kmsan_ctx, kmsan_percpu_ctx);

static __always_inline struct kmsan_ctx *kmsan_get_context(void)
{
	return in_task() ? &current->kmsan_ctx : raw_cpu_ptr(&kmsan_percpu_ctx);
}

/*
 * When a compiler hook or KMSAN runtime function is invoked, it may make a
 * call to instrumented code and eventually call itself recursively. To avoid
 * that, we guard the runtime entry regions with
 * kmsan_enter_runtime()/kmsan_leave_runtime() and exit the hook if
 * kmsan_in_runtime() is true.
 *
 * Non-runtime code may occasionally get executed in nested IRQs from the
 * runtime code (e.g. when called via smp_call_function_single()). Because some
 * KMSAN routines may take locks (e.g. for memory allocation), we conservatively
 * bail out instead of calling them. To minimize the effect of this (potentially
 * missing initialization events) kmsan_in_runtime() is not checked in
 * non-blocking runtime functions.
 */
static __always_inline bool kmsan_in_runtime(void)
{
	if ((hardirq_count() >> HARDIRQ_SHIFT) > 1)
		return true;
	if (in_nmi())
		return true;
	return kmsan_get_context()->kmsan_in_runtime;
}

static __always_inline void kmsan_enter_runtime(void)
{
	struct kmsan_ctx *ctx;

	ctx = kmsan_get_context();
	KMSAN_WARN_ON(ctx->kmsan_in_runtime++);
}

static __always_inline void kmsan_leave_runtime(void)
{
	struct kmsan_ctx *ctx = kmsan_get_context();

	KMSAN_WARN_ON(--ctx->kmsan_in_runtime);
}
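
/*
 * A minimal sketch of the resulting reentrancy guard pattern, as followed by
 * the runtime hooks (the hook name and body below are hypothetical):
 *
 *	void kmsan_example_hook(void *addr, size_t size)
 *	{
 *		if (!kmsan_enabled || kmsan_in_runtime())
 *			return;
 *		kmsan_enter_runtime();
 *		... touch metadata, possibly taking locks or allocating ...
 *		kmsan_leave_runtime();
 *	}
 */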

depot_stack_handle_t kmsan_save_stack(void);
depot_stack_handle_t kmsan_save_stack_with_flags(gfp_t flags,
						 unsigned int extra_bits);

/*
 * Pack and unpack the origin chain depth and UAF flag to/from the extra bits
 * provided by the stack depot.
 * The UAF flag is stored in the lowest bit, followed by the depth in the upper
 * bits.
 * set_dsh_extra_bits() is responsible for clamping the value.
 */
static __always_inline unsigned int kmsan_extra_bits(unsigned int depth,
						     bool uaf)
{
	return (depth << 1) | uaf;
}

static __always_inline bool kmsan_uaf_from_eb(unsigned int extra_bits)
{
	return extra_bits & 1;
}

static __always_inline unsigned int kmsan_depth_from_eb(unsigned int extra_bits)
{
	return extra_bits >> 1;
}
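
/*
 * Worked example: kmsan_extra_bits(3, true) yields (3 << 1) | 1 == 0b111 == 7;
 * kmsan_uaf_from_eb(7) then recovers true, and kmsan_depth_from_eb(7)
 * recovers the depth of 3.
 */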

/*
 * kmsan_internal_ functions are supposed to be very simple and not require the
 * kmsan_in_runtime() checks.
 */
void kmsan_internal_memmove_metadata(void *dst, void *src, size_t n);
void kmsan_internal_poison_memory(void *address, size_t size, gfp_t flags,
				  unsigned int poison_flags);
void kmsan_internal_unpoison_memory(void *address, size_t size, bool checked);
void kmsan_internal_set_shadow_origin(void *address, size_t size, int b,
				      u32 origin, bool checked);
depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id);

void kmsan_internal_task_create(struct task_struct *task);

bool kmsan_metadata_is_contiguous(void *addr, size_t size);
void kmsan_internal_check_memory(void *addr, size_t size, const void *user_addr,
				 int reason);

struct page *kmsan_vmalloc_to_page_or_null(void *vaddr);
void kmsan_setup_meta(struct page *page, struct page *shadow,
		      struct page *origin, int order);

/*
 * kmsan_internal_is_module_addr() and kmsan_internal_is_vmalloc_addr() are
 * non-instrumented versions of is_module_address() and is_vmalloc_addr() that
 * are safe to call from KMSAN runtime without recursion.
 */
static inline bool kmsan_internal_is_module_addr(void *vaddr)
{
	return ((u64)vaddr >= MODULES_VADDR) && ((u64)vaddr < MODULES_END);
}

static inline bool kmsan_internal_is_vmalloc_addr(void *addr)
{
	return ((u64)addr >= VMALLOC_START) && ((u64)addr < VMALLOC_END);
}

#endif /* __MM_KMSAN_KMSAN_H */