// SPDX-License-Identifier: GPL-2.0
/*
 * KMSAN runtime library.
 *
 * Copyright (C) 2017-2022 Google LLC
 * Author: Alexander Potapenko <glider@google.com>
 *
 */

#include <asm/page.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kmsan_types.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/mmzone.h>
#include <linux/percpu-defs.h>
#include <linux/preempt.h>
#include <linux/slab.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include "../slab.h"
#include "kmsan.h"

bool kmsan_enabled __read_mostly;

/*
 * Per-CPU KMSAN context to be used in interrupts, where current->kmsan is
 * unavailable.
 */
DEFINE_PER_CPU(struct kmsan_ctx, kmsan_percpu_ctx);
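
/*
 * Illustration (hypothetical helper, not part of this file): runtime code can
 * pick between the two contexts based on whether it runs in task context.
 * This mirrors the kmsan_get_context() helper declared in "kmsan.h"; treat it
 * as a minimal sketch, not the canonical implementation.
 */
static __maybe_unused struct kmsan_ctx *kmsan_example_get_ctx(void)
{
	/* In interrupts, fall back to the per-CPU context defined above. */
	return in_task() ? &current->kmsan_ctx :
			   raw_cpu_ptr(&kmsan_percpu_ctx);
}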

void kmsan_internal_task_create(struct task_struct *task)
{
	struct kmsan_ctx *ctx = &task->kmsan_ctx;
	struct thread_info *info = current_thread_info();

	__memset(ctx, 0, sizeof(*ctx));
	ctx->allow_reporting = true;
	kmsan_internal_unpoison_memory(info, sizeof(*info), false);
}

void kmsan_internal_poison_memory(void *address, size_t size, gfp_t flags,
				  unsigned int poison_flags)
{
	u32 extra_bits =
		kmsan_extra_bits(/*depth*/ 0, poison_flags & KMSAN_POISON_FREE);
	bool checked = poison_flags & KMSAN_POISON_CHECK;
	depot_stack_handle_t handle;

	handle = kmsan_save_stack_with_flags(flags, extra_bits);
	kmsan_internal_set_shadow_origin(address, size, -1, handle, checked);
}

void kmsan_internal_unpoison_memory(void *address, size_t size, bool checked)
{
	kmsan_internal_set_shadow_origin(address, size, 0, 0, checked);
}
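
/*
 * Example (hypothetical caller, a minimal sketch): a free hook could poison a
 * buffer it has just released, recording the current stack as a "freed here"
 * origin; KMSAN_POISON_FREE tags that origin as a use-after-free one. The
 * function name below is illustrative only.
 */
static __maybe_unused void kmsan_example_mark_freed(void *buf, size_t len)
{
	kmsan_internal_poison_memory(buf, len, GFP_ATOMIC,
				     KMSAN_POISON_CHECK | KMSAN_POISON_FREE);
}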

depot_stack_handle_t kmsan_save_stack_with_flags(gfp_t flags,
						 unsigned int extra)
{
	unsigned long entries[KMSAN_STACK_DEPTH];
	unsigned int nr_entries;
	depot_stack_handle_t handle;

	nr_entries = stack_trace_save(entries, KMSAN_STACK_DEPTH, 0);

	/* Don't sleep. */
	flags &= ~(__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM);

	handle = stack_depot_save(entries, nr_entries, flags);
	return stack_depot_set_extra_bits(handle, extra);
}
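
/*
 * Example (hypothetical, a minimal sketch): a handle produced above can be
 * decoded back into the raw trace and its extra bits via the stack depot API.
 */
static __maybe_unused void kmsan_example_dump_handle(depot_stack_handle_t handle)
{
	unsigned long *entries;
	unsigned int nr_entries;

	nr_entries = stack_depot_fetch(handle, &entries);
	pr_info("%u frames, extra bits: %u\n", nr_entries,
		stack_depot_get_extra_bits(handle));
}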

/* Copy the metadata, following memmove() semantics. */
void kmsan_internal_memmove_metadata(void *dst, void *src, size_t n)
{
	depot_stack_handle_t prev_old_origin = 0, prev_new_origin = 0;
	int i, iter, step, src_off, dst_off, oiter_src, oiter_dst;
	depot_stack_handle_t old_origin = 0, new_origin = 0;
	depot_stack_handle_t *origin_src, *origin_dst;
	u8 *shadow_src, *shadow_dst;
	u32 *align_shadow_dst;
	bool backwards;

	shadow_dst = kmsan_get_metadata(dst, KMSAN_META_SHADOW);
	if (!shadow_dst)
		return;
	KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(dst, n));
	align_shadow_dst =
		(u32 *)ALIGN_DOWN((u64)shadow_dst, KMSAN_ORIGIN_SIZE);

	shadow_src = kmsan_get_metadata(src, KMSAN_META_SHADOW);
	if (!shadow_src) {
		/* @src is untracked: mark @dst as initialized. */
		kmsan_internal_unpoison_memory(dst, n, /*checked*/ false);
		return;
	}
	KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(src, n));

	origin_dst = kmsan_get_metadata(dst, KMSAN_META_ORIGIN);
	origin_src = kmsan_get_metadata(src, KMSAN_META_ORIGIN);
	KMSAN_WARN_ON(!origin_dst || !origin_src);

	backwards = dst > src;
	step = backwards ? -1 : 1;
	iter = backwards ? n - 1 : 0;
	src_off = (u64)src % KMSAN_ORIGIN_SIZE;
	dst_off = (u64)dst % KMSAN_ORIGIN_SIZE;
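
	/*
	 * Worked example (illustrative): with KMSAN_ORIGIN_SIZE == 4,
	 * src == ...0x1 and dst == ...0x8 give src_off == 1 and dst_off == 0,
	 * so shadow byte @iter == 3 takes its origin from slot
	 * (3 + 1) / 4 == 1 of @src but stores it into slot (3 + 0) / 4 == 0
	 * of @dst.
	 */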

	/* Copy shadow bytes one by one, updating the origins if necessary. */
	for (i = 0; i < n; i++, iter += step) {
		oiter_src = (iter + src_off) / KMSAN_ORIGIN_SIZE;
		oiter_dst = (iter + dst_off) / KMSAN_ORIGIN_SIZE;
		if (!shadow_src[iter]) {
			shadow_dst[iter] = 0;
			if (!align_shadow_dst[oiter_dst])
				origin_dst[oiter_dst] = 0;
			continue;
		}
		shadow_dst[iter] = shadow_src[iter];
		old_origin = origin_src[oiter_src];
		if (old_origin == prev_old_origin)
			new_origin = prev_new_origin;
		else {
			/*
			 * kmsan_internal_chain_origin() may return
			 * NULL, but we don't want to lose the previous
			 * origin value.
			 */
			new_origin = kmsan_internal_chain_origin(old_origin);
			if (!new_origin)
				new_origin = old_origin;
		}
		origin_dst[oiter_dst] = new_origin;
		prev_new_origin = new_origin;
		prev_old_origin = old_origin;
	}
}

depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id)
{
	unsigned long entries[3];
	u32 extra_bits;
	int depth;
	bool uaf;
	depot_stack_handle_t handle;

	if (!id)
		return id;
	/*
	 * Make sure we have enough spare bits in @id to hold the UAF bit and
	 * the chain depth.
	 */
	BUILD_BUG_ON(
		(1 << STACK_DEPOT_EXTRA_BITS) <= (KMSAN_MAX_ORIGIN_DEPTH << 1));

	extra_bits = stack_depot_get_extra_bits(id);
	depth = kmsan_depth_from_eb(extra_bits);
	uaf = kmsan_uaf_from_eb(extra_bits);

	/*
	 * Stop chaining origins once the depth reaches KMSAN_MAX_ORIGIN_DEPTH.
	 * This mostly happens when structures with uninitialized padding are
	 * copied around many times. Origin chains for such structures are
	 * usually periodic, and it does not make sense to fully store them.
	 */
	if (depth == KMSAN_MAX_ORIGIN_DEPTH)
		return id;

	depth++;
	extra_bits = kmsan_extra_bits(depth, uaf);

	entries[0] = KMSAN_CHAIN_MAGIC_ORIGIN;
	entries[1] = kmsan_save_stack_with_flags(__GFP_HIGH, 0);
	entries[2] = id;
	/*
	 * @entries is a local var in non-instrumented code, so KMSAN does not
	 * know it is initialized. Explicitly unpoison it to avoid false
	 * positives when stack_depot_save() passes it to instrumented code.
	 */
	kmsan_internal_unpoison_memory(entries, sizeof(entries), false);
	handle = stack_depot_save(entries, ARRAY_SIZE(entries), __GFP_HIGH);
	return stack_depot_set_extra_bits(handle, extra_bits);
}
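
/*
 * Example (hypothetical, a minimal sketch): the extra bits attached to a
 * handle pack the chain depth above the UAF flag, so the two decoding helpers
 * from "kmsan.h" are assumed to invert kmsan_extra_bits(). A quick sanity
 * check of that round trip:
 */
static __maybe_unused void kmsan_example_extra_bits_roundtrip(void)
{
	u32 eb = kmsan_extra_bits(/*depth*/ 3, /*uaf*/ true);

	KMSAN_WARN_ON(kmsan_depth_from_eb(eb) != 3);
	KMSAN_WARN_ON(!kmsan_uaf_from_eb(eb));
}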

void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,
				      u32 origin, bool checked)
{
	u64 address = (u64)addr;
	void *shadow_start;
	u32 *origin_start;
	size_t pad = 0;

	KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(addr, size));
	shadow_start = kmsan_get_metadata(addr, KMSAN_META_SHADOW);
	if (!shadow_start) {
		/*
		 * kmsan_metadata_is_contiguous() is true, so either all shadow
		 * and origin pages are NULL, or all are non-NULL.
		 */
		if (checked) {
			pr_err("%s: not memsetting %zu bytes starting at %px, because the shadow is NULL\n",
			       __func__, size, addr);
			KMSAN_WARN_ON(true);
		}
		return;
	}
	__memset(shadow_start, b, size);

	if (!IS_ALIGNED(address, KMSAN_ORIGIN_SIZE)) {
		pad = address % KMSAN_ORIGIN_SIZE;
		address -= pad;
		size += pad;
	}
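	/*
	 * Worked example (illustrative): for addr == ...0x6 and size == 5,
	 * pad == 2, so the origin range starts at ...0x4 and size grows to
	 * ALIGN(7, 4) == 8 below, i.e. two 4-byte origin slots covering
	 * bytes ...0x4 through ...0xb.
	 */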
	size = ALIGN(size, KMSAN_ORIGIN_SIZE);
	origin_start =
		(u32 *)kmsan_get_metadata((void *)address, KMSAN_META_ORIGIN);

	for (int i = 0; i < size / KMSAN_ORIGIN_SIZE; i++)
		origin_start[i] = origin;
}

struct page *kmsan_vmalloc_to_page_or_null(void *vaddr)
{
	struct page *page;

	if (!kmsan_internal_is_vmalloc_addr(vaddr) &&
	    !kmsan_internal_is_module_addr(vaddr))
		return NULL;
	page = vmalloc_to_page(vaddr);
	if (pfn_valid(page_to_pfn(page)))
		return page;
	else
		return NULL;
}

void kmsan_internal_check_memory(void *addr, size_t size, const void *user_addr,
				 int reason)
{
	depot_stack_handle_t cur_origin = 0, new_origin = 0;
	unsigned long addr64 = (unsigned long)addr;
	depot_stack_handle_t *origin = NULL;
	unsigned char *shadow = NULL;
	int cur_off_start = -1;
	int chunk_size;
	size_t pos = 0;

	if (!size)
		return;
	KMSAN_WARN_ON(!kmsan_metadata_is_contiguous(addr, size));
	while (pos < size) {
		chunk_size = min(size - pos,
				 PAGE_SIZE - ((addr64 + pos) % PAGE_SIZE));
		shadow = kmsan_get_metadata((void *)(addr64 + pos),
					    KMSAN_META_SHADOW);
		if (!shadow) {
			/*
			 * This page is untracked. If there were uninitialized
			 * bytes before, report them.
			 */
			if (cur_origin) {
				kmsan_enter_runtime();
				kmsan_report(cur_origin, addr, size,
					     cur_off_start, pos - 1, user_addr,
					     reason);
				kmsan_leave_runtime();
			}
			cur_origin = 0;
			cur_off_start = -1;
			pos += chunk_size;
			continue;
		}
		for (int i = 0; i < chunk_size; i++) {
			if (!shadow[i]) {
				/*
				 * This byte is unpoisoned. If there were
				 * poisoned bytes before, report them.
				 */
				if (cur_origin) {
					kmsan_enter_runtime();
					kmsan_report(cur_origin, addr, size,
						     cur_off_start, pos + i - 1,
						     user_addr, reason);
					kmsan_leave_runtime();
				}
				cur_origin = 0;
				cur_off_start = -1;
				continue;
			}
			origin = kmsan_get_metadata((void *)(addr64 + pos + i),
						    KMSAN_META_ORIGIN);
			KMSAN_WARN_ON(!origin);
			new_origin = *origin;
			/*
			 * Encountered new origin - report the previous
			 * uninitialized range.
			 */
			if (cur_origin != new_origin) {
				if (cur_origin) {
					kmsan_enter_runtime();
					kmsan_report(cur_origin, addr, size,
						     cur_off_start, pos + i - 1,
						     user_addr, reason);
					kmsan_leave_runtime();
				}
				cur_origin = new_origin;
				cur_off_start = pos + i;
			}
		}
		pos += chunk_size;
	}
	KMSAN_WARN_ON(pos != size);
	if (cur_origin) {
		kmsan_enter_runtime();
		kmsan_report(cur_origin, addr, size, cur_off_start, pos - 1,
			     user_addr, reason);
		kmsan_leave_runtime();
	}
}
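
/*
 * Example (hypothetical caller, a minimal sketch): a copy_to_user() hook
 * could check the kernel buffer before its contents leave the kernel. The
 * REASON_COPY_TO_USER value is assumed to come from "kmsan.h"; the function
 * name below is illustrative only.
 */
static __maybe_unused void kmsan_example_check_copyout(const void *kaddr,
						       size_t len,
						       const void __user *uaddr)
{
	/* Reports every uninitialized range inside [kaddr, kaddr + len). */
	kmsan_internal_check_memory((void *)kaddr, len, (const void *)uaddr,
				    REASON_COPY_TO_USER);
}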

bool kmsan_metadata_is_contiguous(void *addr, size_t size)
{
	char *cur_shadow = NULL, *next_shadow = NULL, *cur_origin = NULL,
	     *next_origin = NULL;
	u64 cur_addr = (u64)addr, next_addr = cur_addr + PAGE_SIZE;
	depot_stack_handle_t *origin_p;
	bool all_untracked = false;

	if (!size)
		return true;

	/* The whole range belongs to the same page. */
	if (ALIGN_DOWN(cur_addr + size - 1, PAGE_SIZE) ==
	    ALIGN_DOWN(cur_addr, PAGE_SIZE))
		return true;

	cur_shadow = kmsan_get_metadata((void *)cur_addr, /*is_origin*/ false);
	if (!cur_shadow)
		all_untracked = true;
	cur_origin = kmsan_get_metadata((void *)cur_addr, /*is_origin*/ true);
	if (all_untracked && cur_origin)
		goto report;

	for (; next_addr < (u64)addr + size;
	     cur_addr = next_addr, cur_shadow = next_shadow,
	     cur_origin = next_origin, next_addr += PAGE_SIZE) {
		next_shadow = kmsan_get_metadata((void *)next_addr, false);
		next_origin = kmsan_get_metadata((void *)next_addr, true);
		if (all_untracked) {
			/* Every page in the range must remain untracked. */
			if (next_shadow || next_origin)
				goto report;
			continue;
		}
		if (((u64)cur_shadow == ((u64)next_shadow - PAGE_SIZE)) &&
		    ((u64)cur_origin == ((u64)next_origin - PAGE_SIZE)))
			continue;
		goto report;
	}
	return true;

report:
	pr_err("%s: attempting to access two shadow page ranges.\n", __func__);
	pr_err("Access of size %zu at %px.\n", size, addr);
	pr_err("Addresses belonging to different ranges: %px and %px\n",
	       (void *)cur_addr, (void *)next_addr);
	pr_err("page[0].shadow: %px, page[1].shadow: %px\n", cur_shadow,
	       next_shadow);
	pr_err("page[0].origin: %px, page[1].origin: %px\n", cur_origin,
	       next_origin);
	origin_p = kmsan_get_metadata(addr, KMSAN_META_ORIGIN);
	if (origin_p) {
		pr_err("Origin: %08x\n", *origin_p);
		kmsan_print_origin(*origin_p);
	} else {
		pr_err("Origin: unavailable\n");
	}
	return false;
}