// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE reporting.
 *
 * Copyright (C) 2020, Google LLC.
 */

#include <linux/stdarg.h>

#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/math.h>
#include <linux/printk.h>
#include <linux/sched/debug.h>
#include <linux/seq_file.h>
#include <linux/sprintf.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <trace/events/error_report.h>

#include <asm/kfence.h>

#include "kfence.h"

/*
 * Prefix of function names in "%ps"/"%pS" output on architectures that
 * decorate C symbol names; may be overridden by <asm/kfence.h>.
 */
#ifndef ARCH_FUNC_PREFIX
#define ARCH_FUNC_PREFIX ""
#endif

/*
 * Helper function to either print to a seq_file or to console. Reports pass a
 * NULL @seq to print to the console; the debugfs interface supplies a real
 * seq_file.
 */
__printf(2, 3)
static void seq_con_printf(struct seq_file *seq, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	if (seq)
		seq_vprintf(seq, fmt, args);
	else
		vprintk(fmt, args);
	va_end(args);
}

/*
 * Get the number of stack entries to skip to get out of MM internals. @type is
 * optional, and if set to NULL, assumes an allocation or free stack.
 */
static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries,
			    const enum kfence_error_type *type)
{
	char buf[64];
	int skipnr, fallback = 0;

	if (type) {
		/* Depending on error type, find different stack entries. */
		switch (*type) {
		case KFENCE_ERROR_UAF:
		case KFENCE_ERROR_OOB:
		case KFENCE_ERROR_INVALID:
			/*
			 * kfence_handle_page_fault() may be called with pt_regs
			 * set to NULL; in that case we'll simply show the full
			 * stack trace.
			 */
			return 0;
		case KFENCE_ERROR_CORRUPTION:
		case KFENCE_ERROR_INVALID_FREE:
			break;
		}
	}

	for (skipnr = 0; skipnr < num_entries; skipnr++) {
		int len = scnprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skipnr]);

		if (str_has_prefix(buf, ARCH_FUNC_PREFIX "kfence_") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kfence_") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmem_cache_free") ||
		    !strncmp(buf, ARCH_FUNC_PREFIX "__slab_free", len)) {
			/*
			 * The compiler may tail-call from one of the entry
			 * points below into one of the internals above, in
			 * which case the stack trace omits the entry point;
			 * remember the frame after the internal function as a
			 * fallback.
			 */
			fallback = skipnr + 1;
		}

		/*
		 * The below list should only include the initial entry points
		 * into the slab allocators. Includes the *_bulk() variants by
		 * checking prefixes.
		 */
		if (str_has_prefix(buf, ARCH_FUNC_PREFIX "kfree") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "kmem_cache_free") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "__kmalloc") ||
		    str_has_prefix(buf, ARCH_FUNC_PREFIX "kmem_cache_alloc"))
			goto found;
	}
	if (fallback < num_entries)
		return fallback;
found:
	skipnr++;
	return skipnr < num_entries ? skipnr : 0;
}
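
/*
 * Illustration with a hypothetical free stack (function names below are
 * examples only):
 *
 *   entry 0: kfence_guarded_free   <- KFENCE internal ("kfence_" prefix)
 *   entry 1: __kfence_free         <- KFENCE internal ("__kfence_" prefix)
 *   entry 2: kfree                 <- allocator entry point, matches "kfree"
 *   entry 3: some_driver_teardown  <- first frame reported
 *
 * get_stack_skipnr() matches "kfree" at index 2 and returns 3, so the report
 * starts at the first frame outside KFENCE and the slab allocator.
 */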

static void kfence_print_stack(struct seq_file *seq, const struct kfence_metadata *meta,
			       bool show_alloc)
{
	const struct kfence_track *track = show_alloc ? &meta->alloc_track : &meta->free_track;
	/* do_div() divides ts_sec in place and returns the remainder in ns. */
	u64 ts_sec = track->ts_nsec;
	unsigned long rem_nsec = do_div(ts_sec, NSEC_PER_SEC);

	/* Timestamp matches printk timestamp format. */
	seq_con_printf(seq, "%s by task %d on cpu %d at %lu.%06lus:\n",
		       show_alloc ? "allocated" : "freed", track->pid,
		       track->cpu, (unsigned long)ts_sec, rem_nsec / 1000);

	if (track->num_stack_entries) {
		/* Skip allocation/free internals stack. */
		int i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);

		/* stack_trace_seq_print() does not exist; open code our own. */
		for (; i < track->num_stack_entries; i++)
			seq_con_printf(seq, " %pS\n", (void *)track->stack_entries[i]);
	} else {
		seq_con_printf(seq, " no %s stack\n", show_alloc ? "allocation" : "deallocation");
	}
}
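
/*
 * Example output (illustrative): "allocated by task 507 on cpu 2 at
 * 23.919338s:", followed by one " %pS" line per stack entry past the
 * allocator internals.
 */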

void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta)
{
	const int size = abs(meta->size);
	const unsigned long start = meta->addr;
	const struct kmem_cache *const cache = meta->cache;

	lockdep_assert_held(&meta->lock);

	if (meta->state == KFENCE_OBJECT_UNUSED) {
		seq_con_printf(seq, "kfence-#%td unused\n", meta - kfence_metadata);
		return;
	}

	seq_con_printf(seq, "kfence-#%td: 0x%p-0x%p, size=%d, cache=%s\n\n",
		       meta - kfence_metadata, (void *)start, (void *)(start + size - 1),
		       size, (cache && cache->name) ? cache->name : "<destroyed>");

	kfence_print_stack(seq, meta, true);

	if (meta->state == KFENCE_OBJECT_FREED) {
		seq_con_printf(seq, "\n");
		kfence_print_stack(seq, meta, false);
	}
}
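
/*
 * Example object info as it appears in a report or in the debugfs objects
 * file (illustrative; pointers are hashed unless no_hash_pointers is set):
 *
 *   kfence-#72: 0x...-0x..., size=32, cache=kmalloc-32
 *
 * followed by the allocation stack, and the free stack if the object has been
 * freed.
 */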

/*
 * Show bytes at @address that are different from the expected canary values,
 * up to @bytes_to_show.
 */
static void print_diff_canary(unsigned long address, size_t bytes_to_show,
			      const struct kfence_metadata *meta)
{
	const unsigned long show_until_addr = address + bytes_to_show;
	const u8 *cur, *end;

	/* Do not show contents of object nor read into following guard page. */
	end = (const u8 *)(address < meta->addr ? min(show_until_addr, meta->addr)
						: min(show_until_addr, PAGE_ALIGN(address)));

	pr_cont("[");
	for (cur = (const u8 *)address; cur < end; cur++) {
		if (*cur == KFENCE_CANARY_PATTERN_U8(cur))
			pr_cont(" .");
		else if (no_hash_pointers)
			pr_cont(" 0x%02x", *cur);
		else /* Do not leak kernel memory in non-debug builds. */
			pr_cont(" !");
	}
	pr_cont(" ]");
}
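
/*
 * Example (illustrative): a single corrupted canary byte might print as
 *
 *   [ ! . . . . . . . . . . . . . . . ]
 *
 * or, with no_hash_pointers, as "[ 0x3e . . ... ]" showing the actual byte
 * value (0x3e here is a made-up example).
 */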

static const char *get_access_type(bool is_write)
{
	return is_write ? "write" : "read";
}

void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs,
			 const struct kfence_metadata *meta, enum kfence_error_type type)
{
	unsigned long stack_entries[KFENCE_STACK_DEPTH] = { 0 };
	const ptrdiff_t object_index = meta ? meta - kfence_metadata : -1;
	int num_stack_entries;
	int skipnr = 0;

	if (regs) {
		/* The faulting instruction is entry 0; nothing to skip. */
		num_stack_entries = stack_trace_save_regs(regs, stack_entries, KFENCE_STACK_DEPTH, 0);
	} else {
		num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 1);
		skipnr = get_stack_skipnr(stack_entries, num_stack_entries, &type);
	}

	/* Require non-NULL meta, except if KFENCE_ERROR_INVALID. */
	if (WARN_ON(type != KFENCE_ERROR_INVALID && !meta))
		return;

	if (meta)
		lockdep_assert_held(&meta->lock);
	/*
	 * Because we may generate reports in printk-unfriendly parts of the
	 * kernel, such as scheduler code, the use of printk() could deadlock.
	 * Until such time that all printing code here is safe in all parts of
	 * the kernel, accept the risk, and just get our message out (given the
	 * system might already behave unpredictably due to the memory error).
	 * As such, also disable lockdep to hide warnings, and avoid disabling
	 * lockdep for the rest of the kernel.
	 */
	lockdep_off();

	pr_err("==================================================================\n");
	/* Print report header. */
	switch (type) {
	case KFENCE_ERROR_OOB: {
		const bool left_of_object = address < meta->addr;

		pr_err("BUG: KFENCE: out-of-bounds %s in %pS\n\n", get_access_type(is_write),
		       (void *)stack_entries[skipnr]);
		pr_err("Out-of-bounds %s at 0x%p (%luB %s of kfence-#%td):\n",
		       get_access_type(is_write), (void *)address,
		       left_of_object ? meta->addr - address : address - meta->addr,
		       left_of_object ? "left" : "right", object_index);
		break;
	}
	case KFENCE_ERROR_UAF:
		pr_err("BUG: KFENCE: use-after-free %s in %pS\n\n", get_access_type(is_write),
		       (void *)stack_entries[skipnr]);
		pr_err("Use-after-free %s at 0x%p (in kfence-#%td):\n",
		       get_access_type(is_write), (void *)address, object_index);
		break;
	case KFENCE_ERROR_CORRUPTION:
		pr_err("BUG: KFENCE: memory corruption in %pS\n\n", (void *)stack_entries[skipnr]);
		pr_err("Corrupted memory at 0x%p ", (void *)address);
		print_diff_canary(address, 16, meta);
		pr_cont(" (in kfence-#%td):\n", object_index);
		break;
	case KFENCE_ERROR_INVALID:
		pr_err("BUG: KFENCE: invalid %s in %pS\n\n", get_access_type(is_write),
		       (void *)stack_entries[skipnr]);
		pr_err("Invalid %s at 0x%p:\n", get_access_type(is_write),
		       (void *)address);
		break;
	case KFENCE_ERROR_INVALID_FREE:
		pr_err("BUG: KFENCE: invalid free in %pS\n\n", (void *)stack_entries[skipnr]);
		pr_err("Invalid free of 0x%p (in kfence-#%td):\n", (void *)address,
		       object_index);
		break;
	}

	/* Print stack trace and object info. */
	stack_trace_print(stack_entries + skipnr, num_stack_entries - skipnr, 0);

	if (meta) {
		pr_err("\n");
		kfence_print_object(NULL, meta);
	}

	/* Print report footer. */
	pr_err("\n");
	if (no_hash_pointers && regs)
		show_regs(regs);
	else
		dump_stack_print_info(KERN_ERR);
	trace_error_report_end(ERROR_DETECTOR_KFENCE, address);
	pr_err("==================================================================\n");

	lockdep_on();

	/* Panic here if panic_on_warn is set. */
	check_panic_on_warn("KFENCE");

	/* We encountered a memory safety error, taint the kernel! */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_STILL_OK);
}
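
/*
 * Example report header for an out-of-bounds read, derived from the format
 * strings above (illustrative):
 *
 *   ==================================================================
 *   BUG: KFENCE: out-of-bounds read in <caller>+0x.../0x...
 *
 *   Out-of-bounds read at 0x... (1B left of kfence-#72):
 */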

#ifdef CONFIG_PRINTK
static void kfence_to_kp_stack(const struct kfence_track *track, void **kp_stack)
{
	int i, j;

	i = get_stack_skipnr(track->stack_entries, track->num_stack_entries, NULL);
	for (j = 0; i < track->num_stack_entries && j < KS_ADDRS_COUNT; ++i, ++j)
		kp_stack[j] = (void *)track->stack_entries[i];
	if (j < KS_ADDRS_COUNT)
		kp_stack[j] = NULL;
}

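/*
 * Fill in @kpp for a KFENCE object; this backs kmem_dump_obj()-style
 * diagnostics. Returns false if @object is not a KFENCE allocation, so that
 * callers can fall back to the regular slab lookup.
 */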
bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
	struct kfence_metadata *meta = addr_to_metadata((unsigned long)object);
	unsigned long flags;

	if (!meta)
		return false;

	/*
	 * If state is UNUSED at least show the pointer requested; the rest
	 * would be garbage data.
	 */
	kpp->kp_ptr = object;

	/* Requesting info on a never-used object is almost certainly a bug. */
	if (WARN_ON(meta->state == KFENCE_OBJECT_UNUSED))
		return true;

	raw_spin_lock_irqsave(&meta->lock, flags);

	kpp->kp_slab = slab;
	kpp->kp_slab_cache = meta->cache;
	kpp->kp_objp = (void *)meta->addr;
	kfence_to_kp_stack(&meta->alloc_track, kpp->kp_stack);
	if (meta->state == KFENCE_OBJECT_FREED)
		kfence_to_kp_stack(&meta->free_track, kpp->kp_free_stack);
	/* get_stack_skipnr() ensures the first entry is outside allocator. */
	kpp->kp_ret = kpp->kp_stack[0];

	raw_spin_unlock_irqrestore(&meta->lock, flags);

	return true;
}
#endif