/*
 * arch/xtensa/kernel/traps.c
 *
 * Exception handling.
 *
 * Derived from code with the following copyrights:
 * Copyright (C) 1994 - 1999 by Ralf Baechle
 * Modified for R3000 by Paul M. Antoine, 1995, 1996
 * Complete output from die() by Ulf Carlsson, 1998
 * Copyright (C) 1999 Silicon Graphics, Inc.
 *
 * Essentially rewritten for the Xtensa architecture port.
 *
 * Copyright (C) 2001 - 2013 Tensilica Inc.
 *
 * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
 * Chris Zankel	<chris@zankel.net>
 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
 * Kevin Chea
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/kallsyms.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/ratelimit.h>
#include <linux/pgtable.h>

#include <asm/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/timex.h>
#include <linux/uaccess.h>
#include <asm/processor.h>
#include <asm/traps.h>
#include <asm/hw_breakpoint.h>

/*
 * Machine specific interrupt handlers
 */

static void do_illegal_instruction(struct pt_regs *regs);
static void do_div0(struct pt_regs *regs);
static void do_interrupt(struct pt_regs *regs);
#if XTENSA_FAKE_NMI
static void do_nmi(struct pt_regs *regs);
#endif
#ifdef CONFIG_XTENSA_LOAD_STORE
static void do_load_store(struct pt_regs *regs);
#endif
static void do_unaligned_user(struct pt_regs *regs);
static void do_multihit(struct pt_regs *regs);
#if XTENSA_HAVE_COPROCESSORS
static void do_coprocessor(struct pt_regs *regs);
#endif
static void do_debug(struct pt_regs *regs);

/*
 * The vector table must be preceded by a save area (which
 * implies it must be in RAM, unless one places RAM immediately
 * before a ROM and puts the vector at the start of the ROM (!))
 */

#define KRNL		0x01
#define USER		0x02

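/*
 * Each configured coprocessor registers two entries for its "disabled"
 * exception: fast_coprocessor as the fast handler for both user and kernel
 * exceptions, and do_coprocessor as the default C handler (which flushes
 * and releases all coprocessor state; see do_coprocessor() below).
 */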
#define COPROCESSOR(x)							\
{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, USER|KRNL, fast_coprocessor },\
{ EXCCAUSE_COPROCESSOR ## x ## _DISABLED, 0, do_coprocessor }

typedef struct {
	int cause;
	int fast;
	void *handler;
} dispatch_init_table_t;

static dispatch_init_table_t __initdata dispatch_init_table[] = {

#ifdef CONFIG_USER_ABI_CALL0_PROBE
{ EXCCAUSE_ILLEGAL_INSTRUCTION,	USER,	   fast_illegal_instruction_user },
#endif
{ EXCCAUSE_ILLEGAL_INSTRUCTION,	0,	   do_illegal_instruction },
{ EXCCAUSE_SYSTEM_CALL,		USER,	   fast_syscall_user },
{ EXCCAUSE_SYSTEM_CALL,		0,	   system_call },
/* EXCCAUSE_INSTRUCTION_FETCH unhandled */
#ifdef CONFIG_XTENSA_LOAD_STORE
{ EXCCAUSE_LOAD_STORE_ERROR,	USER|KRNL, fast_load_store },
{ EXCCAUSE_LOAD_STORE_ERROR,	0,	   do_load_store },
#endif
{ EXCCAUSE_LEVEL1_INTERRUPT,	0,	   do_interrupt },
#ifdef SUPPORT_WINDOWED
{ EXCCAUSE_ALLOCA,		USER|KRNL, fast_alloca },
#endif
{ EXCCAUSE_INTEGER_DIVIDE_BY_ZERO, 0,	   do_div0 },
/* EXCCAUSE_PRIVILEGED unhandled */
#if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION || \
		IS_ENABLED(CONFIG_XTENSA_LOAD_STORE)
#ifdef CONFIG_XTENSA_UNALIGNED_USER
{ EXCCAUSE_UNALIGNED,		USER,	   fast_unaligned },
#endif
{ EXCCAUSE_UNALIGNED,		KRNL,	   fast_unaligned },
#endif
{ EXCCAUSE_UNALIGNED,		0,	   do_unaligned_user },
#ifdef CONFIG_MMU
{ EXCCAUSE_ITLB_MISS,			0,	   do_page_fault },
{ EXCCAUSE_ITLB_MISS,			USER|KRNL, fast_second_level_miss },
{ EXCCAUSE_DTLB_MISS,			USER|KRNL, fast_second_level_miss },
{ EXCCAUSE_DTLB_MISS,			0,	   do_page_fault },
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE,	USER|KRNL, fast_store_prohibited },
#endif /* CONFIG_MMU */
#ifdef CONFIG_PFAULT
{ EXCCAUSE_ITLB_MULTIHIT,		0,	   do_multihit },
{ EXCCAUSE_ITLB_PRIVILEGE,		0,	   do_page_fault },
{ EXCCAUSE_FETCH_CACHE_ATTRIBUTE,	0,	   do_page_fault },
{ EXCCAUSE_DTLB_MULTIHIT,		0,	   do_multihit },
{ EXCCAUSE_DTLB_PRIVILEGE,		0,	   do_page_fault },
{ EXCCAUSE_STORE_CACHE_ATTRIBUTE,	0,	   do_page_fault },
{ EXCCAUSE_LOAD_CACHE_ATTRIBUTE,	0,	   do_page_fault },
#endif
/* EXCCAUSE_FLOATING_POINT unhandled */
#if XTENSA_HAVE_COPROCESSOR(0)
COPROCESSOR(0),
#endif
#if XTENSA_HAVE_COPROCESSOR(1)
COPROCESSOR(1),
#endif
#if XTENSA_HAVE_COPROCESSOR(2)
COPROCESSOR(2),
#endif
#if XTENSA_HAVE_COPROCESSOR(3)
COPROCESSOR(3),
#endif
#if XTENSA_HAVE_COPROCESSOR(4)
COPROCESSOR(4),
#endif
#if XTENSA_HAVE_COPROCESSOR(5)
COPROCESSOR(5),
#endif
#if XTENSA_HAVE_COPROCESSOR(6)
COPROCESSOR(6),
#endif
#if XTENSA_HAVE_COPROCESSOR(7)
COPROCESSOR(7),
#endif
#if XTENSA_FAKE_NMI
{ EXCCAUSE_MAPPED_NMI,			0,		do_nmi },
#endif
{ EXCCAUSE_MAPPED_DEBUG,		0,		do_debug },
{ -1, -1, 0 }

};
/* The exception table <exc_table> serves two functions:
 * 1. it contains three dispatch tables (fast_user, fast_kernel, default-c)
 * 2. it is a temporary memory buffer for the exception handlers.
 */
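/*
 * trap_init_excsave() stores the address of the current CPU's exc_table in
 * EXCSAVE_1 so that the low-level exception dispatch code can locate it.
 */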

DEFINE_PER_CPU(struct exc_table, exc_table);
DEFINE_PER_CPU(struct debug_table, debug_table);

void die(const char *, struct pt_regs *, long);

static inline void
__die_if_kernel(const char *str, struct pt_regs *regs, long err)
{
	if (!user_mode(regs))
		die(str, regs, err);
}

#ifdef CONFIG_PRINT_USER_CODE_ON_UNHANDLED_EXCEPTION
static inline void dump_user_code(struct pt_regs *regs)
{
	char buf[32];

	if (copy_from_user(buf, (void __user *)(regs->pc & -16), sizeof(buf)) == 0) {
		print_hex_dump(KERN_INFO, " ", DUMP_PREFIX_NONE,
			       32, 1, buf, sizeof(buf), false);
	}
}
#else
static inline void dump_user_code(struct pt_regs *regs)
{
}
#endif

/*
 * Unhandled Exceptions. Kill user task or panic if in kernel space.
 */

void do_unhandled(struct pt_regs *regs)
{
	__die_if_kernel("Caught unhandled exception - should not happen",
			regs, SIGKILL);

	/* If in user mode, send SIGILL signal to current process */
	pr_info_ratelimited("Caught unhandled exception in '%s' "
			    "(pid = %d, pc = %#010lx) - should not happen\n"
			    "\tEXCCAUSE is %ld\n",
			    current->comm, task_pid_nr(current), regs->pc,
			    regs->exccause);
	dump_user_code(regs);
	force_sig(SIGILL);
}

/*
 * Multi-hit exception. This is fatal!
 */

static void do_multihit(struct pt_regs *regs)
{
	die("Caught multihit exception", regs, SIGKILL);
}

/*
 * IRQ handler.
 */

#if XTENSA_FAKE_NMI

#define IS_POW2(v) (((v) & ((v) - 1)) == 0)

#if !(PROFILING_INTLEVEL == XCHAL_EXCM_LEVEL && \
      IS_POW2(XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL)))
#warning "Fake NMI is requested for PMM, but there are other IRQs at or above its level."
#warning "Fake NMI will be used, but there will be a bugcheck if one of those IRQs fires."

static inline void check_valid_nmi(void)
{
	unsigned intread = xtensa_get_sr(interrupt);
	unsigned intenable = xtensa_get_sr(intenable);

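	/*
	 * Bugcheck if any interrupt at or above the profiling level, other
	 * than the profiling IRQ itself, is both pending and enabled: such
	 * an IRQ cannot be handled while interrupts are taken as a fake NMI.
	 */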
	BUG_ON(intread & intenable &
	       ~(XTENSA_INTLEVEL_ANDBELOW_MASK(PROFILING_INTLEVEL) ^
		 XTENSA_INTLEVEL_MASK(PROFILING_INTLEVEL) ^
		 BIT(XCHAL_PROFILING_INTERRUPT)));
}

#else

static inline void check_valid_nmi(void)
{
}

#endif

irqreturn_t xtensa_pmu_irq_handler(int irq, void *dev_id);

DEFINE_PER_CPU(unsigned long, nmi_count);

static void do_nmi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	nmi_enter();
	++*this_cpu_ptr(&nmi_count);
	check_valid_nmi();
	xtensa_pmu_irq_handler(0, NULL);
	nmi_exit();
	set_irq_regs(old_regs);
}
#endif

static void do_interrupt(struct pt_regs *regs)
{
	static const unsigned int_level_mask[] = {
		0,
		XCHAL_INTLEVEL1_MASK,
		XCHAL_INTLEVEL2_MASK,
		XCHAL_INTLEVEL3_MASK,
		XCHAL_INTLEVEL4_MASK,
		XCHAL_INTLEVEL5_MASK,
		XCHAL_INTLEVEL6_MASK,
		XCHAL_INTLEVEL7_MASK,
	};
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned unhandled = ~0u;

	irq_enter();

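	/*
	 * Service pending, enabled interrupts starting from the highest
	 * level. The 'unhandled' mask round-robins among multiple pending
	 * IRQs at the same level, so each of them is serviced once before
	 * any of them is serviced a second time.
	 */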
	for (;;) {
		unsigned intread = xtensa_get_sr(interrupt);
		unsigned intenable = xtensa_get_sr(intenable);
		unsigned int_at_level = intread & intenable;
		unsigned level;

		for (level = LOCKLEVEL; level > 0; --level) {
			if (int_at_level & int_level_mask[level]) {
				int_at_level &= int_level_mask[level];
				if (int_at_level & unhandled)
					int_at_level &= unhandled;
				else
					unhandled |= int_level_mask[level];
				break;
			}
		}

		if (level == 0)
			break;

		/* clear lowest pending irq in the unhandled mask */
		unhandled ^= (int_at_level & -int_at_level);
		do_IRQ(__ffs(int_at_level), regs);
	}

	irq_exit();
	set_irq_regs(old_regs);
}

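/*
 * Software integer division is assumed here to tag division by zero with
 * an illegal instruction followed by the marker bytes "DIV0". Look for
 * that marker near the faulting PC; the two memcmp() offsets below allow
 * for either a two- or three-byte instruction preceding the marker.
 */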
static bool check_div0(struct pt_regs *regs)
{
	static const u8 pattern[] = {'D', 'I', 'V', '0'};
	const u8 *p;
	u8 buf[5];

	if (user_mode(regs)) {
		if (copy_from_user(buf, (void __user *)regs->pc + 2, sizeof(buf)))
			return false;
		p = buf;
	} else {
		p = (const u8 *)regs->pc + 2;
	}

	return memcmp(p, pattern, sizeof(pattern)) == 0 ||
		memcmp(p + 1, pattern, sizeof(pattern)) == 0;
}

/*
 * Illegal instruction. Fatal if in kernel space.
 */

static void do_illegal_instruction(struct pt_regs *regs)
{
#ifdef CONFIG_USER_ABI_CALL0_PROBE
	/*
	 * When a call0 application encounters an illegal instruction, the
	 * fast exception handler will attempt to set PS.WOE and retry the
	 * failing instruction.
	 * If we get here we know that the instruction is also illegal with
	 * PS.WOE set, so it's not related to the windowed option, hence
	 * PS.WOE may be cleared.
	 */
	if (regs->pc == current_thread_info()->ps_woe_fix_addr)
		regs->ps &= ~PS_WOE_MASK;
#endif
	if (check_div0(regs)) {
		do_div0(regs);
		return;
	}

	__die_if_kernel("Illegal instruction in kernel", regs, SIGKILL);

	/* If in user mode, send SIGILL signal to current process. */

	pr_info_ratelimited("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n",
			    current->comm, task_pid_nr(current), regs->pc);
	force_sig(SIGILL);
}

static void do_div0(struct pt_regs *regs)
{
	__die_if_kernel("Unhandled division by 0 in kernel", regs, SIGKILL);
	force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->pc);
}

#ifdef CONFIG_XTENSA_LOAD_STORE
static void do_load_store(struct pt_regs *regs)
{
	__die_if_kernel("Unhandled load/store exception in kernel",
			regs, SIGKILL);

	pr_info_ratelimited("Load/store error to %08lx in '%s' (pid = %d, pc = %#010lx)\n",
			    regs->excvaddr, current->comm,
			    task_pid_nr(current), regs->pc);
	force_sig_fault(SIGBUS, BUS_ADRERR, (void *)regs->excvaddr);
}
#endif

/*
 * Handle unaligned memory accesses from user space. Kill task.
 *
 * If CONFIG_XTENSA_UNALIGNED_USER is not set, we don't allow unaligned
 * memory accesses from user space.
 */

static void do_unaligned_user(struct pt_regs *regs)
{
	__die_if_kernel("Unhandled unaligned exception in kernel",
			regs, SIGKILL);

	pr_info_ratelimited("Unaligned memory access to %08lx in '%s' "
			    "(pid = %d, pc = %#010lx)\n",
			    regs->excvaddr, current->comm,
			    task_pid_nr(current), regs->pc);
	force_sig_fault(SIGBUS, BUS_ADRALN, (void *)regs->excvaddr);
}

#if XTENSA_HAVE_COPROCESSORS
static void do_coprocessor(struct pt_regs *regs)
{
	coprocessor_flush_release_all(current_thread_info());
}
#endif

/* Handle debug events.
 * When CONFIG_HAVE_HW_BREAKPOINT is on, this handler is called with
 * preemption disabled to avoid rescheduling and to keep the mapping of
 * hardware breakpoint structures to debug registers intact, so that
 * DEBUGCAUSE.DBNUM can be used in case of a data breakpoint hit.
 */
static void do_debug(struct pt_regs *regs)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	int ret = check_hw_breakpoint(regs);

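	/*
	 * Preemption was disabled by the low-level dispatch code (see the
	 * comment above this function); re-enable it now that the hardware
	 * breakpoint state has been examined.
	 */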
	preempt_enable();
	if (ret == 0)
		return;
#endif
	__die_if_kernel("Breakpoint in kernel", regs, SIGKILL);

	/* If in user mode, send SIGTRAP signal to current process */

	force_sig(SIGTRAP);
}

#define set_handler(type, cause, handler)				\
	do {								\
		unsigned int cpu;					\
									\
		for_each_possible_cpu(cpu)				\
			per_cpu(exc_table, cpu).type[cause] = (handler);\
	} while (0)
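/*
 * All CPUs are kept in sync by set_handler(), so reading the previous
 * handler from CPU 0 below is representative of every CPU.
 */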

/* Set exception C handler - for temporary use when probing exceptions */

xtensa_exception_handler *
__init trap_set_handler(int cause, xtensa_exception_handler *handler)
{
	void *previous = per_cpu(exc_table, 0).default_handler[cause];

	set_handler(default_handler, cause, handler);
	return previous;
}

static void trap_init_excsave(void)
{
	xtensa_set_sr(this_cpu_ptr(&exc_table), excsave1);
}

static void trap_init_debug(void)
{
	unsigned long debugsave = (unsigned long)this_cpu_ptr(&debug_table);

	this_cpu_ptr(&debug_table)->debug_exception = debug_exception;
	__asm__ __volatile__("wsr %0, excsave" __stringify(XCHAL_DEBUGLEVEL)
			     :: "a"(debugsave));
}

/*
 * Initialize dispatch tables.
 *
 * The exception vectors are stored compressed in the __init section in the
 * dispatch_init_table. This function initializes the following three tables
 * from that compressed table:
 * - fast user		first dispatch table for user exceptions
 * - fast kernel	first dispatch table for kernel exceptions
 * - default C-handler	C-handler called by the default fast handler.
 *
 * See vectors.S for more details.
 */
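/*
 * For example, the entry { EXCCAUSE_SYSTEM_CALL, USER, fast_syscall_user }
 * installs fast_syscall_user in the fast-user table only, while
 * { EXCCAUSE_SYSTEM_CALL, 0, system_call } installs system_call as the
 * default C handler for the same cause.
 */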

void __init trap_init(void)
{
	int i;

	/* Setup default vectors. */

	for (i = 0; i < EXCCAUSE_N; i++) {
		set_handler(fast_user_handler, i, user_exception);
		set_handler(fast_kernel_handler, i, kernel_exception);
		set_handler(default_handler, i, do_unhandled);
	}

	/* Setup specific handlers. */

	for (i = 0; dispatch_init_table[i].cause >= 0; i++) {
		int fast = dispatch_init_table[i].fast;
		int cause = dispatch_init_table[i].cause;
		void *handler = dispatch_init_table[i].handler;

		if (fast == 0)
			set_handler(default_handler, cause, handler);
		if ((fast & USER) != 0)
			set_handler(fast_user_handler, cause, handler);
		if ((fast & KRNL) != 0)
			set_handler(fast_kernel_handler, cause, handler);
	}

	/* Initialize EXCSAVE_1 to hold the address of the exception table. */
	trap_init_excsave();
	trap_init_debug();
}

#ifdef CONFIG_SMP
void secondary_trap_init(void)
{
	trap_init_excsave();
	trap_init_debug();
}
#endif

/*
 * This function dumps the current valid window frame and other base registers.
 */

void show_regs(struct pt_regs *regs)
{
	int i;

	show_regs_print_info(KERN_DEFAULT);

	for (i = 0; i < 16; i++) {
		if ((i % 8) == 0)
			pr_info("a%02d:", i);
		pr_cont(" %08lx", regs->areg[i]);
	}
	pr_cont("\n");
	pr_info("pc: %08lx, ps: %08lx, depc: %08lx, excvaddr: %08lx\n",
		regs->pc, regs->ps, regs->depc, regs->excvaddr);
	pr_info("lbeg: %08lx, lend: %08lx, lcount: %08lx, sar: %08lx\n",
		regs->lbeg, regs->lend, regs->lcount, regs->sar);
	if (user_mode(regs))
		pr_cont("wb: %08lx, ws: %08lx, wmask: %08lx, syscall: %ld\n",
			regs->windowbase, regs->windowstart, regs->wmask,
			regs->syscall);
}

static int show_trace_cb(struct stackframe *frame, void *data)
{
	const char *loglvl = data;

	if (kernel_text_address(frame->pc))
		printk("%s [<%08lx>] %pB\n",
		       loglvl, frame->pc, (void *)frame->pc);
	return 0;
}

static void show_trace(struct task_struct *task, unsigned long *sp,
		       const char *loglvl)
{
	if (!sp)
		sp = stack_pointer(task);

	printk("%sCall Trace:\n", loglvl);
	walk_stackframe(sp, show_trace_cb, (void *)loglvl);
}

#define STACK_DUMP_ENTRY_SIZE 4
#define STACK_DUMP_LINE_SIZE 16
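/* Dump the stack as 4-byte words, 16 bytes per line. */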
static size_t kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

struct stack_fragment {
	size_t len;
	size_t off;
	u8 *sp;
	const char *loglvl;
};

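/*
 * Dump the next chunk of the stack fragment as hex. A line that starts at
 * a frame's stack pointer (or the very first line) is prefixed with "> ";
 * after printing such a line the callback returns so that the stack walk
 * can advance to the next frame.
 */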
static int show_stack_fragment_cb(struct stackframe *frame, void *data)
{
	struct stack_fragment *sf = data;

	while (sf->off < sf->len) {
		u8 line[STACK_DUMP_LINE_SIZE];
		size_t line_len = sf->len - sf->off > STACK_DUMP_LINE_SIZE ?
			STACK_DUMP_LINE_SIZE : sf->len - sf->off;
		bool arrow = sf->off == 0;

		if (frame && frame->sp == (unsigned long)(sf->sp + sf->off))
			arrow = true;

		__memcpy(line, sf->sp + sf->off, line_len);
		print_hex_dump(sf->loglvl, arrow ? "> " : "  ", DUMP_PREFIX_NONE,
			       STACK_DUMP_LINE_SIZE, STACK_DUMP_ENTRY_SIZE,
			       line, line_len, false);
		sf->off += STACK_DUMP_LINE_SIZE;
		if (arrow)
			return 0;
	}
	return 1;
}

void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	struct stack_fragment sf;

	if (!sp)
		sp = stack_pointer(task);

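	/*
	 * Dump at most kstack_depth_to_print entries, but never past the
	 * end of the THREAD_SIZE-aligned stack region containing sp.
	 */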
	sf.len = min((-(size_t)sp) & (THREAD_SIZE - STACK_DUMP_ENTRY_SIZE),
		     kstack_depth_to_print * STACK_DUMP_ENTRY_SIZE);
	sf.off = 0;
	sf.sp = (u8 *)sp;
	sf.loglvl = loglvl;

	printk("%sStack:\n", loglvl);
	walk_stackframe(sp, show_stack_fragment_cb, &sf);
	while (sf.off < sf.len)
		show_stack_fragment_cb(NULL, &sf);
	show_trace(task, sp, loglvl);
}

DEFINE_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs, long err)
{
	static int die_counter;
	const char *pr = "";

	if (IS_ENABLED(CONFIG_PREEMPTION))
		pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";

	console_verbose();
	spin_lock_irq(&die_lock);

	pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter, pr);
	show_regs(regs);
	if (!user_mode(regs))
		show_stack(NULL, (unsigned long *)regs->areg[1], KERN_INFO);

	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	spin_unlock_irq(&die_lock);

	if (in_interrupt())
		panic("Fatal exception in interrupt");

	if (panic_on_oops)
		panic("Fatal exception");

	make_task_dead(err);
}