1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Kernel traps/events for Hexagon processor
4 *
5 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
6 */
7
8#include <linux/init.h>
9#include <linux/sched/signal.h>
10#include <linux/sched/debug.h>
11#include <linux/sched/task_stack.h>
12#include <linux/module.h>
13#include <linux/kallsyms.h>
14#include <linux/kdebug.h>
15#include <linux/syscalls.h>
16#include <linux/signal.h>
17#include <linux/ptrace.h>
18#include <asm/traps.h>
19#include <asm/vm_fault.h>
20#include <asm/syscall.h>
21#include <asm/registers.h>
22#include <asm/unistd.h>
23#include <asm/sections.h>
24#ifdef CONFIG_KGDB
25# include <linux/kgdb.h>
26#endif
27
28#define TRAP_SYSCALL	1
29#define TRAP_DEBUG	0xdb
30
31#ifdef CONFIG_GENERIC_BUG
/*
 * Every address is accepted as a BUG() site for now; other arches
 * (e.g. arch/sh) validate against the kernel text range.
 */
int is_valid_bugaddr(unsigned long addr)
{
	return 1;
}
37#endif /* CONFIG_GENERIC_BUG */
38
39static const char *ex_name(int ex)
40{
41	switch (ex) {
42	case HVM_GE_C_XPROT:
43	case HVM_GE_C_XUSER:
44		return "Execute protection fault";
45	case HVM_GE_C_RPROT:
46	case HVM_GE_C_RUSER:
47		return "Read protection fault";
48	case HVM_GE_C_WPROT:
49	case HVM_GE_C_WUSER:
50		return "Write protection fault";
51	case HVM_GE_C_XMAL:
52		return "Misaligned instruction";
53	case HVM_GE_C_WREG:
54		return "Multiple writes to same register in packet";
55	case HVM_GE_C_PCAL:
56		return "Program counter values that are not properly aligned";
57	case HVM_GE_C_RMAL:
58		return "Misaligned data load";
59	case HVM_GE_C_WMAL:
60		return "Misaligned data store";
61	case HVM_GE_C_INVI:
62	case HVM_GE_C_PRIVI:
63		return "Illegal instruction";
64	case HVM_GE_C_BUS:
65		return "Precise bus error";
66	case HVM_GE_C_CACHE:
67		return "Cache error";
68
69	case 0xdb:
70		return "Debugger trap";
71
72	default:
73		return "Unrecognized exception";
74	}
75}
76
/*
 * do_show_stack - walk a kernel call stack and print one line per frame.
 * @task:   task whose stack to walk (NULL means current)
 * @fp:     starting frame pointer (NULL means derive it: live r30 for the
 *          current task, the fp saved on the switch stack otherwise)
 * @ip:     starting instruction pointer (0 means use the saved link
 *          register, which sits one word above @fp)
 * @loglvl: printk log level prefix applied to each emitted line
 *
 * A saved fp of NULL is treated as an exception/syscall boundary: a
 * struct pt_regs is assumed to sit just above the frame, and the walk
 * reports the trap and resumes from the register state saved there.
 */
static void do_show_stack(struct task_struct *task, unsigned long *fp,
			  unsigned long ip, const char *loglvl)
{
	int kstack_depth_to_print = 24;
	unsigned long offset, size;
	const char *name = NULL;
	unsigned long *newfp;
	unsigned long low, high;
	char tmpstr[128];
	char *modname;
	int i;

	if (task == NULL)
		task = current;

	printk("%sCPU#%d, %s/%d, Call Trace:\n", loglvl, raw_smp_processor_id(),
		task->comm, task_pid_nr(task));

	if (fp == NULL) {
		if (task == current) {
			/* Read the live frame pointer register (r30). */
			asm("%0 = r30" : "=r" (fp));
		} else {
			/* Sleeping task: fp was saved at context switch. */
			fp = (unsigned long *)
			     ((struct hexagon_switch_stack *)
			     task->thread.switch_sp)->fp;
		}
	}

	/* Reject misaligned or first-page (obviously bogus) frame pointers. */
	if ((((unsigned long) fp) & 0x3) || ((unsigned long) fp < 0x1000)) {
		printk("%s-- Corrupt frame pointer %p\n", loglvl, fp);
		return;
	}

	/* Saved link reg is one word above FP */
	if (!ip)
		ip = *(fp+1);

	/* Expect kernel stack to be in-bounds */
	low = (unsigned long)task_stack_page(task);
	high = low + THREAD_SIZE - 8;
	low += sizeof(struct thread_info);

	for (i = 0; i < kstack_depth_to_print; i++) {

		name = kallsyms_lookup(ip, &size, &offset, &modname, tmpstr);

		printk("%s[%p] 0x%lx: %s + 0x%lx", loglvl, fp, ip, name, offset);
		/* Still print the frame, but flag it if fp left the stack. */
		if (((unsigned long) fp < low) || (high < (unsigned long) fp))
			printk(KERN_CONT " (FP out of bounds!)");
		if (modname)
			printk(KERN_CONT " [%s] ", modname);
		printk(KERN_CONT "\n");

		/* The caller's frame pointer was saved at *fp. */
		newfp = (unsigned long *) *fp;

		if (((unsigned long) newfp) & 0x3) {
			printk("%s-- Corrupt frame pointer %p\n", loglvl, newfp);
			break;
		}

		/* Attempt to continue past exception. */
		if (0 == newfp) {
			/*
			 * NULL saved fp marks an exception frame; pt_regs
			 * starts two words (8 bytes) above the frame.
			 */
			struct pt_regs *regs = (struct pt_regs *) (((void *)fp)
						+ 8);

			if (regs->syscall_nr != -1) {
				printk("%s-- trap0 -- syscall_nr: %ld", loglvl,
					regs->syscall_nr);
				printk(KERN_CONT "  psp: %lx  elr: %lx\n",
					 pt_psp(regs), pt_elr(regs));
				break;
			} else {
				/* really want to see more ... */
				kstack_depth_to_print += 6;
				printk("%s-- %s (0x%lx)  badva: %lx\n", loglvl,
					ex_name(pt_cause(regs)), pt_cause(regs),
					pt_badva(regs));
			}

			/* Resume walking from the pre-exception state. */
			newfp = (unsigned long *) regs->r30;
			ip = pt_elr(regs);
		} else {
			/* Next return address is the saved link register. */
			ip = *(newfp + 1);
		}

		/* If link reg is null, we are done. */
		if (ip == 0x0)
			break;

		/* If newfp isn't larger, we're tracing garbage. */
		if (newfp > fp)
			fp = newfp;
		else
			break;
	}
}
173
/*
 * Arch entry point for dumping a task's call trace.  The saved link
 * register sits one word above the frame pointer, so an ip of 0 tells
 * do_show_stack() to recover it from the frame itself.
 */
void show_stack(struct task_struct *task, unsigned long *fp, const char *loglvl)
{
	do_show_stack(task, fp, 0UL, loglvl);
}
179
180int die(const char *str, struct pt_regs *regs, long err)
181{
182	static struct {
183		spinlock_t lock;
184		int counter;
185	} die = {
186		.lock = __SPIN_LOCK_UNLOCKED(die.lock),
187		.counter = 0
188	};
189
190	console_verbose();
191	oops_enter();
192
193	spin_lock_irq(&die.lock);
194	bust_spinlocks(1);
195	printk(KERN_EMERG "Oops: %s[#%d]:\n", str, ++die.counter);
196
197	if (notify_die(DIE_OOPS, str, regs, err, pt_cause(regs), SIGSEGV) ==
198	    NOTIFY_STOP)
199		return 1;
200
201	print_modules();
202	show_regs(regs);
203	do_show_stack(current, &regs->r30, pt_elr(regs), KERN_EMERG);
204
205	bust_spinlocks(0);
206	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
207
208	spin_unlock_irq(&die.lock);
209
210	if (in_interrupt())
211		panic("Fatal exception in interrupt");
212
213	if (panic_on_oops)
214		panic("Fatal exception");
215
216	oops_exit();
217	make_task_dead(err);
218	return 0;
219}
220
/*
 * die_if_kernel - oops only when the fault happened in kernel mode.
 * User-mode faults are left for the caller to signal; returns 0 then,
 * otherwise whatever die() returns.
 */
int die_if_kernel(char *str, struct pt_regs *regs, long err)
{
	if (user_mode(regs))
		return 0;

	return die(str, regs, err);
}
228
/*
 * It's not clear that misaligned fetches are ever recoverable.
 */
static void misaligned_instruction(struct pt_regs *regs)
{
	/* Fatal in kernel mode; user mode gets SIGBUS. */
	die_if_kernel("Misaligned Instruction", regs, 0);
	force_sig(SIGBUS);
}
237
/*
 * Misaligned loads and stores, on the other hand, can be
 * emulated, and probably should be, some day.  But for now
 * they will be considered fatal.
 */
static void misaligned_data_load(struct pt_regs *regs)
{
	/* Fatal in kernel mode; user mode gets SIGBUS. */
	die_if_kernel("Misaligned Data Load", regs, 0);
	force_sig(SIGBUS);
}
248
/* Misaligned store: same policy as misaligned loads — fatal for now. */
static void misaligned_data_store(struct pt_regs *regs)
{
	/* Fatal in kernel mode; user mode gets SIGBUS. */
	die_if_kernel("Misaligned Data Store", regs, 0);
	force_sig(SIGBUS);
}
254
/* Invalid or privileged instruction in an unprivileged context. */
static void illegal_instruction(struct pt_regs *regs)
{
	/* Fatal in kernel mode; user mode gets SIGILL. */
	die_if_kernel("Illegal Instruction", regs, 0);
	force_sig(SIGILL);
}
260
/*
 * Precise bus errors may be recoverable with a retry,
 * but for now, treat them as irrecoverable.
 */
static void precise_bus_error(struct pt_regs *regs)
{
	/* Fatal in kernel mode; user mode gets SIGBUS. */
	die_if_kernel("Precise Bus Error", regs, 0);
	force_sig(SIGBUS);
}
270
/*
 * If anything is to be done here other than panic,
 * it will probably be complex and migrate to another
 * source module.  For now, just die.
 *
 * Note: dies unconditionally, even for user-mode faults.
 */
static void cache_error(struct pt_regs *regs)
{
	die("Cache Error", regs, 0);
}
280
281/*
282 * General exception handler
283 */
284void do_genex(struct pt_regs *regs);
285void do_genex(struct pt_regs *regs)
286{
287	/*
288	 * Decode Cause and Dispatch
289	 */
290	switch (pt_cause(regs)) {
291	case HVM_GE_C_XPROT:
292	case HVM_GE_C_XUSER:
293		execute_protection_fault(regs);
294		break;
295	case HVM_GE_C_RPROT:
296	case HVM_GE_C_RUSER:
297		read_protection_fault(regs);
298		break;
299	case HVM_GE_C_WPROT:
300	case HVM_GE_C_WUSER:
301		write_protection_fault(regs);
302		break;
303	case HVM_GE_C_XMAL:
304		misaligned_instruction(regs);
305		break;
306	case HVM_GE_C_WREG:
307		illegal_instruction(regs);
308		break;
309	case HVM_GE_C_PCAL:
310		misaligned_instruction(regs);
311		break;
312	case HVM_GE_C_RMAL:
313		misaligned_data_load(regs);
314		break;
315	case HVM_GE_C_WMAL:
316		misaligned_data_store(regs);
317		break;
318	case HVM_GE_C_INVI:
319	case HVM_GE_C_PRIVI:
320		illegal_instruction(regs);
321		break;
322	case HVM_GE_C_BUS:
323		precise_bus_error(regs);
324		break;
325	case HVM_GE_C_CACHE:
326		cache_error(regs);
327		break;
328	default:
329		/* Halt and catch fire */
330		panic("Unrecognized exception 0x%lx\n", pt_cause(regs));
331		break;
332	}
333}
334
/*
 * Trap0 handler: dispatches system calls (cause TRAP_SYSCALL) and
 * debugger breakpoints (cause TRAP_DEBUG).  All other trap0 cause
 * codes are silently ignored (see comment at the end).
 */
void do_trap0(struct pt_regs *regs);
void do_trap0(struct pt_regs *regs)
{
	syscall_fn syscall;

	switch (pt_cause(regs)) {
	case TRAP_SYSCALL:
		/* System call is trap0 #1 */

		/* allow strace to catch syscall args  */
		if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE) &&
			ptrace_report_syscall_entry(regs)))
			return;  /*  return -ENOSYS somewhere?  */

		/* Interrupts should be re-enabled for syscall processing */
		__vmsetie(VM_INT_ENABLE);

		/*
		 * System call number is in r6, arguments in r0..r5.
		 * Fortunately, no Linux syscall has more than 6 arguments,
		 * and Hexagon ABI passes first 6 arguments in registers.
		 * 64-bit arguments are passed in odd/even register pairs.
		 * Fortunately, we have no system calls that take more
		 * than three arguments with more than one 64-bit value.
		 * Should that change, we'd need to redesign to copy
		 * between user and kernel stacks.
		 */
		regs->syscall_nr = regs->r06;

		/*
		 * GPR R0 carries the first parameter, and is also used
		 * to report the return value.  We need a backup of
		 * the user's value in case we need to do a late restart
		 * of the system call.
		 */
		regs->restart_r0 = regs->r00;

		/* Out-of-range numbers fail with -1 rather than faulting. */
		if ((unsigned long) regs->syscall_nr >= __NR_syscalls) {
			regs->r00 = -1;
		} else {
			syscall = (syscall_fn)
				  (sys_call_table[regs->syscall_nr]);
			regs->r00 = syscall(regs->r00, regs->r01,
				   regs->r02, regs->r03,
				   regs->r04, regs->r05);
		}

		/* allow strace to get the syscall return state  */
		if (unlikely(test_thread_flag(TIF_SYSCALL_TRACE)))
			ptrace_report_syscall_exit(regs, 0);

		break;
	case TRAP_DEBUG:
		/* Trap0 0xdb is debug breakpoint */
		if (user_mode(regs)) {
			/*
			 * Some architectures add some per-thread state
			 * to distinguish between breakpoint traps and
			 * trace traps.  We may want to do that, and
			 * set the si_code value appropriately, or we
			 * may want to use a different trap0 flavor.
			 */
			force_sig_fault(SIGTRAP, TRAP_BRKPT,
					(void __user *) pt_elr(regs));
		} else {
			/* Kernel-mode breakpoint: hand off to KGDB if built. */
#ifdef CONFIG_KGDB
			kgdb_handle_exception(pt_cause(regs), SIGTRAP,
					      TRAP_BRKPT, regs);
#endif
		}
		break;
	}
	/* Ignore other trap0 codes for now, especially 0 (Angel calls) */
}
409
/*
 * Machine check exception handler.  Machine checks are treated as
 * unrecoverable: stop the virtual machine outright.
 */
void do_machcheck(struct pt_regs *regs);
void do_machcheck(struct pt_regs *regs)
{
	/* Halt and catch fire */
	__vmstop();
}
419
/*
 * Treat this like the old 0xdb trap.
 *
 * Rewrites the cause field of the saved VM event record to TRAP_DEBUG
 * and funnels the exception through the trap0 breakpoint path.
 */

void do_debug_exception(struct pt_regs *regs);
void do_debug_exception(struct pt_regs *regs)
{
	regs->hvmer.vmest &= ~HVM_VMEST_CAUSE_MSK;
	regs->hvmer.vmest |= (TRAP_DEBUG << HVM_VMEST_CAUSE_SFT);
	do_trap0(regs);
}
431