/*	$NetBSD: fault.c,v 1.45 2003/11/20 14:44:36 scw Exp $	*/

/*-
 * Copyright 2004 Olivier Houchard
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1994-1997 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * fault.c
 *
 * Fault handlers
 *
 * Created      : 28/11/94
 */

#include "opt_ktrace.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/arm/arm/trap.c 245551 2013-01-17 09:52:35Z andrew $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/signalvar.h>
#include <sys/ktr.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif
#include <sys/ptrace.h>
#include <sys/pioctl.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>

#include <machine/cpuconf.h>
#include <machine/vmparam.h>
#include <machine/frame.h>
#include <machine/cpu.h>
#include <machine/intr.h>
#include <machine/pcb.h>
#include <machine/proc.h>
#include <machine/swi.h>

#include <security/audit/audit.h>

#ifdef KDB
#include <sys/kdb.h>
#endif

void swi_handler(trapframe_t *);
void undefinedinstruction(trapframe_t *);

#include <machine/disassem.h>
#include <machine/machdep.h>

extern char fusubailout[];

#ifdef DEBUG
int last_fault_code;	/* For the benefit of pmap_fault_fixup() */
#endif

#if defined(CPU_ARM7TDMI)
/* These CPUs may need data/prefetch abort fixups */
#define	CPU_ABORT_FIXUP_REQUIRED
#endif

struct ksig {
	int signb;
	u_long code;
};
struct data_abort {
	int (*func)(trapframe_t *, u_int, u_int, struct thread *, struct ksig *);
	const char *desc;
};

static int dab_fatal(trapframe_t *, u_int, u_int, struct thread *, struct ksig *);
static int dab_align(trapframe_t *, u_int, u_int, struct thread *, struct ksig *);
static int dab_buserr(trapframe_t *, u_int, u_int, struct thread *, struct ksig *);

static const struct data_abort data_aborts[] = {
	{dab_fatal,	"Vector Exception"},
	{dab_align,	"Alignment Fault 1"},
	{dab_fatal,	"Terminal Exception"},
	{dab_align,	"Alignment Fault 3"},
	{dab_buserr,	"External Linefetch Abort (S)"},
	{NULL,		"Translation Fault (S)"},
	{dab_buserr,	"External Linefetch Abort (P)"},
	{NULL,		"Translation Fault (P)"},
	{dab_buserr,	"External Non-Linefetch Abort (S)"},
	{NULL,		"Domain Fault (S)"},
	{dab_buserr,	"External Non-Linefetch Abort (P)"},
	{NULL,		"Domain Fault (P)"},
	{dab_buserr,	"External Translation Abort (L1)"},
	{NULL,		"Permission Fault (S)"},
	{dab_buserr,	"External Translation Abort (L2)"},
	{NULL,		"Permission Fault (P)"}
};
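
/*
 * The table above is indexed by the low four bits of the fault status
 * register (fsr & FAULT_TYPE_MASK).  A NULL handler means the fault is an
 * ordinary VM fault that data_abort_handler() resolves itself; the other
 * entries are dispatched to the listed routine.
 */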

/* Determine if a fault came from user mode */
#define	TRAP_USERMODE(tf)	((tf->tf_spsr & PSR_MODE) == PSR_USR32_MODE)

/* Determine if 'x' is a permission fault */
#define	IS_PERMISSION_FAULT(x)					\
	(((1 << ((x) & FAULT_TYPE_MASK)) &			\
	  ((1 << FAULT_PERM_P) | (1 << FAULT_PERM_S))) != 0)
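
/*
 * Example (illustrative): a section permission fault reports a fault type of
 * FAULT_PERM_S (0xd in the ARM FSR encoding), so (1 << 0xd) intersects the
 * mask above and the macro is true, while e.g. a translation fault
 * (FAULT_TRANS_S/FAULT_TRANS_P) is not.
 */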

static __inline void
call_trapsignal(struct thread *td, int sig, u_long code)
{
	ksiginfo_t ksi;

	ksiginfo_init_trap(&ksi);
	ksi.ksi_signo = sig;
	ksi.ksi_code = (int)code;
	trapsignal(td, &ksi);
}

static __inline int
data_abort_fixup(trapframe_t *tf, u_int fsr, u_int far, struct thread *td, struct ksig *ksig)
{
#ifdef CPU_ABORT_FIXUP_REQUIRED
	int error;

	/* Call the cpu specific data abort fixup routine */
	error = cpu_dataabt_fixup(tf);
	if (__predict_true(error != ABORT_FIXUP_FAILED))
		return (error);

	/*
	 * Oops, couldn't fix up the instruction
	 */
	printf("data_abort_fixup: fixup for %s mode data abort failed.\n",
	    TRAP_USERMODE(tf) ? "user" : "kernel");
	printf("pc = 0x%08x, opcode 0x%08x, insn = ", tf->tf_pc,
	    *((u_int *)tf->tf_pc));
	disassemble(tf->tf_pc);

	/* Die now if this happened in kernel mode */
	if (!TRAP_USERMODE(tf))
		dab_fatal(tf, fsr, far, td, ksig);

	return (error);
#else
	return (ABORT_FIXUP_OK);
#endif /* CPU_ABORT_FIXUP_REQUIRED */
}

void
data_abort_handler(trapframe_t *tf)
{
	struct vm_map *map;
	struct pcb *pcb;
	struct thread *td;
	u_int user, far, fsr;
	vm_prot_t ftype;
	void *onfault;
	vm_offset_t va;
	int error = 0;
	struct ksig ksig;
	struct proc *p;

	/* Grab FAR/FSR before enabling interrupts */
	far = cpu_faultaddress();
	fsr = cpu_faultstatus();
#if 0
	printf("data abort: %p (from %p %p)\n", (void*)far, (void*)tf->tf_pc,
	    (void*)tf->tf_svc_lr);
#endif

	/* Update vmmeter statistics */
#if 0
	vmexp.traps++;
#endif

	td = curthread;
	p = td->td_proc;

	PCPU_INC(cnt.v_trap);
	/* Data abort came from user mode? */
	user = TRAP_USERMODE(tf);

	if (user) {
		td->td_pticks = 0;
		td->td_frame = tf;
		if (td->td_ucred != td->td_proc->p_ucred)
			cred_update_thread(td);
	}
	/* Grab the current pcb */
	pcb = td->td_pcb;
	/* Re-enable interrupts if they were enabled previously */
	if (td->td_md.md_spinlock_count == 0) {
		if (__predict_true((tf->tf_spsr & I32_bit) == 0))
			enable_interrupts(I32_bit);
		if (__predict_true((tf->tf_spsr & F32_bit) == 0))
			enable_interrupts(F32_bit);
	}

	/* Invoke the appropriate handler, if necessary */
	if (__predict_false(data_aborts[fsr & FAULT_TYPE_MASK].func != NULL)) {
		if ((data_aborts[fsr & FAULT_TYPE_MASK].func)(tf, fsr, far,
		    td, &ksig)) {
			goto do_trapsignal;
		}
		goto out;
	}

	/*
	 * At this point, we're dealing with one of the following data aborts:
	 *
	 *  FAULT_TRANS_S  - Translation -- Section
	 *  FAULT_TRANS_P  - Translation -- Page
	 *  FAULT_DOMAIN_S - Domain -- Section
	 *  FAULT_DOMAIN_P - Domain -- Page
	 *  FAULT_PERM_S   - Permission -- Section
	 *  FAULT_PERM_P   - Permission -- Page
	 *
	 * These are the main virtual memory-related faults signalled by
	 * the MMU.
	 */

	/* fusubailout is used by [fs]uswintr to avoid page faulting */
	if (__predict_false(pcb->pcb_onfault == fusubailout)) {
		tf->tf_r0 = EFAULT;
		tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault;
		return;
	}

	/*
	 * Make sure the Program Counter is sane. We could fall foul of
	 * someone executing Thumb code, in which case the PC might not
	 * be word-aligned. This would cause a kernel alignment fault
	 * further down if we have to decode the current instruction.
	 * XXX: It would be nice to be able to support Thumb at some point.
	 */
	if (__predict_false((tf->tf_pc & 3) != 0)) {
		if (user) {
			/*
			 * Give the user an illegal instruction signal.
			 */
			/* Deliver a SIGILL to the process */
			ksig.signb = SIGILL;
			ksig.code = 0;
			goto do_trapsignal;
		}

		/*
		 * The kernel never executes Thumb code.
		 */
		printf("\ndata_abort_fault: Misaligned Kernel-mode "
		    "Program Counter\n");
		dab_fatal(tf, fsr, far, td, &ksig);
	}

	/* See if the cpu state needs to be fixed up */
	switch (data_abort_fixup(tf, fsr, far, td, &ksig)) {
	case ABORT_FIXUP_RETURN:
		return;
	case ABORT_FIXUP_FAILED:
		/* Deliver a SIGILL to the process */
		ksig.signb = SIGILL;
		ksig.code = 0;
		goto do_trapsignal;
	default:
		break;
	}

	va = trunc_page((vm_offset_t)far);

	/*
	 * It is only a kernel address space fault iff:
	 *	1. user == 0  and
	 *	2. pcb_onfault not set or
	 *	3. pcb_onfault set and not LDRT/LDRBT/STRT/STRBT instruction.
	 */
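	/*
	 * (The 0x05200000/0x04200000 test below picks out the
	 * LDRT/LDRBT/STRT/STRBT encodings: a single data transfer with
	 * P == 0 and W == 1, i.e. the user-mode "translated" form of the
	 * instruction.)
	 */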
	if (user == 0 && (va >= VM_MIN_KERNEL_ADDRESS ||
	    (va < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW)) &&
	    __predict_true((pcb->pcb_onfault == NULL ||
	     (ReadWord(tf->tf_pc) & 0x05200000) != 0x04200000))) {
		map = kernel_map;

		/* Was the fault due to the FPE/IPKDB ? */
		if (__predict_false((tf->tf_spsr & PSR_MODE)==PSR_UND32_MODE)) {

			/*
			 * Force exit via userret()
			 * This is necessary as the FPE is an extension to
			 * userland that actually runs in a privileged mode
			 * but uses USR mode permissions for its accesses.
			 */
			user = 1;
			ksig.signb = SIGSEGV;
			ksig.code = 0;
			goto do_trapsignal;
		}
	} else {
		map = &td->td_proc->p_vmspace->vm_map;
	}

	/*
	 * We need to know whether the page should be mapped
	 * as R or R/W. The MMU does not give us the info as
	 * to whether the fault was caused by a read or a write.
	 *
	 * However, we know that a permission fault can only be
	 * the result of a write to a read-only location, so
	 * we can deal with those quickly.
	 *
	 * Otherwise we need to disassemble the instruction
	 * responsible to determine if it was a write.
	 */
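	/*
	 * Illustrative example: "str r0, [r1]" assembles to 0xe5810000;
	 * masked with 0x0c100000 this leaves 0x04000000 (single data
	 * transfer, L bit clear), so it is classified as a write below.
	 */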
	if (IS_PERMISSION_FAULT(fsr)) {
		ftype = VM_PROT_WRITE;
	} else {
		u_int insn = ReadWord(tf->tf_pc);

		if (((insn & 0x0c100000) == 0x04000000) ||	/* STR/STRB */
		    ((insn & 0x0e1000b0) == 0x000000b0) ||	/* STRH/STRD */
		    ((insn & 0x0a100000) == 0x08000000))	/* STM/CDT */
			ftype = VM_PROT_WRITE;
		else if ((insn & 0x0fb00ff0) == 0x01000090)	/* SWP */
			ftype = VM_PROT_READ | VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;
	}

	/*
	 * See if the fault is as a result of ref/mod emulation,
	 * or domain mismatch.
	 */
#ifdef DEBUG
	last_fault_code = fsr;
#endif
	if (pmap_fault_fixup(vmspace_pmap(td->td_proc->p_vmspace), va, ftype,
	    user)) {
		goto out;
	}

	onfault = pcb->pcb_onfault;
	pcb->pcb_onfault = NULL;
	if (map != kernel_map) {
		PROC_LOCK(p);
		p->p_lock++;
		PROC_UNLOCK(p);
	}
	error = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	pcb->pcb_onfault = onfault;

	if (map != kernel_map) {
		PROC_LOCK(p);
		p->p_lock--;
		PROC_UNLOCK(p);
	}
	if (__predict_true(error == 0))
		goto out;
	if (user == 0) {
		if (pcb->pcb_onfault) {
			tf->tf_r0 = error;
			tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault;
			return;
		}

		printf("\nvm_fault(%p, %x, %x, 0) -> %x\n", map, va, ftype,
		    error);
		dab_fatal(tf, fsr, far, td, &ksig);
	}

	if (error == ENOMEM) {
		printf("VM: pid %d (%s), uid %d killed: "
		    "out of swap\n", td->td_proc->p_pid, td->td_name,
		    (td->td_proc->p_ucred) ?
		     td->td_proc->p_ucred->cr_uid : -1);
		ksig.signb = SIGKILL;
	} else {
		ksig.signb = SIGSEGV;
	}
	ksig.code = 0;
do_trapsignal:
	call_trapsignal(td, ksig.signb, ksig.code);
out:
	/* If returning to user mode, make sure to invoke userret() */
	if (user)
		userret(td, tf);
}

/*
 * dab_fatal() handles the following data aborts:
 *
 *  FAULT_WRTBUF_0 - Vector Exception
 *  FAULT_WRTBUF_1 - Terminal Exception
 *
 * We should never see these on a properly functioning system.
 *
 * This function is also called by the other handlers if they
 * detect a fatal problem.
 *
 * Note: If 'td' is NULL, we assume we're dealing with a prefetch abort.
 */
static int
dab_fatal(trapframe_t *tf, u_int fsr, u_int far, struct thread *td, struct ksig *ksig)
{
	const char *mode;

	mode = TRAP_USERMODE(tf) ? "user" : "kernel";

	disable_interrupts(I32_bit|F32_bit);
	if (td != NULL) {
		printf("Fatal %s mode data abort: '%s'\n", mode,
		    data_aborts[fsr & FAULT_TYPE_MASK].desc);
		printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr);
		if ((fsr & FAULT_IMPRECISE) == 0)
			printf("%08x, ", far);
		else
			printf("Invalid,  ");
		printf("spsr=%08x\n", tf->tf_spsr);
	} else {
		printf("Fatal %s mode prefetch abort at 0x%08x\n",
		    mode, tf->tf_pc);
		printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr);
	}

	printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n",
	    tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3);
	printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n",
	    tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7);
	printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n",
	    tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11);
	printf("r12=%08x, ", tf->tf_r12);

	if (TRAP_USERMODE(tf))
		printf("usp=%08x, ulr=%08x",
		    tf->tf_usr_sp, tf->tf_usr_lr);
	else
		printf("ssp=%08x, slr=%08x",
		    tf->tf_svc_sp, tf->tf_svc_lr);
	printf(", pc =%08x\n\n", tf->tf_pc);

#ifdef KDB
	if (debugger_on_panic || kdb_active)
		kdb_trap(fsr, 0, tf);
#endif
	panic("Fatal abort");
	/*NOTREACHED*/
}

/*
 * dab_align() handles the following data aborts:
 *
 *  FAULT_ALIGN_0 - Alignment fault
 *  FAULT_ALIGN_1 - Alignment fault
 *
 * These faults are fatal if they happen in kernel mode. Otherwise, we
 * deliver a bus error to the process.
 */
static int
dab_align(trapframe_t *tf, u_int fsr, u_int far, struct thread *td, struct ksig *ksig)
{

	/* Alignment faults are always fatal if they occur in kernel mode */
	if (!TRAP_USERMODE(tf)) {
		if (!td || !td->td_pcb->pcb_onfault)
			dab_fatal(tf, fsr, far, td, ksig);
		tf->tf_r0 = EFAULT;
		tf->tf_pc = (int)td->td_pcb->pcb_onfault;
		return (0);
	}

	/* pcb_onfault *must* be NULL at this point */

	/* See if the cpu state needs to be fixed up */
	(void) data_abort_fixup(tf, fsr, far, td, ksig);

	/* Deliver a bus error signal to the process */
	ksig->code = 0;
	ksig->signb = SIGBUS;
	td->td_frame = tf;

	return (1);
}

/*
 * dab_buserr() handles the following data aborts:
 *
 *  FAULT_BUSERR_0 - External Abort on Linefetch -- Section
 *  FAULT_BUSERR_1 - External Abort on Linefetch -- Page
 *  FAULT_BUSERR_2 - External Abort on Non-linefetch -- Section
 *  FAULT_BUSERR_3 - External Abort on Non-linefetch -- Page
 *  FAULT_BUSTRNL1 - External abort on Translation -- Level 1
 *  FAULT_BUSTRNL2 - External abort on Translation -- Level 2
 *
 * If pcb_onfault is set, flag the fault and return to the handler.
 * If the fault occurred in user mode, give the process a SIGBUS.
 *
 * Note: On XScale, FAULT_BUSERR_0, FAULT_BUSERR_1, and FAULT_BUSERR_2
 * can be flagged as imprecise in the FSR. This causes a real headache
 * since some of the machine state is lost. In this case, tf->tf_pc
 * may not actually point to the offending instruction. In fact, if
 * we've taken a double abort fault, it generally points somewhere near
 * the top of "data_abort_entry" in exception.S.
 *
 * In all other cases, these data aborts are considered fatal.
 */
static int
dab_buserr(trapframe_t *tf, u_int fsr, u_int far, struct thread *td, struct ksig *ksig)
{
	struct pcb *pcb = td->td_pcb;

#ifdef __XSCALE__
	if ((fsr & FAULT_IMPRECISE) != 0 &&
	    (tf->tf_spsr & PSR_MODE) == PSR_ABT32_MODE) {
		/*
		 * Oops, an imprecise, double abort fault. We've lost the
		 * r14_abt/spsr_abt values corresponding to the original
		 * abort, and the spsr saved in the trapframe indicates
		 * ABT mode.
		 */
		tf->tf_spsr &= ~PSR_MODE;

		/*
		 * We use a simple heuristic to determine if the double abort
		 * happened as a result of a kernel or user mode access.
		 * If the current trapframe is at the top of the kernel stack,
		 * the fault _must_ have come from user mode.
		 */
		if (tf != ((trapframe_t *)pcb->un_32.pcb32_sp) - 1) {
			/*
			 * Kernel mode. We're either about to die a
			 * spectacular death, or pcb_onfault will come
			 * to our rescue. Either way, the current value
			 * of tf->tf_pc is irrelevant.
			 */
			tf->tf_spsr |= PSR_SVC32_MODE;
			if (pcb->pcb_onfault == NULL)
				printf("\nKernel mode double abort!\n");
		} else {
			/*
			 * User mode. We've lost the program counter at the
			 * time of the fault (not that it was accurate anyway;
			 * it's not called an imprecise fault for nothing).
			 * About all we can do is copy r14_usr to tf_pc and
			 * hope for the best. The process is about to get a
			 * SIGBUS, so it's probably history anyway.
			 */
			tf->tf_spsr |= PSR_USR32_MODE;
			tf->tf_pc = tf->tf_usr_lr;
		}
	}

	/* FAR is invalid for imprecise exceptions */
	if ((fsr & FAULT_IMPRECISE) != 0)
		far = 0;
#endif /* __XSCALE__ */

	if (pcb->pcb_onfault) {
		tf->tf_r0 = EFAULT;
		tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault;
		return (0);
	}

	/* See if the cpu state needs to be fixed up */
	(void) data_abort_fixup(tf, fsr, far, td, ksig);

	/*
	 * At this point, if the fault happened in kernel mode, we're toast
	 */
	if (!TRAP_USERMODE(tf))
		dab_fatal(tf, fsr, far, td, ksig);

	/* Deliver a bus error signal to the process */
	ksig->signb = SIGBUS;
	ksig->code = 0;
	td->td_frame = tf;

	return (1);
}

static __inline int
prefetch_abort_fixup(trapframe_t *tf, struct ksig *ksig)
{
#ifdef CPU_ABORT_FIXUP_REQUIRED
	int error;

	/* Call the cpu specific prefetch abort fixup routine */
	error = cpu_prefetchabt_fixup(tf);
	if (__predict_true(error != ABORT_FIXUP_FAILED))
		return (error);

	/*
	 * Oops, couldn't fix up the instruction
	 */
	printf(
	    "prefetch_abort_fixup: fixup for %s mode prefetch abort failed.\n",
	    TRAP_USERMODE(tf) ? "user" : "kernel");
	printf("pc = 0x%08x, opcode 0x%08x, insn = ", tf->tf_pc,
	    *((u_int *)tf->tf_pc));
	disassemble(tf->tf_pc);

	/* Die now if this happened in kernel mode */
	if (!TRAP_USERMODE(tf))
		dab_fatal(tf, 0, tf->tf_pc, NULL, ksig);

	return (error);
#else
	return (ABORT_FIXUP_OK);
#endif /* CPU_ABORT_FIXUP_REQUIRED */
}

/*
 * void prefetch_abort_handler(trapframe_t *tf)
 *
 * Abort handler called when instruction execution occurs at
 * a non-existent or restricted (access permissions) memory page.
 * If the address is invalid and we were in SVC mode then panic as
 * the kernel should never prefetch abort.
 * If the address is invalid and the page is mapped then the user process
 * does not have read permission so send it a signal.
 * Otherwise fault the page in and try again.
 */
void
prefetch_abort_handler(trapframe_t *tf)
{
	struct thread *td;
	struct proc *p;
	struct vm_map *map;
	vm_offset_t fault_pc, va;
	int error = 0;
	struct ksig ksig;

#if 0
	/* Update vmmeter statistics */
	uvmexp.traps++;
#endif
#if 0
	printf("prefetch abort handler: %p %p\n", (void*)tf->tf_pc,
	    (void*)tf->tf_usr_lr);
#endif

	td = curthread;
	p = td->td_proc;
	PCPU_INC(cnt.v_trap);

	if (TRAP_USERMODE(tf)) {
		td->td_frame = tf;
		if (td->td_ucred != td->td_proc->p_ucred)
			cred_update_thread(td);
	}
	fault_pc = tf->tf_pc;
	if (td->td_md.md_spinlock_count == 0) {
		if (__predict_true((tf->tf_spsr & I32_bit) == 0))
			enable_interrupts(I32_bit);
		if (__predict_true((tf->tf_spsr & F32_bit) == 0))
			enable_interrupts(F32_bit);
	}

	/* See if the cpu state needs to be fixed up */
	switch (prefetch_abort_fixup(tf, &ksig)) {
	case ABORT_FIXUP_RETURN:
		return;
	case ABORT_FIXUP_FAILED:
		/* Deliver a SIGILL to the process */
		ksig.signb = SIGILL;
		ksig.code = 0;
		td->td_frame = tf;
		goto do_trapsignal;
	default:
		break;
	}

	/* Prefetch aborts cannot happen in kernel mode */
	if (__predict_false(!TRAP_USERMODE(tf)))
		dab_fatal(tf, 0, tf->tf_pc, NULL, &ksig);
	td->td_pticks = 0;

	/* Ok validate the address, can only execute in USER space */
	if (__predict_false(fault_pc >= VM_MAXUSER_ADDRESS ||
	    (fault_pc < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW))) {
		ksig.signb = SIGSEGV;
		ksig.code = 0;
		goto do_trapsignal;
	}

	map = &td->td_proc->p_vmspace->vm_map;
	va = trunc_page(fault_pc);

	/*
	 * See if the pmap can handle this fault on its own...
	 */
#ifdef DEBUG
	last_fault_code = -1;
#endif
	if (pmap_fault_fixup(map->pmap, va, VM_PROT_READ, 1))
		goto out;

	if (map != kernel_map) {
		PROC_LOCK(p);
		p->p_lock++;
		PROC_UNLOCK(p);
	}

	error = vm_fault(map, va, VM_PROT_READ | VM_PROT_EXECUTE,
	    VM_FAULT_NORMAL);
	if (map != kernel_map) {
		PROC_LOCK(p);
		p->p_lock--;
		PROC_UNLOCK(p);
	}

	if (__predict_true(error == 0))
		goto out;

	if (error == ENOMEM) {
		printf("VM: pid %d (%s), uid %d killed: "
		    "out of swap\n", td->td_proc->p_pid, td->td_name,
		    (td->td_proc->p_ucred) ?
		     td->td_proc->p_ucred->cr_uid : -1);
		ksig.signb = SIGKILL;
	} else {
		ksig.signb = SIGSEGV;
	}
	ksig.code = 0;

do_trapsignal:
	call_trapsignal(td, ksig.signb, ksig.code);

out:
	userret(td, tf);
}

extern int badaddr_read_1(const uint8_t *, uint8_t *);
extern int badaddr_read_2(const uint16_t *, uint16_t *);
extern int badaddr_read_4(const uint32_t *, uint32_t *);
/*
 * Tentatively read an 8, 16, or 32-bit value from 'addr'.
 * If the read succeeds, the value is written to 'rptr' and zero is returned.
 * Else, return EFAULT.
 */
int
badaddr_read(void *addr, size_t size, void *rptr)
{
	union {
		uint8_t v1;
		uint16_t v2;
		uint32_t v4;
	} u;
	int rv;

	cpu_drain_writebuf();

	/* Read from the test address. */
	switch (size) {
	case sizeof(uint8_t):
		rv = badaddr_read_1(addr, &u.v1);
		if (rv == 0 && rptr)
			*(uint8_t *) rptr = u.v1;
		break;

	case sizeof(uint16_t):
		rv = badaddr_read_2(addr, &u.v2);
		if (rv == 0 && rptr)
			*(uint16_t *) rptr = u.v2;
		break;

	case sizeof(uint32_t):
		rv = badaddr_read_4(addr, &u.v4);
		if (rv == 0 && rptr)
			*(uint32_t *) rptr = u.v4;
		break;

	default:
		panic("badaddr: invalid size (%lu)", (u_long) size);
	}

	/* Return EFAULT if the address was invalid, else zero */
	return (rv);
}
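
/*
 * Example use (illustrative only; 'candidate' is a hypothetical address):
 *
 *	uint32_t val;
 *
 *	if (badaddr_read((void *)candidate, sizeof(val), &val) == 0)
 *		... the address responded and 'val' holds the data read ...
 */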

int
cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
{
	struct proc *p;
	register_t *ap;
	int error;

#ifdef __ARM_EABI__
	sa->code = td->td_frame->tf_r7;
#else
	sa->code = sa->insn & 0x000fffff;
#endif
	ap = &td->td_frame->tf_r0;
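	/*
	 * SYS_syscall and SYS___syscall are indirect system calls: the real
	 * syscall number is passed as the first argument (as a 64-bit value
	 * in the SYS___syscall case), so consume it from the register
	 * arguments here.
	 */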
	if (sa->code == SYS_syscall) {
		sa->code = *ap++;
		sa->nap--;
	} else if (sa->code == SYS___syscall) {
		sa->code = ap[_QUAD_LOWWORD];
		sa->nap -= 2;
		ap += 2;
	}
	p = td->td_proc;
	if (p->p_sysent->sv_mask)
		sa->code &= p->p_sysent->sv_mask;
	if (sa->code >= p->p_sysent->sv_size)
		sa->callp = &p->p_sysent->sv_table[0];
	else
		sa->callp = &p->p_sysent->sv_table[sa->code];
	sa->narg = sa->callp->sy_narg;
	error = 0;
	memcpy(sa->args, ap, sa->nap * sizeof(register_t));
	if (sa->narg > sa->nap) {
		error = copyin((void *)td->td_frame->tf_usr_sp, sa->args +
		    sa->nap, (sa->narg - sa->nap) * sizeof(register_t));
	}
	if (error == 0) {
		td->td_retval[0] = 0;
		td->td_retval[1] = 0;
	}
	return (error);
}

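/*
 * syscallenter() and syscallret(), used by syscall() below, come from the
 * machine-independent subr_syscall.c, which is included directly here.
 */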
#include "../../kern/subr_syscall.c"

static void
syscall(struct thread *td, trapframe_t *frame)
{
	struct syscall_args sa;
	int error;

#ifndef __ARM_EABI__
	sa.insn = *(uint32_t *)(frame->tf_pc - INSN_SIZE);
	switch (sa.insn & SWI_OS_MASK) {
	case 0: /* XXX: we need our own one. */
		break;
	default:
		call_trapsignal(td, SIGILL, 0);
		userret(td, frame);
		return;
	}
#endif
	sa.nap = 4;

	error = syscallenter(td, &sa);
	KASSERT(error != 0 || td->td_ar == NULL,
	    ("returning from syscall with td_ar set!"));
	syscallret(td, error, &sa);
}

void
swi_handler(trapframe_t *frame)
{
	struct thread *td = curthread;

	td->td_frame = frame;

	td->td_pticks = 0;
	/*
	 * Make sure the program counter is correctly aligned so we
	 * don't take an alignment fault trying to read the opcode.
	 */
	if (__predict_false(((frame->tf_pc - INSN_SIZE) & 3) != 0)) {
		call_trapsignal(td, SIGILL, 0);
		userret(td, frame);
		return;
	}
	/*
	 * Enable interrupts if they were enabled before the exception.
	 * Since all syscalls *should* come from user mode it will always
	 * be safe to enable them, but check anyway.
	 */
	if (td->td_md.md_spinlock_count == 0) {
		if (__predict_true((frame->tf_spsr & I32_bit) == 0))
			enable_interrupts(I32_bit);
		if (__predict_true((frame->tf_spsr & F32_bit) == 0))
			enable_interrupts(F32_bit);
	}

	syscall(td, frame);
}