/* trap-v6.c, revision 285389 */
1/*-
2 * Copyright 2014 Olivier Houchard <cognet@FreeBSD.org>
3 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
4 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
5 * Copyright 2014 Andrew Turner <andrew@FreeBSD.org>
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30#include "opt_ktrace.h"
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: head/sys/arm/arm/trap-v6.c 285389 2015-07-11 16:02:06Z andrew $");
34
35#include <sys/param.h>
36#include <sys/bus.h>
37#include <sys/systm.h>
38#include <sys/proc.h>
39#include <sys/kernel.h>
40#include <sys/lock.h>
41#include <sys/mutex.h>
42#include <sys/signalvar.h>
43#include <sys/ktr.h>
44#ifdef KTRACE
45#include <sys/uio.h>
46#include <sys/ktrace.h>
47#endif
48
49#include <vm/vm.h>
50#include <vm/pmap.h>
51#include <vm/vm_kern.h>
52#include <vm/vm_map.h>
53#include <vm/vm_extern.h>
54#include <vm/vm_param.h>
55
56#include <machine/acle-compat.h>
57#include <machine/cpu.h>
58#include <machine/cpu-v6.h>
59#include <machine/frame.h>
60#include <machine/machdep.h>
61#include <machine/pcb.h>
62#include <machine/vmparam.h>
63
64#ifdef KDB
65#include <sys/kdb.h>
66#include <machine/db_machdep.h>
67#endif
68
69extern char fusubailout[];
70extern char cachebailout[];
71
72#ifdef DEBUG
73int last_fault_code;	/* For the benefit of pmap_fault_fixup() */
74#endif
75
/* Pending signal description, filled in by the abort handlers. */
struct ksig {
	int sig;		/* Signal number to deliver (SIGSEGV, SIGBUS, ...). */
	u_long code;		/* Signal code (SEGV_MAPERR, SEGV_ACCERR, ...). */
	vm_offset_t	addr;	/* Faulting virtual address reported to userland. */
};
81
/*
 * Common prototype of the per-fault-type abort handlers below.
 * A non-zero return means a signal described by *ksig should be
 * delivered (see the do_trapsignal path in abort_handler()).
 */
typedef int abort_func_t(struct trapframe *, u_int, u_int, u_int, u_int,
    struct thread *, struct ksig *);

static abort_func_t abort_fatal;
static abort_func_t abort_align;
static abort_func_t abort_icache;

/* One entry of the fault-status dispatch table (aborts[]). */
struct abort {
	abort_func_t	*func;	/* Handler; NULL means standard VM fault path. */
	const char	*desc;	/* Human-readable fault description. */
};
93
94/*
95 * How are the aborts handled?
96 *
97 * Undefined Code:
98 *  - Always fatal as we do not know what does it mean.
99 * Imprecise External Abort:
100 *  - Always fatal, but can be handled somehow in the future.
 *    Now, due to buggy PCIe hardware, it is ignored.
102 * Precise External Abort:
103 *  - Always fatal, but who knows in the future???
104 * Debug Event:
105 *  - Special handling.
106 * External Translation Abort (L1 & L2)
 *  - Always fatal as something is screwed up in page tables or hardware.
108 * Domain Fault (L1 & L2):
109 *  - Always fatal as we do not play game with domains.
110 * Alignment Fault:
111 *  - Everything should be aligned in kernel including user to kernel and
112 *    vice versa data copying, so we ignore pcb_onfault, and it's always fatal.
113 *    We generate signal in case of abort from user mode.
114 * Instruction cache maintenance:
115 *  - According to manual, this is translation fault during cache maintenance
116 *    operation. So, it could be really complex in SMP case and fuzzy too
117 *    for cache operations working on virtual addresses. For now, we will
118 *    consider this abort as fatal. In fact, no cache maintenance on
119 *    not mapped virtual addresses should be called. As cache maintenance
 *    operations (except DMB, DSB, and Flush Prefetch Buffer) are privileged,
121 *    the abort is fatal for user mode as well for now. (This is good place to
122 *    note that cache maintenance on virtual address fill TLB.)
 * Access Bit (L1 & L2):
124 *  - Fast hardware emulation for kernel and user mode.
125 * Translation Fault (L1 & L2):
126 *  - Standard fault mechanism is held including vm_fault().
127 * Permission Fault (L1 & L2):
 *  - Fast hardware emulation of modify bits and in other cases, standard
129 *    fault mechanism is held including vm_fault().
130 */
131
/*
 * Fault dispatch table, indexed by FSR_TO_FAULT(fsr).
 * Entries with a NULL func are handled by the standard VM fault path
 * in abort_handler(); all others get a dedicated handler.
 */
static const struct abort aborts[] = {
	{abort_fatal,	"Undefined Code (0x000)"},
	{abort_align,	"Alignment Fault"},
	{abort_fatal,	"Debug Event"},
	{NULL,		"Access Bit (L1)"},
	{NULL,		"Instruction cache maintenance"},
	{NULL,		"Translation Fault (L1)"},
	{NULL,		"Access Bit (L2)"},
	{NULL,		"Translation Fault (L2)"},

	{abort_fatal,	"External Abort"},
	{abort_fatal,	"Domain Fault (L1)"},
	{abort_fatal,	"Undefined Code (0x00A)"},
	{abort_fatal,	"Domain Fault (L2)"},
	{abort_fatal,	"External Translation Abort (L1)"},
	{NULL,		"Permission Fault (L1)"},
	{abort_fatal,	"External Translation Abort (L2)"},
	{NULL,		"Permission Fault (L2)"},

	{abort_fatal,	"TLB Conflict Abort"},
	{abort_fatal,	"Undefined Code (0x401)"},
	{abort_fatal,	"Undefined Code (0x402)"},
	{abort_fatal,	"Undefined Code (0x403)"},
	{abort_fatal,	"Undefined Code (0x404)"},
	{abort_fatal,	"Undefined Code (0x405)"},
	{abort_fatal,	"Asynchronous External Abort"},
	{abort_fatal,	"Undefined Code (0x407)"},

	{abort_fatal,	"Asynchronous Parity Error on Memory Access"},
	{abort_fatal,	"Parity Error on Memory Access"},
	{abort_fatal,	"Undefined Code (0x40A)"},
	{abort_fatal,	"Undefined Code (0x40B)"},
	{abort_fatal,	"Parity Error on Translation (L1)"},
	{abort_fatal,	"Undefined Code (0x40D)"},
	{abort_fatal,	"Parity Error on Translation (L2)"},
	{abort_fatal,	"Undefined Code (0x40F)"}
};
169
170
171static __inline void
172call_trapsignal(struct thread *td, int sig, int code, vm_offset_t addr)
173{
174	ksiginfo_t ksi;
175
176	CTR4(KTR_TRAP, "%s: addr: %#x, sig: %d, code: %d",
177	   __func__, addr, sig, code);
178
179	/*
180	 * TODO: some info would be nice to know
181	 * if we are serving data or prefetch abort.
182	 */
183
184	ksiginfo_init_trap(&ksi);
185	ksi.ksi_signo = sig;
186	ksi.ksi_code = code;
187	ksi.ksi_addr = (void *)addr;
188	trapsignal(td, &ksi);
189}
190
191/*
192 * abort_imprecise() handles the following abort:
193 *
194 *  FAULT_EA_IMPREC - Imprecise External Abort
195 *
 * Imprecise means that we do not know where the abort happened,
 * thus FAR is undefined. The abort should never fire, but hot
 * plugging or accidental hardware failure can be the cause of it.
199 * If the abort happens, it can even be on different (thread) context.
200 * Without any additional support, the abort is fatal, as we do not
201 * know what really happened.
202 *
203 * QQQ: Some additional functionality, like pcb_onfault but global,
204 *      can be implemented. Imprecise handlers could be registered
205 *      which tell us if the abort is caused by something they know
206 *      about. They should return one of three codes like:
207 *		FAULT_IS_MINE,
208 *		FAULT_CAN_BE_MINE,
209 *		FAULT_IS_NOT_MINE.
210 *      The handlers should be called until some of them returns
211 *      FAULT_IS_MINE value or all was called. If all handlers return
212 *	FAULT_IS_NOT_MINE value, then the abort is fatal.
213 */
static __inline void
abort_imprecise(struct trapframe *tf, u_int fsr, u_int prefetch, u_int usermode)
{
	/*
	 * XXX We can get an imprecise abort as a result of an access
	 * to a not-present PCI/PCIe configuration space.
	 */
#if 0
	goto out;
#endif
	/* Currently always fatal; see the QQQ note above about handlers. */
	abort_fatal(tf, FAULT_EA_IMPREC, fsr, 0, prefetch, curthread, NULL);

	/*
	 * Returning from this function means that we ignore
	 * the abort for good reason. Note that imprecise abort
	 * could fire any time even in user mode.
	 */

#if 0
out:
	if (usermode)
		userret(curthread, tf);
#endif
}
237
238/*
239 * abort_debug() handles the following abort:
240 *
241 *  FAULT_DEBUG - Debug Event
242 *
243 */
244static __inline void
245abort_debug(struct trapframe *tf, u_int fsr, u_int prefetch, u_int usermode,
246    u_int far)
247{
248	if (usermode) {
249		struct thread *td;
250
251		td = curthread;
252		call_trapsignal(td, SIGTRAP, TRAP_BRKPT, far);
253		userret(td, tf);
254	} else {
255#ifdef KDB
256		kdb_trap(T_BREAKPOINT, 0, tf);
257#else
258		printf("No debugger in kernel.\n");
259#endif
260	}
261}
262
263/*
264 * Abort handler.
265 *
266 * FAR, FSR, and everything what can be lost after enabling
267 * interrupts must be grabbed before the interrupts will be
268 * enabled. Note that when interrupts will be enabled, we
269 * could even migrate to another CPU ...
270 *
271 * TODO: move quick cases to ASM
272 */
273void
274abort_handler(struct trapframe *tf, int prefetch)
275{
276	struct thread *td;
277	vm_offset_t far, va;
278	int idx, usermode;
279	uint32_t fsr;
280	struct ksig ksig;
281	struct proc *p;
282	struct pcb *pcb;
283	struct vm_map *map;
284	struct vmspace *vm;
285	vm_prot_t ftype;
286	int rv;
287#ifdef INVARIANTS
288	void *onfault;
289#endif
290	td = curthread;
291	fsr = (prefetch) ? cp15_ifsr_get(): cp15_dfsr_get();
292#if __ARM_ARCH >= 7
293	far = (prefetch) ? cp15_ifar_get() : cp15_dfar_get();
294#else
295	far = (prefetch) ? TRAPF_PC(tf) : cp15_dfar_get();
296#endif
297
298	idx = FSR_TO_FAULT(fsr);
299	usermode = TRAPF_USERMODE(tf);	/* Abort came from user mode? */
300	if (usermode)
301		td->td_frame = tf;
302
303	CTR4(KTR_TRAP, "abort_handler: fsr %#x (idx %u) far %#x prefetch %u",
304	fsr, idx, far, prefetch);
305
306	/*
307	 * Firstly, handle aborts that are not directly related to mapping.
308	 */
309	if (__predict_false(idx == FAULT_EA_IMPREC)) {
310		abort_imprecise(tf, fsr, prefetch, usermode);
311		return;
312	}
313
314	if (__predict_false(idx == FAULT_DEBUG)) {
315		abort_debug(tf, fsr, prefetch, usermode, far);
316		return;
317	}
318
319#ifdef ARM_NEW_PMAP
320	rv = pmap_fault(PCPU_GET(curpmap), far, fsr, idx, usermode);
321	if (rv == 0) {
322		return;
323	} else if (rv == EFAULT) {
324
325		call_trapsignal(td, SIGSEGV, SEGV_MAPERR, far);
326		userret(td, tf);
327		return;
328	}
329#endif
330	/*
331	 * Now, when we handled imprecise and debug aborts, the rest of
332	 * aborts should be really related to mapping.
333	 *
334	 */
335
336	PCPU_INC(cnt.v_trap);
337
338#ifdef KDB
339	if (kdb_active) {
340		kdb_reenter();
341		goto out;
342	}
343#endif
344	if (__predict_false((td->td_pflags & TDP_NOFAULTING) != 0)) {
345		/*
346		 * Due to both processor errata and lazy TLB invalidation when
347		 * access restrictions are removed from virtual pages, memory
348		 * accesses that are allowed by the physical mapping layer may
349		 * nonetheless cause one spurious page fault per virtual page.
350		 * When the thread is executing a "no faulting" section that
351		 * is bracketed by vm_fault_{disable,enable}_pagefaults(),
352		 * every page fault is treated as a spurious page fault,
353		 * unless it accesses the same virtual address as the most
354		 * recent page fault within the same "no faulting" section.
355		 */
356		if (td->td_md.md_spurflt_addr != far ||
357		    (td->td_pflags & TDP_RESETSPUR) != 0) {
358			td->td_md.md_spurflt_addr = far;
359			td->td_pflags &= ~TDP_RESETSPUR;
360
361			tlb_flush_local(far & ~PAGE_MASK);
362			return;
363		}
364	} else {
365		/*
366		 * If we get a page fault while in a critical section, then
367		 * it is most likely a fatal kernel page fault.  The kernel
368		 * is already going to panic trying to get a sleep lock to
369		 * do the VM lookup, so just consider it a fatal trap so the
370		 * kernel can print out a useful trap message and even get
371		 * to the debugger.
372		 *
373		 * If we get a page fault while holding a non-sleepable
374		 * lock, then it is most likely a fatal kernel page fault.
375		 * If WITNESS is enabled, then it's going to whine about
376		 * bogus LORs with various VM locks, so just skip to the
377		 * fatal trap handling directly.
378		 */
379		if (td->td_critnest != 0 ||
380		    WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL,
381		    "Kernel page fault") != 0) {
382			abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
383			return;
384		}
385	}
386
387	/* Re-enable interrupts if they were enabled previously. */
388	if (td->td_md.md_spinlock_count == 0) {
389		if (__predict_true(tf->tf_spsr & PSR_I) == 0)
390			enable_interrupts(PSR_I);
391		if (__predict_true(tf->tf_spsr & PSR_F) == 0)
392			enable_interrupts(PSR_F);
393	}
394
395	p = td->td_proc;
396	if (usermode) {
397		td->td_pticks = 0;
398		if (td->td_cowgen != p->p_cowgen)
399			thread_cow_update(td);
400	}
401
402	/* Invoke the appropriate handler, if necessary. */
403	if (__predict_false(aborts[idx].func != NULL)) {
404		if ((aborts[idx].func)(tf, idx, fsr, far, prefetch, td, &ksig))
405			goto do_trapsignal;
406		goto out;
407	}
408
409	/*
410	 * Don't pass faulting cache operation to vm_fault(). We don't want
411	 * to handle all vm stuff at this moment.
412	 */
413	pcb = td->td_pcb;
414	if (__predict_false(pcb->pcb_onfault == cachebailout)) {
415		tf->tf_r0 = far;		/* return failing address */
416		tf->tf_pc = (register_t)pcb->pcb_onfault;
417		return;
418	}
419
420	/* Handle remaining I cache aborts. */
421	if (idx == FAULT_ICACHE) {
422		if (abort_icache(tf, idx, fsr, far, prefetch, td, &ksig))
423			goto do_trapsignal;
424		goto out;
425	}
426
427	/*
428	 * At this point, we're dealing with one of the following aborts:
429	 *
430	 *  FAULT_TRAN_xx  - Translation
431	 *  FAULT_PERM_xx  - Permission
432	 *
433	 * These are the main virtual memory-related faults signalled by
434	 * the MMU.
435	 */
436
437	/* fusubailout is used by [fs]uswintr to avoid page faulting */
438	pcb = td->td_pcb;
439	if (__predict_false(pcb->pcb_onfault == fusubailout)) {
440		tf->tf_r0 = EFAULT;
441		tf->tf_pc = (register_t)pcb->pcb_onfault;
442		return;
443	}
444
445	/*
446	 * QQQ: ARM has a set of unprivileged load and store instructions
447	 *      (LDRT/LDRBT/STRT/STRBT ...) which are supposed to be used
448	 *      in other than user mode and OS should recognize their
449	 *      aborts and behaved appropriately. However, there is no way
450	 *      how to do that reasonably in general unless we restrict
451	 *      the handling somehow. One way is to limit the handling for
452	 *      aborts which come from undefined mode only.
453	 *
454	 *      Anyhow, we do not use these instructions and do not implement
455	 *      any special handling for them.
456	 */
457
458	va = trunc_page(far);
459	if (va >= KERNBASE) {
460		/*
461		 * Don't allow user-mode faults in kernel address space.
462		 */
463		if (usermode)
464			goto nogo;
465
466		map = kernel_map;
467	} else {
468		/*
469		 * This is a fault on non-kernel virtual memory. If curproc
470		 * is NULL or curproc->p_vmspace is NULL the fault is fatal.
471		 */
472		vm = (p != NULL) ? p->p_vmspace : NULL;
473		if (vm == NULL)
474			goto nogo;
475
476		map = &vm->vm_map;
477		if (!usermode && (td->td_intr_nesting_level != 0 ||
478		    pcb->pcb_onfault == NULL)) {
479			abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
480			return;
481		}
482	}
483
484	ftype = (fsr & FSR_WNR) ? VM_PROT_WRITE : VM_PROT_READ;
485	if (prefetch)
486		ftype |= VM_PROT_EXECUTE;
487
488#ifdef DEBUG
489	last_fault_code = fsr;
490#endif
491
492#ifndef ARM_NEW_PMAP
493	if (pmap_fault_fixup(vmspace_pmap(td->td_proc->p_vmspace), va, ftype,
494	    usermode)) {
495		goto out;
496	}
497#endif
498
499#ifdef INVARIANTS
500	onfault = pcb->pcb_onfault;
501	pcb->pcb_onfault = NULL;
502#endif
503	if (map != kernel_map) {
504		/*
505		 * Keep swapout from messing with us during this
506		 *	critical time.
507		 */
508		PROC_LOCK(p);
509		++p->p_lock;
510		PROC_UNLOCK(p);
511
512		/* Fault in the user page: */
513		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
514
515		PROC_LOCK(p);
516		--p->p_lock;
517		PROC_UNLOCK(p);
518	} else {
519		/*
520		 * Don't have to worry about process locking or stacks in the
521		 * kernel.
522		 */
523		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
524	}
525
526#ifdef INVARIANTS
527	pcb->pcb_onfault = onfault;
528#endif
529
530	if (__predict_true(rv == KERN_SUCCESS))
531		goto out;
532nogo:
533	if (!usermode) {
534		if (td->td_intr_nesting_level == 0 &&
535		    pcb->pcb_onfault != NULL) {
536			tf->tf_r0 = rv;
537			tf->tf_pc = (int)pcb->pcb_onfault;
538			return;
539		}
540		CTR2(KTR_TRAP, "%s: vm_fault() failed with %d", __func__, rv);
541		abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
542		return;
543	}
544
545	ksig.sig = SIGSEGV;
546	ksig.code = (rv == KERN_PROTECTION_FAILURE) ? SEGV_ACCERR : SEGV_MAPERR;
547	ksig.addr = far;
548
549do_trapsignal:
550	call_trapsignal(td, ksig.sig, ksig.code, ksig.addr);
551out:
552	if (usermode)
553		userret(td, tf);
554}
555
556/*
557 * abort_fatal() handles the following data aborts:
558
559 *  FAULT_DEBUG		- Debug Event
 *  FAULT_ACCESS_xx	- Access Bit
561 *  FAULT_EA_PREC	- Precise External Abort
562 *  FAULT_DOMAIN_xx	- Domain Fault
563 *  FAULT_EA_TRAN_xx	- External Translation Abort
564 *  FAULT_EA_IMPREC	- Imprecise External Abort
565 *  + all undefined codes for ABORT
566 *
567 * We should never see these on a properly functioning system.
568 *
569 * This function is also called by the other handlers if they
570 * detect a fatal problem.
571 *
 * Note: If 'td' is NULL, we assume we're dealing with a prefetch abort.
573 */
static int
abort_fatal(struct trapframe *tf, u_int idx, u_int fsr, u_int far, u_int prefetch,
    struct thread *td, struct ksig *ksig)
{
	u_int usermode;
	const char *mode;
	const char *rw_mode;

	usermode = TRAPF_USERMODE(tf);
	mode = usermode ? "user" : "kernel";
	/* NOTE(review): FSR_WNR is presumably meaningful for data aborts only. */
	rw_mode  = fsr & FSR_WNR ? "write" : "read";
	/* We are going down; make sure nothing interrupts the dump. */
	disable_interrupts(PSR_I|PSR_F);

	if (td != NULL) {
		printf("Fatal %s mode data abort: '%s' on %s\n", mode,
		    aborts[idx].desc, rw_mode);
		printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr);
		/* FAR is undefined for an imprecise external abort. */
		if (idx != FAULT_EA_IMPREC)
			printf("%08x, ", far);
		else
			printf("Invalid,  ");
		printf("spsr=%08x\n", tf->tf_spsr);
	} else {
		/* td == NULL flags a prefetch abort (see comment above). */
		printf("Fatal %s mode prefetch abort at 0x%08x\n",
		    mode, tf->tf_pc);
		printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr);
	}

	/* Dump the full register state for post-mortem analysis. */
	printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n",
	    tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3);
	printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n",
	    tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7);
	printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n",
	    tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11);
	printf("r12=%08x, ", tf->tf_r12);

	if (usermode)
		printf("usp=%08x, ulr=%08x",
		    tf->tf_usr_sp, tf->tf_usr_lr);
	else
		printf("ssp=%08x, slr=%08x",
		    tf->tf_svc_sp, tf->tf_svc_lr);
	printf(", pc =%08x\n\n", tf->tf_pc);

#ifdef KDB
	if (debugger_on_panic || kdb_active)
		kdb_trap(fsr, 0, tf);
#endif
	/* Never returns; the int return type satisfies abort_func_t. */
	panic("Fatal abort");
	/*NOTREACHED*/
}
625
626/*
627 * abort_align() handles the following data abort:
628 *
629 *  FAULT_ALIGN - Alignment fault
630 *
631 * Every memory access should be correctly aligned in kernel including
632 * user to kernel and vice versa data copying, so we ignore pcb_onfault,
633 * and it's always fatal. We generate a signal in case of abort from user mode.
634 */
635static int
636abort_align(struct trapframe *tf, u_int idx, u_int fsr, u_int far, u_int prefetch,
637    struct thread *td, struct ksig *ksig)
638{
639	u_int usermode;
640
641	usermode = TRAPF_USERMODE(tf);
642
643	/*
644	 * Alignment faults are always fatal if they occur in any but user mode.
645	 *
646	 * XXX The old trap code handles pcb fault even for alignment traps.
647	 * Unfortunately, we don't known why and if is this need.
648	 */
649	if (!usermode) {
650		if (td->td_intr_nesting_level == 0 && td != NULL &&
651		    td->td_pcb->pcb_onfault != NULL) {
652			printf("%s: Got alignment fault with pcb_onfault set"
653			    ", please report this issue\n", __func__);
654			tf->tf_r0 = EFAULT;;
655			tf->tf_pc = (int)td->td_pcb->pcb_onfault;
656			return (0);
657		}
658		abort_fatal(tf, idx, fsr, far, prefetch, td, ksig);
659	}
660	/* Deliver a bus error signal to the process */
661	ksig->code = 0;
662	ksig->sig = SIGBUS;
663	ksig->addr = far;
664	return (1);
665}
666
667/*
668 * abort_icache() handles the following data abort:
669 *
670 * FAULT_ICACHE - Instruction cache maintenance
671 *
672 * According to manual, FAULT_ICACHE is translation fault during cache
673 * maintenance operation. In fact, no cache maintenance operation on
674 * not mapped virtual addresses should be called. As cache maintenance
 * operations (except DMB, DSB, and Flush Prefetch Buffer) are privileged,
 * the abort is considered fatal for now. However, all the matter with
677 * cache maintenance operation on virtual addresses could be really complex
678 * and fuzzy in SMP case, so maybe in future standard fault mechanism
679 * should be held here including vm_fault() calling.
680 */
681static int
682abort_icache(struct trapframe *tf, u_int idx, u_int fsr, u_int far, u_int prefetch,
683    struct thread *td, struct ksig *ksig)
684{
685	abort_fatal(tf, idx, fsr, far, prefetch, td, ksig);
686	return(0);
687}
688