trap-v6.c revision 300694
1/*-
2 * Copyright 2014 Olivier Houchard <cognet@FreeBSD.org>
3 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
4 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
5 * Copyright 2014 Andrew Turner <andrew@FreeBSD.org>
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30#include "opt_ktrace.h"
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD: head/sys/arm/arm/trap-v6.c 300694 2016-05-25 19:44:26Z ian $");
34
35#include <sys/param.h>
36#include <sys/bus.h>
37#include <sys/systm.h>
38#include <sys/proc.h>
39#include <sys/kernel.h>
40#include <sys/lock.h>
41#include <sys/mutex.h>
42#include <sys/signalvar.h>
43#include <sys/ktr.h>
44#ifdef KTRACE
45#include <sys/uio.h>
46#include <sys/ktrace.h>
47#endif
48
49#include <vm/vm.h>
50#include <vm/pmap.h>
51#include <vm/vm_kern.h>
52#include <vm/vm_map.h>
53#include <vm/vm_extern.h>
54#include <vm/vm_param.h>
55
56#include <machine/cpu.h>
57#include <machine/frame.h>
58#include <machine/machdep.h>
59#include <machine/pcb.h>
60
61#ifdef KDB
62#include <sys/kdb.h>
63#include <machine/db_machdep.h>
64#endif
65
66#ifdef KDTRACE_HOOKS
67#include <sys/dtrace_bsd.h>
68#endif
69
70extern char cachebailout[];
71
72#ifdef DEBUG
73int last_fault_code;	/* For the benefit of pmap_fault_fixup() */
74#endif
75
/*
 * Description of a signal to be delivered to the faulting thread when an
 * abort is resolved by signalling rather than by fixing the fault.
 * Filled in by the abort_* handlers and consumed at do_trapsignal in
 * abort_handler().
 */
struct ksig {
	int sig;		/* signal number, e.g. SIGSEGV or SIGBUS */
	u_long code;		/* si_code, e.g. SEGV_MAPERR or BUS_ADRALN */
	vm_offset_t	addr;	/* faulting virtual address for si_addr */
};
81
/*
 * Common prototype for the specific abort handlers.  Arguments are the
 * trapframe, the fault index decoded from FSR, the raw FSR and FAR values,
 * a prefetch-abort flag, the faulting thread, and a ksig to fill in when
 * the handler's non-zero return asks abort_handler() to deliver a signal.
 */
typedef int abort_func_t(struct trapframe *, u_int, u_int, u_int, u_int,
    struct thread *, struct ksig *);

static abort_func_t abort_fatal;
static abort_func_t abort_align;
static abort_func_t abort_icache;

/* One entry of the aborts[] dispatch table below. */
struct abort {
	abort_func_t	*func;	/* handler, or NULL when handled inline */
	const char	*desc;	/* human-readable name for diagnostics */
};
93
94/*
95 * How are the aborts handled?
96 *
97 * Undefined Code:
 *  - Always fatal as we do not know what it means.
99 * Imprecise External Abort:
100 *  - Always fatal, but can be handled somehow in the future.
101 *    Now, due to PCIe buggy hardware, ignored.
102 * Precise External Abort:
103 *  - Always fatal, but who knows in the future???
104 * Debug Event:
105 *  - Special handling.
106 * External Translation Abort (L1 & L2)
107 *  - Always fatal as something is screwed up in page tables or hardware.
108 * Domain Fault (L1 & L2):
109 *  - Always fatal as we do not play game with domains.
110 * Alignment Fault:
111 *  - Everything should be aligned in kernel with exception of user to kernel
112 *    and vice versa data copying, so if pcb_onfault is not set, it's fatal.
113 *    We generate signal in case of abort from user mode.
114 * Instruction cache maintenance:
115 *  - According to manual, this is translation fault during cache maintenance
116 *    operation. So, it could be really complex in SMP case and fuzzy too
117 *    for cache operations working on virtual addresses. For now, we will
118 *    consider this abort as fatal. In fact, no cache maintenance on
119 *    not mapped virtual addresses should be called. As cache maintenance
 *    operation (except DMB, DSB, and Flush Prefetch Buffer) are privileged,
121 *    the abort is fatal for user mode as well for now. (This is good place to
122 *    note that cache maintenance on virtual address fill TLB.)
 * Access Bit (L1 & L2):
124 *  - Fast hardware emulation for kernel and user mode.
125 * Translation Fault (L1 & L2):
126 *  - Standard fault mechanism is held including vm_fault().
127 * Permission Fault (L1 & L2):
128 *  - Fast hardware emulation of modify bits and in other cases, standard
129 *    fault mechanism is held including vm_fault().
130 */
131
/*
 * Dispatch table for aborts, indexed by FSR_TO_FAULT(fsr).  Entries with a
 * NULL func are the mapping-related faults (access bit, translation,
 * permission, I-cache maintenance) that abort_handler() resolves inline via
 * pmap_fault()/vm_fault(); all others go to the named handler.
 */
static const struct abort aborts[] = {
	{abort_fatal,	"Undefined Code (0x000)"},
	{abort_align,	"Alignment Fault"},
	{abort_fatal,	"Debug Event"},
	{NULL,		"Access Bit (L1)"},
	{NULL,		"Instruction cache maintenance"},
	{NULL,		"Translation Fault (L1)"},
	{NULL,		"Access Bit (L2)"},
	{NULL,		"Translation Fault (L2)"},

	{abort_fatal,	"External Abort"},
	{abort_fatal,	"Domain Fault (L1)"},
	{abort_fatal,	"Undefined Code (0x00A)"},
	{abort_fatal,	"Domain Fault (L2)"},
	{abort_fatal,	"External Translation Abort (L1)"},
	{NULL,		"Permission Fault (L1)"},
	{abort_fatal,	"External Translation Abort (L2)"},
	{NULL,		"Permission Fault (L2)"},

	{abort_fatal,	"TLB Conflict Abort"},
	{abort_fatal,	"Undefined Code (0x401)"},
	{abort_fatal,	"Undefined Code (0x402)"},
	{abort_fatal,	"Undefined Code (0x403)"},
	{abort_fatal,	"Undefined Code (0x404)"},
	{abort_fatal,	"Undefined Code (0x405)"},
	{abort_fatal,	"Asynchronous External Abort"},
	{abort_fatal,	"Undefined Code (0x407)"},

	{abort_fatal,	"Asynchronous Parity Error on Memory Access"},
	{abort_fatal,	"Parity Error on Memory Access"},
	{abort_fatal,	"Undefined Code (0x40A)"},
	{abort_fatal,	"Undefined Code (0x40B)"},
	{abort_fatal,	"Parity Error on Translation (L1)"},
	{abort_fatal,	"Undefined Code (0x40D)"},
	{abort_fatal,	"Parity Error on Translation (L2)"},
	{abort_fatal,	"Undefined Code (0x40F)"}
};
169
170static __inline void
171call_trapsignal(struct thread *td, int sig, int code, vm_offset_t addr)
172{
173	ksiginfo_t ksi;
174
175	CTR4(KTR_TRAP, "%s: addr: %#x, sig: %d, code: %d",
176	   __func__, addr, sig, code);
177
178	/*
179	 * TODO: some info would be nice to know
180	 * if we are serving data or prefetch abort.
181	 */
182
183	ksiginfo_init_trap(&ksi);
184	ksi.ksi_signo = sig;
185	ksi.ksi_code = code;
186	ksi.ksi_addr = (void *)addr;
187	trapsignal(td, &ksi);
188}
189
190/*
191 * abort_imprecise() handles the following abort:
192 *
193 *  FAULT_EA_IMPREC - Imprecise External Abort
194 *
195 * The imprecise means that we don't know where the abort happened,
 * thus FAR is undefined. The abort should never fire, but hot
197 * plugging or accidental hardware failure can be the cause of it.
198 * If the abort happens, it can even be on different (thread) context.
199 * Without any additional support, the abort is fatal, as we do not
200 * know what really happened.
201 *
202 * QQQ: Some additional functionality, like pcb_onfault but global,
203 *      can be implemented. Imprecise handlers could be registered
204 *      which tell us if the abort is caused by something they know
205 *      about. They should return one of three codes like:
206 *		FAULT_IS_MINE,
207 *		FAULT_CAN_BE_MINE,
208 *		FAULT_IS_NOT_MINE.
209 *      The handlers should be called until some of them returns
210 *      FAULT_IS_MINE value or all was called. If all handlers return
211 *	FAULT_IS_NOT_MINE value, then the abort is fatal.
212 */
static __inline void
abort_imprecise(struct trapframe *tf, u_int fsr, u_int prefetch, bool usermode)
{

	/*
	 * XXX - We can get an imprecise abort as a result of an access
	 * to not-present PCI/PCIe configuration space.
	 */
#if 0
	goto out;
#endif
	/* With no way to identify the culprit, treat the abort as fatal. */
	abort_fatal(tf, FAULT_EA_IMPREC, fsr, 0, prefetch, curthread, NULL);

	/*
	 * Returning from this function means that we ignore
	 * the abort for good reason. Note that imprecise abort
	 * could fire any time even in user mode.
	 */

#if 0
out:
	if (usermode)
		userret(curthread, tf);
#endif
}
238
239/*
240 * abort_debug() handles the following abort:
241 *
242 *  FAULT_DEBUG - Debug Event
243 *
244 */
245static __inline void
246abort_debug(struct trapframe *tf, u_int fsr, u_int prefetch, bool usermode,
247    u_int far)
248{
249
250	if (usermode) {
251		struct thread *td;
252
253		td = curthread;
254		call_trapsignal(td, SIGTRAP, TRAP_BRKPT, far);
255		userret(td, tf);
256	} else {
257#ifdef KDB
258		kdb_trap((prefetch) ? T_BREAKPOINT : T_WATCHPOINT, 0, tf);
259#else
260		printf("No debugger in kernel.\n");
261#endif
262	}
263}
264
265/*
266 * Abort handler.
267 *
268 * FAR, FSR, and everything what can be lost after enabling
269 * interrupts must be grabbed before the interrupts will be
270 * enabled. Note that when interrupts will be enabled, we
271 * could even migrate to another CPU ...
272 *
273 * TODO: move quick cases to ASM
274 */
275void
276abort_handler(struct trapframe *tf, int prefetch)
277{
278	struct thread *td;
279	vm_offset_t far, va;
280	int idx, rv;
281	uint32_t fsr;
282	struct ksig ksig;
283	struct proc *p;
284	struct pcb *pcb;
285	struct vm_map *map;
286	struct vmspace *vm;
287	vm_prot_t ftype;
288	bool usermode;
289#ifdef INVARIANTS
290	void *onfault;
291#endif
292
293	PCPU_INC(cnt.v_trap);
294	td = curthread;
295
296	fsr = (prefetch) ? cp15_ifsr_get(): cp15_dfsr_get();
297#if __ARM_ARCH >= 7
298	far = (prefetch) ? cp15_ifar_get() : cp15_dfar_get();
299#else
300	far = (prefetch) ? TRAPF_PC(tf) : cp15_dfar_get();
301#endif
302
303	idx = FSR_TO_FAULT(fsr);
304	usermode = TRAPF_USERMODE(tf);	/* Abort came from user mode? */
305	if (usermode)
306		td->td_frame = tf;
307
308	CTR6(KTR_TRAP, "%s: fsr %#x (idx %u) far %#x prefetch %u usermode %d",
309	    __func__, fsr, idx, far, prefetch, usermode);
310
311	/*
312	 * Firstly, handle aborts that are not directly related to mapping.
313	 */
314	if (__predict_false(idx == FAULT_EA_IMPREC)) {
315		abort_imprecise(tf, fsr, prefetch, usermode);
316		return;
317	}
318
319	if (__predict_false(idx == FAULT_DEBUG)) {
320		abort_debug(tf, fsr, prefetch, usermode, far);
321		return;
322	}
323
324	/*
325	 * ARM has a set of unprivileged load and store instructions
326	 * (LDRT/LDRBT/STRT/STRBT ...) which are supposed to be used in other
327	 * than user mode and OS should recognize their aborts and behave
328	 * appropriately. However, there is no way how to do that reasonably
329	 * in general unless we restrict the handling somehow.
330	 *
331	 * For now, these instructions are used only in copyin()/copyout()
332	 * like functions where usermode buffers are checked in advance that
333	 * they are not from KVA space. Thus, no action is needed here.
334	 */
335
336	/*
337	 * (1) Handle access and R/W hardware emulation aborts.
338	 * (2) Check that abort is not on pmap essential address ranges.
339	 *     There is no way how to fix it, so we don't even try.
340	 */
341	rv = pmap_fault(PCPU_GET(curpmap), far, fsr, idx, usermode);
342	if (rv == KERN_SUCCESS)
343		return;
344#ifdef KDB
345	if (kdb_active) {
346		kdb_reenter();
347		goto out;
348	}
349#endif
350	if (rv == KERN_INVALID_ADDRESS)
351		goto nogo;
352
353	if (__predict_false((td->td_pflags & TDP_NOFAULTING) != 0)) {
354		/*
355		 * Due to both processor errata and lazy TLB invalidation when
356		 * access restrictions are removed from virtual pages, memory
357		 * accesses that are allowed by the physical mapping layer may
358		 * nonetheless cause one spurious page fault per virtual page.
359		 * When the thread is executing a "no faulting" section that
360		 * is bracketed by vm_fault_{disable,enable}_pagefaults(),
361		 * every page fault is treated as a spurious page fault,
362		 * unless it accesses the same virtual address as the most
363		 * recent page fault within the same "no faulting" section.
364		 */
365		if (td->td_md.md_spurflt_addr != far ||
366		    (td->td_pflags & TDP_RESETSPUR) != 0) {
367			td->td_md.md_spurflt_addr = far;
368			td->td_pflags &= ~TDP_RESETSPUR;
369
370			tlb_flush_local(far & ~PAGE_MASK);
371			return;
372		}
373	} else {
374		/*
375		 * If we get a page fault while in a critical section, then
376		 * it is most likely a fatal kernel page fault.  The kernel
377		 * is already going to panic trying to get a sleep lock to
378		 * do the VM lookup, so just consider it a fatal trap so the
379		 * kernel can print out a useful trap message and even get
380		 * to the debugger.
381		 *
382		 * If we get a page fault while holding a non-sleepable
383		 * lock, then it is most likely a fatal kernel page fault.
384		 * If WITNESS is enabled, then it's going to whine about
385		 * bogus LORs with various VM locks, so just skip to the
386		 * fatal trap handling directly.
387		 */
388		if (td->td_critnest != 0 ||
389		    WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL,
390		    "Kernel page fault") != 0) {
391			abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
392			return;
393		}
394	}
395
396	/* Re-enable interrupts if they were enabled previously. */
397	if (td->td_md.md_spinlock_count == 0) {
398		if (__predict_true(tf->tf_spsr & PSR_I) == 0)
399			enable_interrupts(PSR_I);
400		if (__predict_true(tf->tf_spsr & PSR_F) == 0)
401			enable_interrupts(PSR_F);
402	}
403
404	p = td->td_proc;
405	if (usermode) {
406		td->td_pticks = 0;
407		if (td->td_cowgen != p->p_cowgen)
408			thread_cow_update(td);
409	}
410
411	/* Invoke the appropriate handler, if necessary. */
412	if (__predict_false(aborts[idx].func != NULL)) {
413		if ((aborts[idx].func)(tf, idx, fsr, far, prefetch, td, &ksig))
414			goto do_trapsignal;
415		goto out;
416	}
417
418	/*
419	 * At this point, we're dealing with one of the following aborts:
420	 *
421	 *  FAULT_ICACHE   - I-cache maintenance
422	 *  FAULT_TRAN_xx  - Translation
423	 *  FAULT_PERM_xx  - Permission
424	 */
425
426	/*
427	 * Don't pass faulting cache operation to vm_fault(). We don't want
428	 * to handle all vm stuff at this moment.
429	 */
430	pcb = td->td_pcb;
431	if (__predict_false(pcb->pcb_onfault == cachebailout)) {
432		tf->tf_r0 = far;		/* return failing address */
433		tf->tf_pc = (register_t)pcb->pcb_onfault;
434		return;
435	}
436
437	/* Handle remaining I-cache aborts. */
438	if (idx == FAULT_ICACHE) {
439		if (abort_icache(tf, idx, fsr, far, prefetch, td, &ksig))
440			goto do_trapsignal;
441		goto out;
442	}
443
444	va = trunc_page(far);
445	if (va >= KERNBASE) {
446		/*
447		 * Don't allow user-mode faults in kernel address space.
448		 */
449		if (usermode)
450			goto nogo;
451
452		map = kernel_map;
453	} else {
454		/*
455		 * This is a fault on non-kernel virtual memory. If curproc
456		 * is NULL or curproc->p_vmspace is NULL the fault is fatal.
457		 */
458		vm = (p != NULL) ? p->p_vmspace : NULL;
459		if (vm == NULL)
460			goto nogo;
461
462		map = &vm->vm_map;
463		if (!usermode && (td->td_intr_nesting_level != 0 ||
464		    pcb->pcb_onfault == NULL)) {
465			abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
466			return;
467		}
468	}
469
470	ftype = (fsr & FSR_WNR) ? VM_PROT_WRITE : VM_PROT_READ;
471	if (prefetch)
472		ftype |= VM_PROT_EXECUTE;
473
474#ifdef DEBUG
475	last_fault_code = fsr;
476#endif
477
478#ifdef INVARIANTS
479	onfault = pcb->pcb_onfault;
480	pcb->pcb_onfault = NULL;
481#endif
482
483	/* Fault in the page. */
484	rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
485
486#ifdef INVARIANTS
487	pcb->pcb_onfault = onfault;
488#endif
489
490	if (__predict_true(rv == KERN_SUCCESS))
491		goto out;
492nogo:
493	if (!usermode) {
494		if (td->td_intr_nesting_level == 0 &&
495		    pcb->pcb_onfault != NULL) {
496			tf->tf_r0 = rv;
497			tf->tf_pc = (int)pcb->pcb_onfault;
498			return;
499		}
500		CTR2(KTR_TRAP, "%s: vm_fault() failed with %d", __func__, rv);
501		abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
502		return;
503	}
504
505	ksig.sig = SIGSEGV;
506	ksig.code = (rv == KERN_PROTECTION_FAILURE) ? SEGV_ACCERR : SEGV_MAPERR;
507	ksig.addr = far;
508
509do_trapsignal:
510	call_trapsignal(td, ksig.sig, ksig.code, ksig.addr);
511out:
512	if (usermode)
513		userret(td, tf);
514}
515
516/*
517 * abort_fatal() handles the following data aborts:
518 *
519 *  FAULT_DEBUG		- Debug Event
 *  FAULT_ACCESS_xx	- Access Bit
521 *  FAULT_EA_PREC	- Precise External Abort
522 *  FAULT_DOMAIN_xx	- Domain Fault
523 *  FAULT_EA_TRAN_xx	- External Translation Abort
524 *  FAULT_EA_IMPREC	- Imprecise External Abort
525 *  + all undefined codes for ABORT
526 *
527 * We should never see these on a properly functioning system.
528 *
529 * This function is also called by the other handlers if they
530 * detect a fatal problem.
531 *
 * Note: If 'td' is NULL, we assume we're dealing with a prefetch abort.
533 */
static int
abort_fatal(struct trapframe *tf, u_int idx, u_int fsr, u_int far,
    u_int prefetch, struct thread *td, struct ksig *ksig)
{
	bool usermode;
	const char *mode;
	const char *rw_mode;

	usermode = TRAPF_USERMODE(tf);
#ifdef KDTRACE_HOOKS
	/* Give DTrace a chance to claim kernel-mode faults it induced. */
	if (!usermode) {
		if (dtrace_trap_func != NULL && (*dtrace_trap_func)(tf, far))
			return (0);
	}
#endif

	mode = usermode ? "user" : "kernel";
	rw_mode  = fsr & FSR_WNR ? "write" : "read";
	/* Quiesce the CPU so the diagnostic output is not interleaved. */
	disable_interrupts(PSR_I|PSR_F);

	/* td == NULL is used to signal a prefetch abort (see block comment). */
	if (td != NULL) {
		printf("Fatal %s mode data abort: '%s' on %s\n", mode,
		    aborts[idx].desc, rw_mode);
		printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr);
		/* FAR is undefined for imprecise external aborts. */
		if (idx != FAULT_EA_IMPREC)
			printf("%08x, ", far);
		else
			printf("Invalid,  ");
		printf("spsr=%08x\n", tf->tf_spsr);
	} else {
		printf("Fatal %s mode prefetch abort at 0x%08x\n",
		    mode, tf->tf_pc);
		printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr);
	}

	/* Dump the full register state from the trapframe. */
	printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n",
	    tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3);
	printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n",
	    tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7);
	printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n",
	    tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11);
	printf("r12=%08x, ", tf->tf_r12);

	if (usermode)
		printf("usp=%08x, ulr=%08x",
		    tf->tf_usr_sp, tf->tf_usr_lr);
	else
		printf("ssp=%08x, slr=%08x",
		    tf->tf_svc_sp, tf->tf_svc_lr);
	printf(", pc =%08x\n\n", tf->tf_pc);

#ifdef KDB
	if (debugger_on_panic || kdb_active)
		kdb_trap(fsr, 0, tf);
#endif
	panic("Fatal abort");
	/*NOTREACHED*/
}
592
593/*
594 * abort_align() handles the following data abort:
595 *
596 *  FAULT_ALIGN - Alignment fault
597 *
598 * Everything should be aligned in kernel with exception of user to kernel
599 * and vice versa data copying, so if pcb_onfault is not set, it's fatal.
600 * We generate signal in case of abort from user mode.
601 */
602static int
603abort_align(struct trapframe *tf, u_int idx, u_int fsr, u_int far,
604    u_int prefetch, struct thread *td, struct ksig *ksig)
605{
606	bool usermode;
607
608	usermode = TRAPF_USERMODE(tf);
609	if (!usermode) {
610		if (td->td_intr_nesting_level == 0 && td != NULL &&
611		    td->td_pcb->pcb_onfault != NULL) {
612			tf->tf_r0 = EFAULT;
613			tf->tf_pc = (int)td->td_pcb->pcb_onfault;
614			return (0);
615		}
616		abort_fatal(tf, idx, fsr, far, prefetch, td, ksig);
617	}
618	/* Deliver a bus error signal to the process */
619	ksig->code = BUS_ADRALN;
620	ksig->sig = SIGBUS;
621	ksig->addr = far;
622	return (1);
623}
624
625/*
626 * abort_icache() handles the following data abort:
627 *
628 * FAULT_ICACHE - Instruction cache maintenance
629 *
630 * According to manual, FAULT_ICACHE is translation fault during cache
631 * maintenance operation. In fact, no cache maintenance operation on
632 * not mapped virtual addresses should be called. As cache maintenance
 * operation (except DMB, DSB, and Flush Prefetch Buffer) are privileged,
 * the abort is considered fatal for now. However, all the matter with
635 * cache maintenance operation on virtual addresses could be really complex
636 * and fuzzy in SMP case, so maybe in future standard fault mechanism
637 * should be held here including vm_fault() calling.
638 */
639static int
640abort_icache(struct trapframe *tf, u_int idx, u_int fsr, u_int far,
641    u_int prefetch, struct thread *td, struct ksig *ksig)
642{
643
644	abort_fatal(tf, idx, fsr, far, prefetch, td, ksig);
645	return(0);
646}
647