/*-
 * Copyright 2014 Olivier Houchard <cognet@FreeBSD.org>
 * Copyright 2014 Svatopluk Kraus <onwahe@gmail.com>
 * Copyright 2014 Michal Meloun <meloun@miracle.cz>
 * Copyright 2014 Andrew Turner <andrew@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "opt_ktrace.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/arm/arm/trap-v6.c 344905 2019-03-08 00:20:37Z jhb $");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/signalvar.h>
#include <sys/ktr.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/uio.h>
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>

#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/machdep.h>
#include <machine/pcb.h>

#ifdef KDB
#include <sys/kdb.h>
#include <machine/db_machdep.h>
#endif

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>
#endif

extern char cachebailout[];

#ifdef DEBUG
int last_fault_code;	/* For the benefit of pmap_fault_fixup() */
#endif

struct ksig {
	int sig;
	u_long code;
	vm_offset_t	addr;
};

typedef int abort_func_t(struct trapframe *, u_int, u_int, u_int, u_int,
    struct thread *, struct ksig *);

static abort_func_t abort_fatal;
static abort_func_t abort_align;
static abort_func_t abort_icache;

struct abort {
	abort_func_t	*func;
	const char	*desc;
};

/*
 * How are the aborts handled?
 *
 * Undefined Code:
 *  - Always fatal as we do not know what it means.
 * Imprecise External Abort:
 *  - Always fatal, but can be handled somehow in the future.
 *    Now, due to buggy PCIe hardware, ignored.
 * Precise External Abort:
 *  - Always fatal, but who knows in the future???
 * Debug Event:
 *  - Special handling.
 * External Translation Abort (L1 & L2)
 *  - Always fatal as something is screwed up in page tables or hardware.
 * Domain Fault (L1 & L2):
 *  - Always fatal as we do not play games with domains.
 * Alignment Fault:
 *  - Everything should be aligned in the kernel with the exception of user
 *    to kernel and vice versa data copying, so if pcb_onfault is not set,
 *    it's fatal. We generate a signal in case of an abort from user mode.
 * Instruction cache maintenance:
 *  - According to the manual, this is a translation fault during a cache
 *    maintenance operation. So, it could be really complex in the SMP case
 *    and fuzzy too for cache operations working on virtual addresses. For
 *    now, we will consider this abort as fatal. In fact, no cache
 *    maintenance on unmapped virtual addresses should be called. As cache
 *    maintenance operations (except DMB, DSB, and Flush Prefetch Buffer)
 *    are privileged, the abort is fatal for user mode as well for now.
 *    (This is a good place to note that cache maintenance on a virtual
 *    address fills the TLB.)
 * Access Bit (L1 & L2):
 *  - Fast hardware emulation for kernel and user mode.
 * Translation Fault (L1 & L2):
 *  - The standard fault mechanism is used, including vm_fault().
 * Permission Fault (L1 & L2):
 *  - Fast hardware emulation of modify bits; in other cases, the standard
 *    fault mechanism is used, including vm_fault().
 */
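
/*
 * Illustrative note (not from the original source): with the ARMv7
 * short-descriptor format, the fault status is split across the FSR as
 * FS[3:0] in bits 3:0 and FS[4] in bit 10; FSR_TO_FAULT() merges these
 * into the 5-bit index used for the aborts[] table below.  For example,
 * a DFSR value of 0x805 has FS = 0b00101, i.e. "Translation Fault (L1)",
 * and bit 11 (WnR) set, meaning the faulting access was a write.
 */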

static const struct abort aborts[] = {
	{abort_fatal,	"Undefined Code (0x000)"},
	{abort_align,	"Alignment Fault"},
	{abort_fatal,	"Debug Event"},
	{NULL,		"Access Bit (L1)"},
	{NULL,		"Instruction cache maintenance"},
	{NULL,		"Translation Fault (L1)"},
	{NULL,		"Access Bit (L2)"},
	{NULL,		"Translation Fault (L2)"},

	{abort_fatal,	"External Abort"},
	{abort_fatal,	"Domain Fault (L1)"},
	{abort_fatal,	"Undefined Code (0x00A)"},
	{abort_fatal,	"Domain Fault (L2)"},
	{abort_fatal,	"External Translation Abort (L1)"},
	{NULL,		"Permission Fault (L1)"},
	{abort_fatal,	"External Translation Abort (L2)"},
	{NULL,		"Permission Fault (L2)"},

	{abort_fatal,	"TLB Conflict Abort"},
	{abort_fatal,	"Undefined Code (0x401)"},
	{abort_fatal,	"Undefined Code (0x402)"},
	{abort_fatal,	"Undefined Code (0x403)"},
	{abort_fatal,	"Undefined Code (0x404)"},
	{abort_fatal,	"Undefined Code (0x405)"},
	{abort_fatal,	"Asynchronous External Abort"},
	{abort_fatal,	"Undefined Code (0x407)"},

	{abort_fatal,	"Asynchronous Parity Error on Memory Access"},
	{abort_fatal,	"Parity Error on Memory Access"},
	{abort_fatal,	"Undefined Code (0x40A)"},
	{abort_fatal,	"Undefined Code (0x40B)"},
	{abort_fatal,	"Parity Error on Translation (L1)"},
	{abort_fatal,	"Undefined Code (0x40D)"},
	{abort_fatal,	"Parity Error on Translation (L2)"},
	{abort_fatal,	"Undefined Code (0x40F)"}
};
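
/*
 * Note (added for clarity): entries with a NULL handler are resolved
 * inline in abort_handler() below; access-bit and permission faults are
 * first offered to pmap_fault() for hardware emulation, translation
 * faults fall through to vm_fault(), and instruction cache maintenance
 * faults are dispatched to abort_icache().
 */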

static __inline void
call_trapsignal(struct thread *td, int sig, int code, vm_offset_t addr)
{
	ksiginfo_t ksi;

	CTR4(KTR_TRAP, "%s: addr: %#x, sig: %d, code: %d",
	    __func__, addr, sig, code);

	/*
	 * TODO: it would be nice to know whether we are serving a data
	 * or a prefetch abort here.
	 */

	ksiginfo_init_trap(&ksi);
	ksi.ksi_signo = sig;
	ksi.ksi_code = code;
	ksi.ksi_addr = (void *)addr;
	trapsignal(td, &ksi);
}

/*
 * abort_imprecise() handles the following abort:
 *
 *  FAULT_EA_IMPREC - Imprecise External Abort
 *
 * "Imprecise" means that we do not know where the abort happened,
 * thus FAR is undefined. The abort should never fire, but hot
 * plugging or accidental hardware failure can be the cause of it.
 * If the abort happens, it can even be on a different (thread) context.
 * Without any additional support, the abort is fatal, as we do not
 * know what really happened.
 *
 * QQQ: Some additional functionality, like pcb_onfault but global,
 *      can be implemented. Imprecise handlers could be registered
 *      which tell us if the abort is caused by something they know
 *      about. They should return one of three codes like:
 *		FAULT_IS_MINE,
 *		FAULT_CAN_BE_MINE,
 *		FAULT_IS_NOT_MINE.
 *      The handlers should be called until one of them returns
 *      FAULT_IS_MINE or all of them have been called. If all handlers
 *	return FAULT_IS_NOT_MINE, then the abort is fatal.
 */
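
/*
 * Hypothetical sketch only (not part of this file's API): the QQQ
 * registration scheme above could look roughly like the following,
 * where imprecise_handler_t, imprecise_handlers[], MAX_IMPRECISE, and
 * the FAULT_IS_* codes are all invented for illustration:
 *
 *	typedef int imprecise_handler_t(struct trapframe *, u_int fsr);
 *	static imprecise_handler_t *imprecise_handlers[MAX_IMPRECISE];
 *
 *	static int
 *	dispatch_imprecise(struct trapframe *tf, u_int fsr)
 *	{
 *		int i;
 *
 *		for (i = 0; i < MAX_IMPRECISE; i++) {
 *			if (imprecise_handlers[i] == NULL)
 *				continue;
 *			if ((*imprecise_handlers[i])(tf, fsr) ==
 *			    FAULT_IS_MINE)
 *				return (0);	(claimed, ignore the abort)
 *		}
 *		return (1);			(unclaimed, thus fatal)
 *	}
 */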
static __inline void
abort_imprecise(struct trapframe *tf, u_int fsr, u_int prefetch, bool usermode)
{

	/*
	 * XXX - We can get an imprecise abort as a result of an access
	 * to a not-present PCI/PCIe configuration space.
	 */
#if 0
	goto out;
#endif
	abort_fatal(tf, FAULT_EA_IMPREC, fsr, 0, prefetch, curthread, NULL);

	/*
	 * Returning from this function means that we ignore
	 * the abort for a good reason. Note that an imprecise abort
	 * could fire at any time, even in user mode.
	 */

#if 0
out:
	if (usermode)
		userret(curthread, tf);
#endif
}

/*
 * abort_debug() handles the following abort:
 *
 *  FAULT_DEBUG - Debug Event
 *
 */
static __inline void
abort_debug(struct trapframe *tf, u_int fsr, u_int prefetch, bool usermode,
    u_int far)
{

	if (usermode) {
		struct thread *td;

		td = curthread;
		call_trapsignal(td, SIGTRAP, TRAP_BRKPT, far);
		userret(td, tf);
	} else {
#ifdef KDB
		kdb_trap((prefetch) ? T_BREAKPOINT : T_WATCHPOINT, 0, tf);
#else
		printf("No debugger in kernel.\n");
#endif
	}
}

/*
 * Abort handler.
 *
 * FAR, FSR, and everything that can be lost after enabling
 * interrupts must be grabbed before the interrupts are
 * enabled. Note that once interrupts are enabled, we
 * could even migrate to another CPU ...
 *
 * TODO: move quick cases to ASM
 */
void
abort_handler(struct trapframe *tf, int prefetch)
{
	struct thread *td;
	vm_offset_t far, va;
	int idx, rv;
	uint32_t fsr;
	struct ksig ksig;
	struct proc *p;
	struct pcb *pcb;
	struct vm_map *map;
	struct vmspace *vm;
	vm_prot_t ftype;
	bool usermode;
	int bp_harden;
#ifdef INVARIANTS
	void *onfault;
#endif

	PCPU_INC(cnt.v_trap);
	td = curthread;

	fsr = (prefetch) ? cp15_ifsr_get() : cp15_dfsr_get();
#if __ARM_ARCH >= 7
	far = (prefetch) ? cp15_ifar_get() : cp15_dfar_get();
#else
	far = (prefetch) ? TRAPF_PC(tf) : cp15_dfar_get();
#endif
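
	/*
	 * Note (added for clarity): a prefetch abort is described by the
	 * IFSR/IFAR pair and a data abort by DFSR/DFAR.  On pre-ARMv7
	 * configurations this code does not use an IFAR; for a prefetch
	 * abort the faulting address is the PC itself, so the trapframe
	 * PC is used instead.
	 */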

	idx = FSR_TO_FAULT(fsr);
	usermode = TRAPF_USERMODE(tf);	/* Abort came from user mode? */

	/*
	 * Apply BP hardening by flushing the branch prediction cache
	 * for prefetch aborts on kernel addresses.
	 */
	if (__predict_false(prefetch && far > VM_MAXUSER_ADDRESS &&
	    (idx == FAULT_TRAN_L2 || idx == FAULT_PERM_L2))) {
		bp_harden = PCPU_GET(bp_harden_kind);
		if (bp_harden == PCPU_BP_HARDEN_KIND_BPIALL)
			_CP15_BPIALL();
		else if (bp_harden == PCPU_BP_HARDEN_KIND_ICIALLU)
			_CP15_ICIALLU();
	}
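
	/*
	 * Note (added for clarity): BPIALL invalidates the branch
	 * predictor; ICIALLU invalidates the entire instruction cache,
	 * which on the affected cores also flushes the predictor.  This
	 * mitigates branch-target-injection (Spectre v2 style) attacks
	 * that try to steer kernel speculation via a poisoned predictor.
	 */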

	if (usermode)
		td->td_frame = tf;

	CTR6(KTR_TRAP, "%s: fsr %#x (idx %u) far %#x prefetch %u usermode %d",
	    __func__, fsr, idx, far, prefetch, usermode);

	/*
	 * Firstly, handle aborts that are not directly related to mapping.
	 */
	if (__predict_false(idx == FAULT_EA_IMPREC)) {
		abort_imprecise(tf, fsr, prefetch, usermode);
		return;
	}

	if (__predict_false(idx == FAULT_DEBUG)) {
		abort_debug(tf, fsr, prefetch, usermode, far);
		return;
	}

	/*
	 * ARM has a set of unprivileged load and store instructions
	 * (LDRT/LDRBT/STRT/STRBT ...) which are supposed to be used in
	 * modes other than user mode, and the OS should recognize their
	 * aborts and behave appropriately. However, there is no reasonable
	 * way to do that in general unless we restrict the handling somehow.
	 *
	 * For now, these instructions are used only in copyin()/copyout()
	 * like functions, where user-mode buffers are checked in advance so
	 * that they are not from KVA space. Thus, no action is needed here.
	 */

	/*
	 * (1) Handle access and R/W hardware emulation aborts.
	 * (2) Check that the abort is not on pmap essential address ranges.
	 *     There is no way to fix it, so we don't even try.
	 */
	rv = pmap_fault(PCPU_GET(curpmap), far, fsr, idx, usermode);
	if (rv == KERN_SUCCESS)
		return;
#ifdef KDB
	if (kdb_active) {
		kdb_reenter();
		goto out;
	}
#endif
	if (rv == KERN_INVALID_ADDRESS)
		goto nogo;

	if (__predict_false((td->td_pflags & TDP_NOFAULTING) != 0)) {
		/*
		 * Due to both processor errata and lazy TLB invalidation when
		 * access restrictions are removed from virtual pages, memory
		 * accesses that are allowed by the physical mapping layer may
		 * nonetheless cause one spurious page fault per virtual page.
		 * When the thread is executing a "no faulting" section that
		 * is bracketed by vm_fault_{disable,enable}_pagefaults(),
		 * every page fault is treated as a spurious page fault,
		 * unless it accesses the same virtual address as the most
		 * recent page fault within the same "no faulting" section.
		 */
		if (td->td_md.md_spurflt_addr != far ||
		    (td->td_pflags & TDP_RESETSPUR) != 0) {
			td->td_md.md_spurflt_addr = far;
			td->td_pflags &= ~TDP_RESETSPUR;

			tlb_flush_local(far & ~PAGE_MASK);
			return;
		}
	} else {
		/*
		 * If we get a page fault while in a critical section, then
		 * it is most likely a fatal kernel page fault.  The kernel
		 * is already going to panic trying to get a sleep lock to
		 * do the VM lookup, so just consider it a fatal trap so the
		 * kernel can print out a useful trap message and even get
		 * to the debugger.
		 *
		 * If we get a page fault while holding a non-sleepable
		 * lock, then it is most likely a fatal kernel page fault.
		 * If WITNESS is enabled, then it's going to whine about
		 * bogus LORs with various VM locks, so just skip to the
		 * fatal trap handling directly.
		 */
		if (td->td_critnest != 0 ||
		    WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, NULL,
		    "Kernel page fault") != 0) {
			abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
			return;
		}
	}

	/* Re-enable interrupts if they were enabled previously. */
	if (td->td_md.md_spinlock_count == 0) {
		if (__predict_true((tf->tf_spsr & PSR_I) == 0))
			enable_interrupts(PSR_I);
		if (__predict_true((tf->tf_spsr & PSR_F) == 0))
			enable_interrupts(PSR_F);
	}
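
	/*
	 * Note (added for clarity): in the saved SPSR a clear PSR_I/PSR_F
	 * bit means the corresponding interrupt was enabled when the abort
	 * was taken.  Interrupts stay disabled while the thread holds spin
	 * locks (md_spinlock_count != 0), as spin lock sections must not
	 * be preempted.
	 */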

	p = td->td_proc;
	if (usermode) {
		td->td_pticks = 0;
		if (td->td_cowgen != p->p_cowgen)
			thread_cow_update(td);
	}

	/* Invoke the appropriate handler, if necessary. */
	if (__predict_false(aborts[idx].func != NULL)) {
		if ((aborts[idx].func)(tf, idx, fsr, far, prefetch, td, &ksig))
			goto do_trapsignal;
		goto out;
	}

	/*
	 * At this point, we're dealing with one of the following aborts:
	 *
	 *  FAULT_ICACHE   - I-cache maintenance
	 *  FAULT_TRAN_xx  - Translation
	 *  FAULT_PERM_xx  - Permission
	 */

	/*
	 * Don't pass a faulting cache operation to vm_fault(). We don't
	 * want to handle all the VM stuff at this moment.
	 */
	pcb = td->td_pcb;
	if (__predict_false(pcb->pcb_onfault == cachebailout)) {
		tf->tf_r0 = far;		/* return failing address */
		tf->tf_pc = (register_t)pcb->pcb_onfault;
		return;
	}

	/* Handle remaining I-cache aborts. */
	if (idx == FAULT_ICACHE) {
		if (abort_icache(tf, idx, fsr, far, prefetch, td, &ksig))
			goto do_trapsignal;
		goto out;
	}

	va = trunc_page(far);
	if (va >= KERNBASE) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode)
			goto nogo;

		map = kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory. If curproc
		 * is NULL or curproc->p_vmspace is NULL the fault is fatal.
		 */
		vm = (p != NULL) ? p->p_vmspace : NULL;
		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;
		if (!usermode && (td->td_intr_nesting_level != 0 ||
		    pcb->pcb_onfault == NULL)) {
			abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
			return;
		}
	}

	ftype = (fsr & FSR_WNR) ? VM_PROT_WRITE : VM_PROT_READ;
	if (prefetch)
		ftype |= VM_PROT_EXECUTE;
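
	/*
	 * Note (added for clarity): WnR is a DFSR bit, so it is only
	 * meaningful for data aborts.  For a prefetch abort, fsr holds
	 * the IFSR, which carries no write/read distinction, and ftype
	 * resolves to read plus execute.
	 */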

#ifdef DEBUG
	last_fault_code = fsr;
#endif

#ifdef INVARIANTS
	onfault = pcb->pcb_onfault;
	pcb->pcb_onfault = NULL;
#endif

	/* Fault in the page. */
	rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);

#ifdef INVARIANTS
	pcb->pcb_onfault = onfault;
#endif
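
	/*
	 * Note (added for clarity): under INVARIANTS, pcb_onfault is
	 * cleared across the vm_fault() call so that a nested abort taken
	 * inside the VM code is treated as fatal instead of being silently
	 * redirected through the copyin()/copyout() recovery path.
	 */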

	if (__predict_true(rv == KERN_SUCCESS))
		goto out;
nogo:
	if (!usermode) {
		if (td->td_intr_nesting_level == 0 &&
		    pcb->pcb_onfault != NULL) {
			tf->tf_r0 = rv;
			tf->tf_pc = (register_t)pcb->pcb_onfault;
			return;
		}
		CTR2(KTR_TRAP, "%s: vm_fault() failed with %d", __func__, rv);
		abort_fatal(tf, idx, fsr, far, prefetch, td, &ksig);
		return;
	}

	ksig.sig = SIGSEGV;
	ksig.code = (rv == KERN_PROTECTION_FAILURE) ? SEGV_ACCERR : SEGV_MAPERR;
	ksig.addr = far;

do_trapsignal:
	call_trapsignal(td, ksig.sig, ksig.code, ksig.addr);
out:
	if (usermode)
		userret(td, tf);
}

/*
 * abort_fatal() handles the following data aborts:
 *
 *  FAULT_DEBUG		- Debug Event
 *  FAULT_ACCESS_xx	- Access Bit
 *  FAULT_EA_PREC	- Precise External Abort
 *  FAULT_DOMAIN_xx	- Domain Fault
 *  FAULT_EA_TRAN_xx	- External Translation Abort
 *  FAULT_EA_IMPREC	- Imprecise External Abort
 *  + all undefined codes for ABORT
 *
 * We should never see these on a properly functioning system.
 *
 * This function is also called by the other handlers if they
 * detect a fatal problem.
 *
 * Note: If 'td' is NULL, we assume we're dealing with a prefetch abort.
 */
static int
abort_fatal(struct trapframe *tf, u_int idx, u_int fsr, u_int far,
    u_int prefetch, struct thread *td, struct ksig *ksig)
{
	bool usermode;
	const char *mode;
	const char *rw_mode;

	usermode = TRAPF_USERMODE(tf);
#ifdef KDTRACE_HOOKS
	if (!usermode) {
		if (dtrace_trap_func != NULL && (*dtrace_trap_func)(tf, far))
			return (0);
	}
#endif
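
	/*
	 * Note (added for clarity): dtrace_trap_func gives DTrace a chance
	 * to claim a kernel-mode fault (e.g. a bad access performed in
	 * probe context) before we treat it as fatal; returning 0 lets the
	 * caller resume execution without a panic.
	 */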

	mode = usermode ? "user" : "kernel";
	rw_mode = fsr & FSR_WNR ? "write" : "read";
	disable_interrupts(PSR_I|PSR_F);

	if (td != NULL) {
		printf("Fatal %s mode data abort: '%s' on %s\n", mode,
		    aborts[idx].desc, rw_mode);
		printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr);
		if (idx != FAULT_EA_IMPREC)
			printf("%08x, ", far);
		else
			printf("Invalid,  ");
		printf("spsr=%08x\n", tf->tf_spsr);
	} else {
		printf("Fatal %s mode prefetch abort at 0x%08x\n",
		    mode, tf->tf_pc);
		printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr);
	}

	printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n",
	    tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3);
	printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n",
	    tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7);
	printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n",
	    tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11);
	printf("r12=%08x, ", tf->tf_r12);

	if (usermode)
		printf("usp=%08x, ulr=%08x",
		    tf->tf_usr_sp, tf->tf_usr_lr);
	else
		printf("ssp=%08x, slr=%08x",
		    tf->tf_svc_sp, tf->tf_svc_lr);
	printf(", pc =%08x\n\n", tf->tf_pc);

#ifdef KDB
	if (debugger_on_trap) {
		kdb_why = KDB_WHY_TRAP;
		kdb_trap(fsr, 0, tf);
		kdb_why = KDB_WHY_UNSET;
	}
#endif
	panic("Fatal abort");
	/*NOTREACHED*/
}

/*
 * abort_align() handles the following data abort:
 *
 *  FAULT_ALIGN - Alignment fault
 *
 * Everything should be aligned in the kernel with the exception of user to
 * kernel and vice versa data copying, so if pcb_onfault is not set, it's
 * fatal. We generate a signal in case of an abort from user mode.
 */
static int
abort_align(struct trapframe *tf, u_int idx, u_int fsr, u_int far,
    u_int prefetch, struct thread *td, struct ksig *ksig)
{
	bool usermode;

	usermode = TRAPF_USERMODE(tf);
	if (!usermode) {
		if (td != NULL && td->td_intr_nesting_level == 0 &&
		    td->td_pcb->pcb_onfault != NULL) {
			tf->tf_r0 = EFAULT;
			tf->tf_pc = (register_t)td->td_pcb->pcb_onfault;
			return (0);
		}
		abort_fatal(tf, idx, fsr, far, prefetch, td, ksig);
	}
	/* Deliver a bus error signal to the process. */
	ksig->code = BUS_ADRALN;
	ksig->sig = SIGBUS;
	ksig->addr = far;
	return (1);
}

/*
 * abort_icache() handles the following data abort:
 *
 * FAULT_ICACHE - Instruction cache maintenance
 *
 * According to the manual, FAULT_ICACHE is a translation fault during a
 * cache maintenance operation. In fact, no cache maintenance operation on
 * unmapped virtual addresses should be called. As cache maintenance
 * operations (except DMB, DSB, and Flush Prefetch Buffer) are privileged,
 * the abort is considered fatal for now. However, all the issues with
 * cache maintenance operations on virtual addresses could be really
 * complex and fuzzy in the SMP case, so maybe in the future the standard
 * fault mechanism should be used here, including the vm_fault() call.
 */
static int
abort_icache(struct trapframe *tf, u_int idx, u_int fsr, u_int far,
    u_int prefetch, struct thread *td, struct ksig *ksig)
{

	abort_fatal(tf, idx, fsr, far, prefetch, td, ksig);
	return (0);
}