1/*	$OpenBSD: trap.c,v 1.102 2015/02/10 10:15:30 miod Exp $	*/
2/*
3 * Copyright (c) 2004, Miodrag Vallat.
4 * Copyright (c) 1998 Steve Murphree, Jr.
5 * Copyright (c) 1996 Nivas Madhur
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *      This product includes software developed by Nivas Madhur.
19 * 4. The name of the author may not be used to endorse or promote products
20 *    derived from this software without specific prior written permission
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34/*
35 * Mach Operating System
36 * Copyright (c) 1991 Carnegie Mellon University
37 * Copyright (c) 1991 OMRON Corporation
38 * All Rights Reserved.
39 *
40 * Permission to use, copy, modify and distribute this software and its
41 * documentation is hereby granted, provided that both the copyright
42 * notice and this permission notice appear in all copies of the
43 * software, derivative works or modified versions, and any portions
44 * thereof, and that both notices appear in supporting documentation.
45 *
46 */
47
48#include <sys/types.h>
49#include <sys/param.h>
50#include <sys/proc.h>
51#include <sys/signalvar.h>
52#include <sys/user.h>
53#include <sys/syscall.h>
54#include <sys/syscall_mi.h>
55#include <sys/systm.h>
56
57#include <uvm/uvm_extern.h>
58
59#include <machine/asm_macro.h>
60#include <machine/cmmu.h>
61#include <machine/cpu.h>
62#ifdef M88100
63#include <machine/m88100.h>
64#include <machine/m8820x.h>
65#endif
66#ifdef M88110
67#include <machine/m88110.h>
68#endif
69#include <machine/fpu.h>
70#include <machine/pcb.h>
71#include <machine/psl.h>
72#include <machine/trap.h>
73
74#include <machine/db_machdep.h>
75
76#define SSBREAKPOINT (0xF000D1F8U) /* Single Step Breakpoint */
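/* (0xf000d1f8 is a `tb0 0, r0, 0x1f8' trap, i.e. the same tb0 0, r0 form as
 *  SYSCALL_INSTR defined further below, with trap vector 0x1f8 = 504.) */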
77
78#define USERMODE(PSR)   (((PSR) & PSR_MODE) == 0)
79#define SYSTEMMODE(PSR) (((PSR) & PSR_MODE) != 0)
80
81void printtrap(int, struct trapframe *);
82__dead void panictrap(int, struct trapframe *);
83__dead void error_fatal(struct trapframe *);
84int double_reg_fixup(struct trapframe *, int);
85int ss_put_value(struct proc *, vaddr_t, u_int);
86
87extern void regdump(struct trapframe *f);
88
89const char *trap_type[] = {
90	"Reset",
91	"Interrupt Exception",
92	"Instruction Access",
93	"Data Access Exception",
94	"Misaligned Access",
95	"Unimplemented Opcode",
96	"Privilege Violation",
97	"Bounds Check Violation",
98	"Illegal Integer Divide",
99	"Integer Overflow",
100	"Error Exception",
101	"Non-Maskable Exception",
102};
103
104const int trap_types = sizeof trap_type / sizeof trap_type[0];
105
106#ifdef M88100
107const char *pbus_exception_type[] = {
108	"Success (No Fault)",
109	"unknown 1",
110	"unknown 2",
111	"Bus Error",
112	"Segment Fault",
113	"Page Fault",
114	"Supervisor Violation",
115	"Write Violation",
116};
117#endif
118
119void
120printtrap(int type, struct trapframe *frame)
121{
122#ifdef M88100
123	if (CPU_IS88100) {
124		if (type == 2) {
125			/* instruction exception */
126			printf("\nInstr access fault (%s) v = %lx, frame %p\n",
127			    pbus_exception_type[
128			      CMMU_PFSR_FAULT(frame->tf_ipfsr)],
129			    frame->tf_sxip & XIP_ADDR, frame);
130		} else if (type == 3) {
131			/* data access exception */
132			printf("\nData access fault (%s) v = %lx, frame %p\n",
133			    pbus_exception_type[
134			      CMMU_PFSR_FAULT(frame->tf_dpfsr)],
135			    frame->tf_sxip & XIP_ADDR, frame);
136		} else
137			printf("\nTrap type %d, v = %lx, frame %p\n",
138			    type, frame->tf_sxip & XIP_ADDR, frame);
139	}
140#endif
141#ifdef M88110
142	if (CPU_IS88110) {
143		printf("\nTrap type %d, v = %lx, frame %p\n",
144		    type, frame->tf_exip, frame);
145	}
146#endif
147#ifdef DDB
148	regdump(frame);
149#endif
150}
151
152__dead void
153panictrap(int type, struct trapframe *frame)
154{
155	static int panicing = 0;
156
157	if (panicing++ == 0)
158		printtrap(type, frame);
159	if ((u_int)type < trap_types)
160		panic(trap_type[type]);
161	else
162		panic("trap %d", type);
163	/*NOTREACHED*/
164}
165
166/*
167 * Handle external interrupts.
168 */
169void
170interrupt(struct trapframe *frame)
171{
172	struct cpu_info *ci = curcpu();
173
174	ci->ci_intrdepth++;
175	md_interrupt_func(frame);
176	ci->ci_intrdepth--;
177}
178
179#ifdef M88110
180/*
181 * Handle non-maskable interrupts.
182 */
183int
184nmi(struct trapframe *frame)
185{
186	return md_nmi_func(frame);
187}
188
189/*
190 * Reenable non-maskable interrupts.
191 */
192void
193nmi_wrapup(struct trapframe *frame)
194{
195	md_nmi_wrapup_func(frame);
196}
197#endif
198
199/*
200 * Handle asynchronous software traps.
201 */
202void
203ast(struct trapframe *frame)
204{
205	struct cpu_info *ci = curcpu();
206	struct proc *p = ci->ci_curproc;
207
208	p->p_md.md_astpending = 0;
209
210	uvmexp.softs++;
211	mi_ast(p, ci->ci_want_resched);
212	userret(p);
213}
214
215#ifdef M88100
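/*
 * Main trap handler for 88100-class CPUs: instruction and data access
 * faults, misaligned accesses, arithmetic exceptions, breakpoints, and
 * the other exceptions dispatched here by the exception vectors.
 */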
216void
217m88100_trap(u_int type, struct trapframe *frame)
218{
219	struct proc *p;
220	struct vm_map *map;
221	vaddr_t va, pcb_onfault;
222	vm_prot_t ftype;
223	int fault_type, pbus_type;
224	u_long fault_code;
225	vaddr_t fault_addr;
226	struct vmspace *vm;
227	union sigval sv;
228	int result;
229#ifdef DDB
230	int s;
231	u_int psr;
232#endif
233	int sig = 0;
234
235	uvmexp.traps++;
236	if ((p = curproc) == NULL)
237		p = &proc0;
238
239	if (USERMODE(frame->tf_epsr)) {
240		type += T_USER;
241		p->p_md.md_tf = frame;	/* for ptrace/signals */
242		refreshcreds(p);
243	}
244	fault_type = SI_NOINFO;
245	fault_code = 0;
246	fault_addr = frame->tf_sxip & XIP_ADDR;
247
248	switch (type) {
249	default:
250	case T_ILLFLT:
251lose:
252		panictrap(frame->tf_vector, frame);
253		break;
254		/*NOTREACHED*/
255
256#if defined(DDB)
257	case T_KDB_BREAK:
258		s = splhigh();
259		set_psr((psr = get_psr()) & ~PSR_IND);
260		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
261		set_psr(psr);
262		splx(s);
263		return;
264	case T_KDB_ENTRY:
265		s = splhigh();
266		set_psr((psr = get_psr()) & ~PSR_IND);
267		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
268		set_psr(psr);
269		splx(s);
270		return;
271#endif /* DDB */
272	case T_MISALGNFLT:
273		printf("kernel misaligned access exception @0x%08lx\n",
274		    frame->tf_sxip);
275		goto lose;
276	case T_INSTFLT:
277		/* kernel mode instruction access fault.
278		 * Should never, never happen for a non-paged kernel.
279		 */
280#ifdef TRAPDEBUG
281		pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
282		printf("Kernel Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
283		    pbus_type, pbus_exception_type[pbus_type],
284		    fault_addr, frame, frame->tf_cpu);
285#endif
286		goto lose;
287	case T_DATAFLT:
288		/* kernel mode data fault */
289
290		/* data fault on the user address? */
291		if ((frame->tf_dmt0 & DMT_DAS) == 0) {
292			KERNEL_LOCK();
293			goto user_fault;
294		}
295
296		fault_addr = frame->tf_dma0;
297		if (frame->tf_dmt0 & (DMT_WRITE|DMT_LOCKBAR)) {
298			ftype = PROT_READ | PROT_WRITE;
299			fault_code = PROT_WRITE;
300		} else {
301			ftype = PROT_READ;
302			fault_code = PROT_READ;
303		}
304
305		va = trunc_page((vaddr_t)fault_addr);
306
307		KERNEL_LOCK();
308		vm = p->p_vmspace;
309		map = kernel_map;
310
311		pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
312#ifdef TRAPDEBUG
313		printf("Kernel Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
314		    pbus_type, pbus_exception_type[pbus_type],
315		    fault_addr, frame, frame->tf_cpu);
316#endif
317
318		pcb_onfault = p->p_addr->u_pcb.pcb_onfault;
319		switch (pbus_type) {
320		case CMMU_PFSR_SUCCESS:
321			/*
322			 * The fault was resolved. Call data_access_emulation
323			 * to drain the data unit pipe line and reset dmt0
324			 * so that trap won't get called again.
325			 */
326			p->p_addr->u_pcb.pcb_onfault = 0;
327			data_access_emulation((u_int *)frame);
328			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
329			frame->tf_dmt0 = 0;
330			frame->tf_dpfsr = 0;
331			KERNEL_UNLOCK();
332			return;
333		case CMMU_PFSR_SFAULT:
334		case CMMU_PFSR_PFAULT:
335			p->p_addr->u_pcb.pcb_onfault = 0;
336			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
337			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
338			if (result == 0) {
339				/*
340				 * We could resolve the fault. Call
341				 * data_access_emulation to drain the data
342				 * unit pipe line and reset dmt0 so that trap
343				 * won't get called again.
344				 */
345				p->p_addr->u_pcb.pcb_onfault = 0;
346				data_access_emulation((u_int *)frame);
347				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
348				frame->tf_dmt0 = 0;
349				frame->tf_dpfsr = 0;
350				KERNEL_UNLOCK();
351				return;
352			} else if (pcb_onfault != 0) {
353				/*
354				 * This could be a fault caused in copyout*()
355				 * while accessing kernel space.
356				 */
357				frame->tf_snip = pcb_onfault | NIP_V;
358				frame->tf_sfip = (pcb_onfault + 4) | FIP_V;
359				/*
360				 * Continue as if the fault had been resolved,
361				 * but do not try to complete the faulting
362				 * access.
363				 */
364				frame->tf_dmt0 = 0;
365				frame->tf_dpfsr = 0;
366				KERNEL_UNLOCK();
367				return;
368			}
369			break;
370		}
371#ifdef TRAPDEBUG
372		printf("PBUS Fault %d (%s) va = 0x%x\n", pbus_type,
373		    pbus_exception_type[pbus_type], va);
374#endif
375		KERNEL_UNLOCK();
376		goto lose;
377		/* NOTREACHED */
378	case T_INSTFLT+T_USER:
379		/* User mode instruction access fault */
380		/* FALLTHROUGH */
381	case T_DATAFLT+T_USER:
382		KERNEL_LOCK();
383user_fault:
384		if (type == T_INSTFLT + T_USER) {
385			pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
386#ifdef TRAPDEBUG
387			printf("User Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
388			    pbus_type, pbus_exception_type[pbus_type],
389			    fault_addr, frame, frame->tf_cpu);
390#endif
391		} else {
392			fault_addr = frame->tf_dma0;
393			pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
394#ifdef TRAPDEBUG
395			printf("User Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
396			    pbus_type, pbus_exception_type[pbus_type],
397			    fault_addr, frame, frame->tf_cpu);
398#endif
399		}
400
401		if (frame->tf_dmt0 & (DMT_WRITE | DMT_LOCKBAR)) {
402			ftype = PROT_READ | PROT_WRITE;
403			fault_code = PROT_WRITE;
404		} else {
405			ftype = PROT_READ;
406			fault_code = PROT_READ;
407		}
408
409		va = trunc_page((vaddr_t)fault_addr);
410
411		vm = p->p_vmspace;
412		map = &vm->vm_map;
413		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
414			p->p_addr->u_pcb.pcb_onfault = 0;
415
416		/* Call uvm_fault() to resolve non-bus error faults */
417		switch (pbus_type) {
418		case CMMU_PFSR_SUCCESS:
419			result = 0;
420			break;
421		case CMMU_PFSR_BERROR:
422			result = EACCES;
423			break;
424		default:
425			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
426			if (result == EACCES)
427				result = EFAULT;
428			break;
429		}
430
431		p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
432
433		if (result == 0 && (caddr_t)va >= vm->vm_maxsaddr)
434			uvm_grow(p, va);
435
436		if (result == 0) {
437			if (type == T_INSTFLT + T_USER) {
438				m88100_rewind_insn(&(frame->tf_regs));
439				/* clear the error bit */
440				frame->tf_sfip &= ~FIP_E;
441				frame->tf_snip &= ~NIP_E;
442				frame->tf_ipfsr = 0;
443			} else {
444				/*
445			 	 * We could resolve the fault. Call
446			 	 * data_access_emulation to drain the data unit
447			 	 * pipe line and reset dmt0 so that trap won't
448			 	 * get called again.
449			 	 */
450				p->p_addr->u_pcb.pcb_onfault = 0;
451				data_access_emulation((u_int *)frame);
452				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
453				frame->tf_dmt0 = 0;
454				frame->tf_dpfsr = 0;
455			}
456		} else {
457			/*
458			 * This could be a fault caused in copyin*()
459			 * while accessing user space.
460			 */
461			if (pcb_onfault != 0) {
462				frame->tf_snip = pcb_onfault | NIP_V;
463				frame->tf_sfip = (pcb_onfault + 4) | FIP_V;
464				/*
465				 * Continue as if the fault had been resolved,
466				 * but do not try to complete the faulting
467				 * access.
468				 */
469				frame->tf_dmt0 = 0;
470				frame->tf_dpfsr = 0;
471			} else {
472				sig = result == EACCES ? SIGBUS : SIGSEGV;
473				fault_type = result == EACCES ?
474				    BUS_ADRERR : SEGV_MAPERR;
475			}
476		}
477		KERNEL_UNLOCK();
478		break;
479	case T_MISALGNFLT+T_USER:
480		/* Fix any misaligned ld.d or st.d instructions */
481		sig = double_reg_fixup(frame, T_MISALGNFLT);
482		fault_type = BUS_ADRALN;
483		break;
484	case T_PRIVINFLT+T_USER:
485	case T_ILLFLT+T_USER:
486#ifndef DDB
487	case T_KDB_BREAK:
488	case T_KDB_ENTRY:
489#endif
490	case T_KDB_BREAK+T_USER:
491	case T_KDB_ENTRY+T_USER:
492	case T_KDB_TRACE:
493	case T_KDB_TRACE+T_USER:
494		sig = SIGILL;
495		break;
496	case T_BNDFLT+T_USER:
497		sig = SIGFPE;
498		break;
499	case T_ZERODIV+T_USER:
500		sig = SIGFPE;
501		fault_type = FPE_INTDIV;
502		break;
503	case T_OVFFLT+T_USER:
504		sig = SIGFPE;
505		fault_type = FPE_INTOVF;
506		break;
507	case T_FPEPFLT+T_USER:
508		m88100_fpu_precise_exception(frame);
509		goto userexit;
510	case T_FPEIFLT+T_USER:
511		m88100_fpu_imprecise_exception(frame);
512		goto userexit;
513	case T_SIGSYS+T_USER:
514		sig = SIGSYS;
515		break;
516	case T_STEPBPT+T_USER:
517#ifdef PTRACE
518		/*
519		 * This trap is used by the kernel to support single-step
520		 * debugging (although any user could generate this trap
521		 * which should probably be handled differently). When a
522		 * process is continued by a debugger with the PT_STEP
523		 * function of ptrace (single step), the kernel inserts
524		 * one or two breakpoints in the user process so that only
525		 * one instruction (or two in the case of a delayed branch)
526		 * is executed.  When this breakpoint is hit, we get the
527		 * T_STEPBPT trap.
528		 */
529		{
530			u_int instr;
531			vaddr_t pc = PC_REGS(&frame->tf_regs);
532
533			/* read break instruction */
534			copyin((caddr_t)pc, &instr, sizeof(u_int));
535
536			/* check and see if we got here by accident */
537			if ((p->p_md.md_bp0va != pc &&
538			     p->p_md.md_bp1va != pc) ||
539			    instr != SSBREAKPOINT) {
540				sig = SIGTRAP;
541				fault_type = TRAP_TRACE;
542				break;
543			}
544
545			/* restore original instruction and clear breakpoint */
546			KERNEL_LOCK();
547			if (p->p_md.md_bp0va == pc) {
548				ss_put_value(p, pc, p->p_md.md_bp0save);
549				p->p_md.md_bp0va = 0;
550			}
551			if (p->p_md.md_bp1va == pc) {
552				ss_put_value(p, pc, p->p_md.md_bp1save);
553				p->p_md.md_bp1va = 0;
554			}
555			KERNEL_UNLOCK();
556
557			frame->tf_sxip = pc | NIP_V;
558			sig = SIGTRAP;
559			fault_type = TRAP_BRKPT;
560		}
561#else
562		sig = SIGTRAP;
563		fault_type = TRAP_TRACE;
564#endif
565		break;
566
567	case T_USERBPT+T_USER:
568		/*
569		 * This trap is meant to be used by debuggers to implement
570		 * breakpoint debugging.  When we get this trap, we just
571		 * return a signal which gets caught by the debugger.
572		 */
573		sig = SIGTRAP;
574		fault_type = TRAP_BRKPT;
575		break;
576
577	}
578
579	/*
580	 * If trap from supervisor mode, just return
581	 */
582	if (type < T_USER)
583		return;
584
585	if (sig) {
586		sv.sival_ptr = (void *)fault_addr;
587		KERNEL_LOCK();
588		trapsignal(p, sig, fault_code, fault_type, sv);
589		KERNEL_UNLOCK();
590		/*
591		 * don't want multiple faults - we are going to
592		 * deliver signal.
593		 */
594		frame->tf_dmt0 = 0;
595		frame->tf_ipfsr = frame->tf_dpfsr = 0;
596	}
597
598userexit:
599	userret(p);
600}
601#endif /* M88100 */
602
603#ifdef M88110
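/*
 * Main trap handler for 88110-class CPUs.  Same role as m88100_trap()
 * above, but using the mc88110 fault reporting registers (exip/enip,
 * isr/dsr) and working around the relevant mc88110 errata.
 */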
604void
605m88110_trap(u_int type, struct trapframe *frame)
606{
607	struct proc *p;
608	struct vm_map *map;
609	vaddr_t va, pcb_onfault;
610	vm_prot_t ftype;
611	int fault_type;
612	u_long fault_code;
613	vaddr_t fault_addr;
614	struct vmspace *vm;
615	union sigval sv;
616	int result;
617#ifdef DDB
618	int s;
619	u_int psr;
620#endif
621	int sig = 0;
622
623	uvmexp.traps++;
624	if ((p = curproc) == NULL)
625		p = &proc0;
626
627	fault_type = SI_NOINFO;
628	fault_code = 0;
629	fault_addr = frame->tf_exip & XIP_ADDR;
630
631	/*
632	 * 88110 errata #16 (4.2) or #3 (5.1.1):
633	 * ``bsr, br, bcnd, jsr and jmp instructions with the .n extension
634	 *   can cause the enip value to be incremented by 4 incorrectly
635	 *   if the instruction in the delay slot is the first word of a
636	 *   page which misses in the mmu and results in a hardware
637	 *   tablewalk which encounters an exception or an invalid
638	 *   descriptor.  The exip value in this case will point to the
639	 *   first word of the page, and the D bit will be set.
640	 *
641	 *   Note: if the instruction is a jsr.n r1, r1 will be overwritten
642	 *   with erroneous data.  Therefore, no recovery is possible. Do
643	 *   not allow this instruction to occupy the last word of a page.
644	 *
645	 *   Suggested fix: recover in general by backing up the exip by 4
646	 *   and clearing the delay bit before an rte when the lower 3 hex
647	 *   digits of the exip are 001.''
648	 */
649	if ((frame->tf_exip & PAGE_MASK) == 0x00000001 && type == T_INSTFLT) {
650		u_int instr;
651
652		/*
653		 * Note that we have initialized fault_addr above, so that
654		 * signals provide the correct address if necessary.
655		 */
656		frame->tf_exip = (frame->tf_exip & ~1) - 4;
657
658		/*
659		 * Check the instruction at the (backed up) exip.
660		 * If it is a jsr.n, abort.
661		 */
662		if (!USERMODE(frame->tf_epsr)) {
663			instr = *(u_int *)fault_addr;
664			if (instr == 0xf400cc01)
665				panic("mc88110 errata #16, exip 0x%lx enip 0x%lx",
666				    (frame->tf_exip + 4) | 1, frame->tf_enip);
667		} else {
668			/* copyin here should not fail */
669			if (copyin((const void *)frame->tf_exip, &instr,
670			    sizeof instr) == 0 &&
671			    instr == 0xf400cc01) {
672				uprintf("mc88110 errata #16, exip 0x%lx enip 0x%lx",
673				    (frame->tf_exip + 4) | 1, frame->tf_enip);
674				sig = SIGILL;
675			}
676		}
677	}
678
679	if (USERMODE(frame->tf_epsr)) {
680		type += T_USER;
681		p->p_md.md_tf = frame;	/* for ptrace/signals */
682		refreshcreds(p);
683	}
684
685	if (sig != 0)
686		goto deliver;
687
688	switch (type) {
689	default:
690lose:
691		panictrap(frame->tf_vector, frame);
692		break;
693		/*NOTREACHED*/
694
695#ifdef DEBUG
696	case T_110_DRM+T_USER:
697	case T_110_DRM:
698		printf("DMMU read miss: Hardware Table Searches should be enabled!\n");
699		goto lose;
700	case T_110_DWM+T_USER:
701	case T_110_DWM:
702		printf("DMMU write miss: Hardware Table Searches should be enabled!\n");
703		goto lose;
704	case T_110_IAM+T_USER:
705	case T_110_IAM:
706		printf("IMMU miss: Hardware Table Searches should be enabled!\n");
707		goto lose;
708#endif
709
710#ifdef DDB
711	case T_KDB_TRACE:
712		s = splhigh();
713		set_psr((psr = get_psr()) & ~PSR_IND);
714		ddb_break_trap(T_KDB_TRACE, (db_regs_t*)frame);
715		set_psr(psr);
716		splx(s);
717		return;
718	case T_KDB_BREAK:
719		s = splhigh();
720		set_psr((psr = get_psr()) & ~PSR_IND);
721		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
722		set_psr(psr);
723		splx(s);
724		return;
725	case T_KDB_ENTRY:
726		s = splhigh();
727		set_psr((psr = get_psr()) & ~PSR_IND);
728		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
729		set_psr(psr);
730		/* skip trap instruction */
731		m88110_skip_insn(frame);
732		splx(s);
733		return;
734#endif /* DDB */
735	case T_ILLFLT:
736		/*
737		 * The 88110 seems to trigger an instruction fault in
738		 * supervisor mode when running the following sequence:
739		 *
740		 *	bcnd.n cond, reg, 1f
741		 *	arithmetic insn
742		 *	...
743		 *  	the same exact arithmetic insn
744		 *  1:	another arithmetic insn stalled by the previous one
745		 *	...
746		 *
747		 * The exception is reported with exip pointing to the
748		 * branch address. I don't know, at this point, if there
749		 * is any better workaround than the aggressive one
750		 * implemented below; I don't see how this could relate to
751		 * any of the 88110 errata (although it might be related to
752		 * branch prediction).
753		 *
754		 * For the record, the exact sequence triggering the
755		 * spurious exception is:
756		 *
757		 *	bcnd.n	eq0, r2,  1f
758		 *	 or	r25, r0,  r22
759		 *	bsr	somewhere
760		 *	or	r25, r0,  r22
761		 *  1:	cmp	r13, r25, r20
762		 *
763		 * within the same cache line.
764		 *
765		 * Simply ignoring the exception and returning does not
766		 * cause the exception to disappear. Clearing the
767		 * instruction cache works, but on 88110+88410 systems,
768		 * the 88410 needs to be invalidated as well. (note that
769		 * the size passed to the flush routines does not matter
770		 * since there is no way to flush a subset of the 88110
771		 * I$ anyway)
772		 */
773	    {
774		extern void *kernel_text, *etext;
775
776		if (fault_addr >= (vaddr_t)&kernel_text &&
777		    fault_addr < (vaddr_t)&etext) {
778			cmmu_icache_inv(curcpu()->ci_cpuid,
779			    trunc_page(fault_addr), PAGE_SIZE);
780			cmmu_cache_wbinv(curcpu()->ci_cpuid,
781			    trunc_page(fault_addr), PAGE_SIZE);
782			return;
783		}
784	    }
785		goto lose;
786	case T_MISALGNFLT:
787		printf("kernel misaligned access exception @%p\n",
788		    (void *)frame->tf_exip);
789		goto lose;
790	case T_INSTFLT:
791		/* kernel mode instruction access fault.
792		 * Should never, never happen for a non-paged kernel.
793		 */
794#ifdef TRAPDEBUG
795		printf("Kernel Instruction fault exip %x isr %x ilar %x\n",
796		    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
797#endif
798		goto lose;
799
800	case T_DATAFLT:
801		/* kernel mode data fault */
802
803		/* data fault on the user address? */
804		if ((frame->tf_dsr & CMMU_DSR_SU) == 0) {
805			KERNEL_LOCK();
806			goto m88110_user_fault;
807		}
808
809#ifdef TRAPDEBUG
810		printf("Kernel Data access fault exip %x dsr %x dlar %x\n",
811		    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
812#endif
813
814		fault_addr = frame->tf_dlar;
815		if (frame->tf_dsr & CMMU_DSR_RW) {
816			ftype = PROT_READ;
817			fault_code = PROT_READ;
818		} else {
819			ftype = PROT_READ | PROT_WRITE;
820			fault_code = PROT_WRITE;
821		}
822
823		va = trunc_page((vaddr_t)fault_addr);
824
825		KERNEL_LOCK();
826		vm = p->p_vmspace;
827		map = kernel_map;
828
829		if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
830			/*
831			 * On a segment or a page fault, call uvm_fault() to
832			 * resolve the fault.
833			 */
834			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
835				p->p_addr->u_pcb.pcb_onfault = 0;
836			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
837			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
838			/*
839			 * This could be a fault caused in copyout*()
840			 * while accessing kernel space.
841			 */
842			if (result != 0 && pcb_onfault != 0) {
843				frame->tf_exip = pcb_onfault;
844				/*
845				 * Continue as if the fault had been resolved.
846				 */
847				result = 0;
848			}
849			if (result == 0) {
850				KERNEL_UNLOCK();
851				return;
852			}
853		}
854		KERNEL_UNLOCK();
855		goto lose;
856	case T_INSTFLT+T_USER:
857		/* User mode instruction access fault */
858		/* FALLTHROUGH */
859	case T_DATAFLT+T_USER:
860		KERNEL_LOCK();
861m88110_user_fault:
862		if (type == T_INSTFLT+T_USER) {
863			ftype = PROT_READ;
864			fault_code = PROT_READ;
865#ifdef TRAPDEBUG
866			printf("User Instruction fault exip %x isr %x ilar %x\n",
867			    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
868#endif
869		} else {
870			fault_addr = frame->tf_dlar;
871			if (frame->tf_dsr & CMMU_DSR_RW) {
872				ftype = PROT_READ;
873				fault_code = PROT_READ;
874			} else {
875				ftype = PROT_READ | PROT_WRITE;
876				fault_code = PROT_WRITE;
877			}
878#ifdef TRAPDEBUG
879			printf("User Data access fault exip %x dsr %x dlar %x\n",
880			    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
881#endif
882		}
883
884		va = trunc_page((vaddr_t)fault_addr);
885
886		vm = p->p_vmspace;
887		map = &vm->vm_map;
888		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
889			p->p_addr->u_pcb.pcb_onfault = 0;
890
891		/*
892		 * Call uvm_fault() to resolve non-bus error faults
893		 * whenever possible.
894		 */
895		if (type == T_INSTFLT+T_USER) {
896			/* instruction faults */
897			if (frame->tf_isr &
898			    (CMMU_ISR_BE | CMMU_ISR_SP | CMMU_ISR_TBE)) {
899				/* bus error, supervisor protection */
900				result = EACCES;
901			} else
902			if (frame->tf_isr & (CMMU_ISR_SI | CMMU_ISR_PI)) {
903				/* segment or page fault */
904				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
905				if (result == EACCES)
906					result = EFAULT;
907			} else {
908#ifdef TRAPDEBUG
909				printf("Unexpected Instruction fault isr %x\n",
910				    frame->tf_isr);
911#endif
912				KERNEL_UNLOCK();
913				goto lose;
914			}
915		} else {
916			/* data faults */
917			if (frame->tf_dsr & CMMU_DSR_BE) {
918				/* bus error */
919				result = EACCES;
920			} else
921			if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
922				/* segment or page fault */
923				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
924				if (result == EACCES)
925					result = EFAULT;
926			} else
927			if (frame->tf_dsr & (CMMU_DSR_CP | CMMU_DSR_WA)) {
928				/* copyback or write allocate error */
929				result = EACCES;
930			} else
931			if (frame->tf_dsr & CMMU_DSR_WE) {
932				/* write fault  */
933				/* This could be a write protection fault or an
934				 * exception to set the used and modified bits
935				 * in the pte. Basically, if we got a write
936				 * error, then we already have a pte entry that
937				 * faulted in from a previous seg fault or page
938				 * fault.
939				 * Get the pte and check the status of the
940				 * modified and valid bits to determine if this
941				 * is indeed a real write fault.  XXX smurph
942				 */
943				if (pmap_set_modify(map->pmap, va)) {
944#ifdef TRAPDEBUG
945					printf("Corrected userland write fault, pmap %p va %p\n",
946					    map->pmap, va);
947#endif
948					result = 0;
949				} else {
950					/* must be a real wp fault */
951#ifdef TRAPDEBUG
952					printf("Uncorrected userland write fault, pmap %p va %p\n",
953					    map->pmap, va);
954#endif
955					result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
956					if (result == EACCES)
957						result = EFAULT;
958				}
959			} else {
960#ifdef TRAPDEBUG
961				printf("Unexpected Data access fault dsr %x\n",
962				    frame->tf_dsr);
963#endif
964				KERNEL_UNLOCK();
965				goto lose;
966			}
967		}
968		p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
969
970		if (result == 0 && (caddr_t)va >= vm->vm_maxsaddr)
971			uvm_grow(p, va);
972		KERNEL_UNLOCK();
973
974		/*
975		 * This could be a fault caused in copyin*()
976		 * while accessing user space.
977		 */
978		if (result != 0 && pcb_onfault != 0) {
979			frame->tf_exip = pcb_onfault;
980			/*
981			 * Continue as if the fault had been resolved.
982			 */
983			result = 0;
984		}
985
986		if (result != 0) {
987			sig = result == EACCES ? SIGBUS : SIGSEGV;
988			fault_type = result == EACCES ?
989			    BUS_ADRERR : SEGV_MAPERR;
990		}
991		break;
992	case T_MISALGNFLT+T_USER:
993		/* Fix any misaligned ld.d or st.d instructions */
994		sig = double_reg_fixup(frame, T_MISALGNFLT);
995		fault_type = BUS_ADRALN;
996		if (sig == 0) {
997			/* skip recovered instruction */
998			m88110_skip_insn(frame);
999			goto userexit;
1000		}
1001		break;
1002	case T_ILLFLT+T_USER:
1003		/* Fix any ld.d or st.d instruction with an odd register */
1004		sig = double_reg_fixup(frame, T_ILLFLT);
1005		fault_type = ILL_PRVREG;
1006		if (sig == 0) {
1007			/* skip recovered instruction */
1008			m88110_skip_insn(frame);
1009			goto userexit;
1010		}
1011		break;
1012	case T_PRIVINFLT+T_USER:
1013		fault_type = ILL_PRVREG;
1014		/* FALLTHROUGH */
1015#ifndef DDB
1016	case T_KDB_BREAK:
1017	case T_KDB_ENTRY:
1018	case T_KDB_TRACE:
1019#endif
1020	case T_KDB_BREAK+T_USER:
1021	case T_KDB_ENTRY+T_USER:
1022	case T_KDB_TRACE+T_USER:
1023		sig = SIGILL;
1024		break;
1025	case T_BNDFLT+T_USER:
1026		sig = SIGFPE;
1027		/* skip trap instruction */
1028		m88110_skip_insn(frame);
1029		break;
1030	case T_ZERODIV+T_USER:
1031		sig = SIGFPE;
1032		fault_type = FPE_INTDIV;
1033		/* skip trap instruction */
1034		m88110_skip_insn(frame);
1035		break;
1036	case T_OVFFLT+T_USER:
1037		sig = SIGFPE;
1038		fault_type = FPE_INTOVF;
1039		/* skip trap instruction */
1040		m88110_skip_insn(frame);
1041		break;
1042	case T_FPEPFLT+T_USER:
1043		m88110_fpu_exception(frame);
1044		goto userexit;
1045	case T_SIGSYS+T_USER:
1046		sig = SIGSYS;
1047		break;
1048	case T_STEPBPT+T_USER:
1049#ifdef PTRACE
1050		/*
1051		 * This trap is used by the kernel to support single-step
1052		 * debugging (although any user could generate this trap
1053		 * which should probably be handled differently). When a
1054		 * process is continued by a debugger with the PT_STEP
1055		 * function of ptrace (single step), the kernel inserts
1056		 * one or two breakpoints in the user process so that only
1057		 * one instruction (or two in the case of a delayed branch)
1058		 * is executed.  When this breakpoint is hit, we get the
1059		 * T_STEPBPT trap.
1060		 */
1061		{
1062			u_int instr;
1063			vaddr_t pc = PC_REGS(&frame->tf_regs);
1064
1065			/* read break instruction */
1066			copyin((caddr_t)pc, &instr, sizeof(u_int));
1067
1068			/* check and see if we got here by accident */
1069			if ((p->p_md.md_bp0va != pc &&
1070			     p->p_md.md_bp1va != pc) ||
1071			    instr != SSBREAKPOINT) {
1072				sig = SIGTRAP;
1073				fault_type = TRAP_TRACE;
1074				break;
1075			}
1076
1077			/* restore original instruction and clear breakpoint */
1078			KERNEL_LOCK();
1079			if (p->p_md.md_bp0va == pc) {
1080				ss_put_value(p, pc, p->p_md.md_bp0save);
1081				p->p_md.md_bp0va = 0;
1082			}
1083			if (p->p_md.md_bp1va == pc) {
1084				ss_put_value(p, pc, p->p_md.md_bp1save);
1085				p->p_md.md_bp1va = 0;
1086			}
1087			KERNEL_UNLOCK();
1088
1089			sig = SIGTRAP;
1090			fault_type = TRAP_BRKPT;
1091		}
1092#else
1093		sig = SIGTRAP;
1094		fault_type = TRAP_TRACE;
1095#endif
1096		break;
1097	case T_USERBPT+T_USER:
1098		/*
1099		 * This trap is meant to be used by debuggers to implement
1100		 * breakpoint debugging.  When we get this trap, we just
1101		 * return a signal which gets caught by the debugger.
1102		 */
1103		sig = SIGTRAP;
1104		fault_type = TRAP_BRKPT;
1105		break;
1106	}
1107
1108	/*
1109	 * If trap from supervisor mode, just return
1110	 */
1111	if (type < T_USER)
1112		return;
1113
1114	if (sig) {
1115deliver:
1116		sv.sival_ptr = (void *)fault_addr;
1117		KERNEL_LOCK();
1118		trapsignal(p, sig, fault_code, fault_type, sv);
1119		KERNEL_UNLOCK();
1120	}
1121
1122userexit:
1123	userret(p);
1124}
1125#endif /* M88110 */
1126
1127__dead void
1128error_fatal(struct trapframe *frame)
1129{
1130	if (frame->tf_vector == 0)
1131		printf("\nCPU %d Reset Exception\n", cpu_number());
1132	else
1133		printf("\nCPU %d Error Exception\n", cpu_number());
1134
1135#ifdef DDB
1136	regdump((struct trapframe*)frame);
1137#endif
1138	panic("unrecoverable exception %ld", frame->tf_vector);
1139}
1140
1141#ifdef M88100
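/*
 * System call handler for 88100-class CPUs.  Argument collection matches
 * the mc88110 version below; only the adjustment of the shadow
 * instruction pointers (sxip/snip/sfip) on return differs.
 */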
1142void
1143m88100_syscall(register_t code, struct trapframe *tf)
1144{
1145	int i, nsys, nap;
1146	struct sysent *callp;
1147	struct proc *p = curproc;
1148	int error;
1149	register_t args[8], rval[2], *ap;
1150
1151	uvmexp.syscalls++;
1152
1153	callp = p->p_p->ps_emul->e_sysent;
1154	nsys  = p->p_p->ps_emul->e_nsysent;
1155
1156	p->p_md.md_tf = tf;
1157
1158	/*
1159	 * For 88k, the first eight arguments are passed in registers (r2-r9),
1160	 * and further arguments (if any) on the stack.
1161	 * For syscall (and __syscall), r2 (and r3) has the actual code.
1162	 * __syscall  takes a quad syscall number, so that other
1163	 * arguments are at their natural alignments.
1164	 */
1165	ap = &tf->tf_r[2];
1166	nap = 8; /* r2-r9 */
1167
1168	switch (code) {
1169	case SYS_syscall:
1170		code = *ap++;
1171		nap--;
1172		break;
1173	case SYS___syscall:
1174		if (callp != sysent)
1175			break;
1176		code = ap[_QUAD_LOWWORD];
1177		ap += 2;
1178		nap -= 2;
1179		break;
1180	}
1181
1182	if (code < 0 || code >= nsys)
1183		callp += p->p_p->ps_emul->e_nosys;
1184	else
1185		callp += code;
1186
1187	i = callp->sy_argsize / sizeof(register_t);
1188	if (i > sizeof(args) / sizeof(register_t))
1189		panic("syscall nargs");
1190	if (i > nap) {
1191		bcopy((caddr_t)ap, (caddr_t)args, nap * sizeof(register_t));
1192		if ((error = copyin((caddr_t)tf->tf_r[31], args + nap,
1193		    (i - nap) * sizeof(register_t))))
1194			goto bad;
1195	} else
1196		bcopy((caddr_t)ap, (caddr_t)args, i * sizeof(register_t));
1197
1198	rval[0] = 0;
1199	rval[1] = tf->tf_r[3];
1200
1201	error = mi_syscall(p, code, callp, args, rval);
1202
1203	/*
1204	 * system call will look like:
1205	 *	 or r13, r0, <code>
1206	 *       tb0 0, r0, <128> <- sxip
1207	 *	 br err 	  <- snip
1208	 *       jmp r1 	  <- sfip
1209	 *  err: or.u r3, r0, hi16(errno)
1210	 *	 st r2, r3, lo16(errno)
1211	 *	 subu r2, r0, 1
1212	 *	 jmp r1
1213	 *
1214	 * So, when we take the syscall trap, sxip/snip/sfip will be as
1215	 * shown above.
1216	 * Given this,
1217	 * 1. If the system call returned 0, need to skip nip.
1218	 *	nip = fip, fip += 4
1219	 *    (doesn't matter what fip + 4 will be but we will never
1220	 *    execute this since jmp r1 at nip will change the execution flow.)
1221	 * 2. If the system call returned an errno > 0, plug the value
1222	 *    in r2, and leave nip and fip unchanged. This will have us
1223	 *    executing "br err" on return to user space.
1224	 * 3. If the system call code returned ERESTART,
1225	 *    we need to re-execute the trap instruction. Back up the pipe
1226	 *    line.
1227	 *     fip = nip, nip = xip
1228	 * 4. If the system call returned EJUSTRETURN, don't need to adjust
1229	 *    any pointers.
1230	 */
1231
1232	switch (error) {
1233	case 0:
1234		tf->tf_r[2] = rval[0];
1235		tf->tf_r[3] = rval[1];
1236		tf->tf_epsr &= ~PSR_C;
1237		tf->tf_snip = tf->tf_sfip & ~NIP_E;
1238		tf->tf_sfip = tf->tf_snip + 4;
1239		break;
1240	case ERESTART:
1241		m88100_rewind_insn(&(tf->tf_regs));
1242		/* clear the error bit */
1243		tf->tf_sfip &= ~FIP_E;
1244		tf->tf_snip &= ~NIP_E;
1245		break;
1246	case EJUSTRETURN:
1247		break;
1248	default:
1249	bad:
1250		tf->tf_r[2] = error;
1251		tf->tf_epsr |= PSR_C;   /* fail */
1252		tf->tf_snip = tf->tf_snip & ~NIP_E;
1253		tf->tf_sfip = tf->tf_sfip & ~FIP_E;
1254		break;
1255	}
1256
1257	mi_syscall_return(p, code, error, rval);
1258}
1259#endif /* M88100 */
1260
1261#ifdef M88110
1262/* Instruction pointers operate differently on mc88110 */
1263void
1264m88110_syscall(register_t code, struct trapframe *tf)
1265{
1266	int i, nsys, nap;
1267	struct sysent *callp;
1268	struct proc *p = curproc;
1269	int error;
1270	register_t args[8], rval[2], *ap;
1271
1272	uvmexp.syscalls++;
1273
1274	callp = p->p_p->ps_emul->e_sysent;
1275	nsys  = p->p_p->ps_emul->e_nsysent;
1276
1277	p->p_md.md_tf = tf;
1278
1279	/*
1280	 * For 88k, the first eight arguments are passed in registers (r2-r9),
1281	 * and further arguments (if any) on the stack.
1282	 * For syscall (and __syscall), r2 (and r3) has the actual code.
1283	 * __syscall  takes a quad syscall number, so that other
1284	 * arguments are at their natural alignments.
1285	 */
1286	ap = &tf->tf_r[2];
1287	nap = 8;	/* r2-r9 */
1288
1289	switch (code) {
1290	case SYS_syscall:
1291		code = *ap++;
1292		nap--;
1293		break;
1294	case SYS___syscall:
1295		if (callp != sysent)
1296			break;
1297		code = ap[_QUAD_LOWWORD];
1298		ap += 2;
1299		nap -= 2;
1300		break;
1301	}
1302
1303	if (code < 0 || code >= nsys)
1304		callp += p->p_p->ps_emul->e_nosys;
1305	else
1306		callp += code;
1307
1308	i = callp->sy_argsize / sizeof(register_t);
1309	if (i > sizeof(args) / sizeof(register_t))
1310		panic("syscall nargs");
1311	if (i > nap) {
1312		bcopy((caddr_t)ap, (caddr_t)args, nap * sizeof(register_t));
1313		if ((error = copyin((caddr_t)tf->tf_r[31], args + nap,
1314		    (i - nap) * sizeof(register_t))))
1315			goto bad;
1316	} else
1317		bcopy((caddr_t)ap, (caddr_t)args, i * sizeof(register_t));
1318
1319	rval[0] = 0;
1320	rval[1] = tf->tf_r[3];
1321
1322	error = mi_syscall(p, code, callp, args, rval);
1323
1324	/*
1325	 * system call will look like:
1326	 *	 or r13, r0, <code>
1327	 *       tb0 0, r0, <128> <- exip
1328	 *	 br err 	  <- enip
1329	 *       jmp r1
1330	 *  err: or.u r3, r0, hi16(errno)
1331	 *	 st r2, r3, lo16(errno)
1332	 *	 subu r2, r0, 1
1333	 *	 jmp r1
1334	 *
1335	 * So, when we take the syscall trap, exip/enip will be as
1336	 * shown above.
1337	 * Given this,
1338	 * 1. If the system call returned 0, need to jmp r1.
1339	 *    exip += 8
1340	 * 2. If the system call returned an errno > 0, increment
1341	 *    exip += 4 and plug the value in r2. This will have us
1342	 *    executing "br err" on return to user space.
1343	 * 3. If the system call code returned ERESTART,
1344	 *    we need to re-execute the trap instruction. Leave exip as is.
1345	 * 4. If the system call returned EJUSTRETURN, just return.
1346	 *    exip += 4
1347	 */
1348
1349	switch (error) {
1350	case 0:
1351		tf->tf_r[2] = rval[0];
1352		tf->tf_r[3] = rval[1];
1353		tf->tf_epsr &= ~PSR_C;
1354		/* skip two instructions */
1355		m88110_skip_insn(tf);
1356		m88110_skip_insn(tf);
1357		break;
1358	case ERESTART:
1359		/*
1360		 * Reexecute the trap.
1361		 * exip is already at the trap instruction, so
1362		 * there is nothing to do.
1363		 */
1364		break;
1365	case EJUSTRETURN:
1366		/* skip one instruction */
1367		m88110_skip_insn(tf);
1368		break;
1369	default:
1370	bad:
1371		tf->tf_r[2] = error;
1372		tf->tf_epsr |= PSR_C;   /* fail */
1373		/* skip one instruction */
1374		m88110_skip_insn(tf);
1375		break;
1376	}
1377
1378	mi_syscall_return(p, code, error, rval);
1379}
1380#endif	/* M88110 */
1381
1382/*
1383 * Set up return-value registers as fork() libc stub expects,
1384 * and do normal return-to-user-mode stuff.
1385 */
1386void
1387child_return(arg)
1388	void *arg;
1389{
1390	struct proc *p = arg;
1391	struct trapframe *tf;
1392
1393	tf = (struct trapframe *)USER_REGS(p);
1394	tf->tf_r[2] = 0;
1395	tf->tf_r[3] = 0;
1396	tf->tf_epsr &= ~PSR_C;
1397	/* reset r26 (used by the threads library) if __tfork */
1398	if (p->p_flag & P_THREAD)
1399		tf->tf_r[26] = 0;
1400	/* skip br instruction as in syscall() */
1401#ifdef M88100
1402	if (CPU_IS88100) {
1403		tf->tf_snip = (tf->tf_sfip & XIP_ADDR) | XIP_V;
1404		tf->tf_sfip = tf->tf_snip + 4;
1405	}
1406#endif
1407#ifdef M88110
1408	if (CPU_IS88110) {
1409		/* skip two instructions */
1410		m88110_skip_insn(tf);
1411		m88110_skip_insn(tf);
1412	}
1413#endif
1414
1415	KERNEL_UNLOCK();
1416
1417	mi_child_return(p);
1418}
1419
1420#ifdef PTRACE
1421
1422/*
1423 * User Single Step Debugging Support
1424 */
1425
1426#include <sys/ptrace.h>
1427
1428vaddr_t	ss_branch_taken(u_int, vaddr_t, struct reg *);
1429int	ss_get_value(struct proc *, vaddr_t, u_int *);
1430int	ss_inst_branch_or_call(u_int);
1431int	ss_put_breakpoint(struct proc *, vaddr_t, vaddr_t *, u_int *);
1432
1433#define	SYSCALL_INSTR	0xf000d080	/* tb0 0,r0,128 */
1434
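/*
 * Read or write one instruction word in the traced process' address
 * space, through the ptrace process_domem() interface.
 */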
1435int
1436ss_get_value(struct proc *p, vaddr_t addr, u_int *value)
1437{
1438	struct uio uio;
1439	struct iovec iov;
1440
1441	iov.iov_base = (caddr_t)value;
1442	iov.iov_len = sizeof(u_int);
1443	uio.uio_iov = &iov;
1444	uio.uio_iovcnt = 1;
1445	uio.uio_offset = (off_t)addr;
1446	uio.uio_resid = sizeof(u_int);
1447	uio.uio_segflg = UIO_SYSSPACE;
1448	uio.uio_rw = UIO_READ;
1449	uio.uio_procp = curproc;
1450	return (process_domem(curproc, p, &uio, PT_READ_I));
1451}
1452
1453int
1454ss_put_value(struct proc *p, vaddr_t addr, u_int value)
1455{
1456	struct uio uio;
1457	struct iovec iov;
1458
1459	iov.iov_base = (caddr_t)&value;
1460	iov.iov_len = sizeof(u_int);
1461	uio.uio_iov = &iov;
1462	uio.uio_iovcnt = 1;
1463	uio.uio_offset = (off_t)addr;
1464	uio.uio_resid = sizeof(u_int);
1465	uio.uio_segflg = UIO_SYSSPACE;
1466	uio.uio_rw = UIO_WRITE;
1467	uio.uio_procp = curproc;
1468	return (process_domem(curproc, p, &uio, PT_WRITE_I));
1469}
1470
1471/*
1472 * ss_branch_taken(instruction, pc, regs)
1473 *
1474 * instruction will be a control flow instruction located at address pc.
1475 * Branch taken is supposed to return the address to which the instruction
1476 * would jump if the branch is taken.
1477 *
1478 * This is different from branch_taken() in ddb, as we also need to process
1479 * system calls.
1480 */
1481vaddr_t
1482ss_branch_taken(u_int inst, vaddr_t pc, struct reg *regs)
1483{
1484	u_int regno;
1485
1486	/*
1487	 * Quick check of the instruction. Note that we know we are only
1488	 * invoked if ss_inst_branch_or_call() returns TRUE, so we do not
1489	 * need to repeat the stricter jmp, jsr and syscall checks here.
1490	 */
1491	switch (inst >> (32 - 5)) {
1492	case 0x18:	/* br */
1493	case 0x19:	/* bsr */
1494		/* signed 26 bit pc relative displacement, shift left 2 bits */
1495		inst = (inst & 0x03ffffff) << 2;
1496		/* check if sign extension is needed */
1497		if (inst & 0x08000000)
1498			inst |= 0xf0000000;
1499		return (pc + inst);
1500
1501	case 0x1a:	/* bb0 */
1502	case 0x1b:	/* bb1 */
1503	case 0x1d:	/* bcnd */
1504		/* signed 16 bit pc relative displacement, shift left 2 bits */
1505		inst = (inst & 0x0000ffff) << 2;
1506		/* check if sign extension is needed */
1507		if (inst & 0x00020000)
1508			inst |= 0xfffc0000;
1509		return (pc + inst);
1510
1511	case 0x1e:	/* jmp or jsr */
1512		regno = inst & 0x1f;	/* get the register value */
1513		return (regno == 0 ? 0 : regs->r[regno]);
1514
1515	default:	/* system call */
1516		/*
1517		 * The regular (pc + 4) breakpoint will match the error
1518		 * return. Successful system calls return at (pc + 8),
1519		 * so we'll set up a branch breakpoint there.
1520		 */
1521		return (pc + 8);
1522	}
1523}
1524
1525int
1526ss_inst_branch_or_call(u_int ins)
1527{
1528	/* check high five bits */
1529	switch (ins >> (32 - 5)) {
1530	case 0x18: /* br */
1531	case 0x19: /* bsr */
1532	case 0x1a: /* bb0 */
1533	case 0x1b: /* bb1 */
1534	case 0x1d: /* bcnd */
1535		return (TRUE);
1536	case 0x1e: /* could be jmp or jsr */
1537		if ((ins & 0xfffff3e0) == 0xf400c000)
1538			return (TRUE);
1539	}
1540
1541	return (FALSE);
1542}
1543
1544int
1545ss_put_breakpoint(struct proc *p, vaddr_t va, vaddr_t *bpva, u_int *bpsave)
1546{
1547	int rc;
1548
1549	/* Restore previous breakpoint if we did not trigger it. */
1550	if (*bpva != 0) {
1551		ss_put_value(p, *bpva, *bpsave);
1552		*bpva = 0;
1553	}
1554
1555	/* Save instruction. */
1556	if ((rc = ss_get_value(p, va, bpsave)) != 0)
1557		return (rc);
1558
1559	/* Store breakpoint instruction at the location now. */
1560	*bpva = va;
1561	return (ss_put_value(p, va, SSBREAKPOINT));
1562}
1563
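/*
 * Set up (sstep != 0) or remove (sstep == 0) the single step breakpoints
 * for process p.  As described in the T_STEPBPT handling above, a
 * breakpoint is always planted on the next instruction (pc + 4), and a
 * second one at the target of the instruction at pc whenever it is a
 * branch, jump or system call (unless it branches to itself).
 */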
1564int
1565process_sstep(struct proc *p, int sstep)
1566{
1567	struct reg *sstf = USER_REGS(p);
1568	vaddr_t pc, brpc;
1569	u_int32_t instr;
1570	int rc;
1571
1572	if (sstep == 0) {
1573		/* Restore previous breakpoints if any. */
1574		if (p->p_md.md_bp0va != 0) {
1575			ss_put_value(p, p->p_md.md_bp0va, p->p_md.md_bp0save);
1576			p->p_md.md_bp0va = 0;
1577		}
1578		if (p->p_md.md_bp1va != 0) {
1579			ss_put_value(p, p->p_md.md_bp1va, p->p_md.md_bp1save);
1580			p->p_md.md_bp1va = 0;
1581		}
1582
1583		return (0);
1584	}
1585
1586	/*
1587	 * User was stopped at pc, i.e. the instruction at pc was not executed.
1588	 * Fetch what's at the current location.
1589	 */
1590	pc = PC_REGS(sstf);
1591	if ((rc = ss_get_value(p, pc, &instr)) != 0)
1592		return (rc);
1593
1594	/*
1595	 * Find if this instruction may cause a branch, and set up a breakpoint
1596	 * at the branch location.
1597	 */
1598	if (ss_inst_branch_or_call(instr) || instr == SYSCALL_INSTR) {
1599		brpc = ss_branch_taken(instr, pc, sstf);
1600
1601		/* self-branches are hopeless */
1602		if (brpc != pc && brpc != 0) {
1603			if ((rc = ss_put_breakpoint(p, brpc,
1604			    &p->p_md.md_bp1va, &p->p_md.md_bp1save)) != 0)
1605				return (rc);
1606		}
1607	}
1608
1609	if ((rc = ss_put_breakpoint(p, pc + 4,
1610	    &p->p_md.md_bp0va, &p->p_md.md_bp0save)) != 0)
1611		return (rc);
1612
1613	return (0);
1614}
1615
1616#endif	/* PTRACE */
1617
1618#ifdef DIAGNOSTIC
1619void
1620splassert_check(int wantipl, const char *func)
1621{
1622	int oldipl;
1623
1624	oldipl = getipl();
1625
1626	if (oldipl < wantipl) {
1627		splassert_fail(wantipl, oldipl, func);
1628		/*
1629		 * This will raise the spl,
1630		 * in a feeble attempt to reduce further damage.
1631		 */
1632		(void)raiseipl(wantipl);
1633	}
1634}
1635#endif
1636
1637/*
1638 * ld.d and st.d instructions referencing long aligned but not long long
1639 * aligned addresses will trigger a misaligned address exception.
1640 *
1641 * This routine attempts to recover these (valid) statements, by simulating
1642 * the split form of the instruction. If it fails, it returns the appropriate
1643 * signal number to deliver.
1644 *
1645 * Note that we do not attempt to do anything for .d.usr instructions - the
1646 * kernel never issues such instructions, and they cause a privileged
1647 * instruction exception from userland.
1648 */
1649int
1650double_reg_fixup(struct trapframe *frame, int fault)
1651{
1652	u_int32_t pc, instr, value;
1653	int regno, store;
1654	vaddr_t addr;
1655
1656	/*
1657	 * Decode the faulting instruction.
1658	 */
1659
1660	pc = PC_REGS(&frame->tf_regs);
1661	if (copyin((void *)pc, &instr, sizeof(u_int32_t)) != 0)
1662		return SIGSEGV;
1663
1664	switch (instr & 0xfc00ff00) {
1665	case 0xf4001000:	/* ld.d rD, rS1, rS2 */
1666		addr = frame->tf_r[(instr >> 16) & 0x1f]
1667		    + frame->tf_r[(instr & 0x1f)];
1668		store = 0;
1669		break;
1670	case 0xf4002000:	/* st.d rD, rS1, rS2 */
1671		addr = frame->tf_r[(instr >> 16) & 0x1f]
1672		    + frame->tf_r[(instr & 0x1f)];
1673		store = 1;
1674		break;
1675	default:
1676		switch (instr >> 26) {
1677		case 0x10000000 >> 26:	/* ld.d rD, rS, imm16 */
1678			addr = (instr & 0x0000ffff) +
1679			    frame->tf_r[(instr >> 16) & 0x1f];
1680			store = 0;
1681			break;
1682		case 0x20000000 >> 26:	/* st.d rD, rS, imm16 */
1683			addr = (instr & 0x0000ffff) +
1684			    frame->tf_r[(instr >> 16) & 0x1f];
1685			store = 1;
1686			break;
1687		default:
1688			return SIGBUS;
1689		}
1690		break;
1691	}
1692
1693	regno = (instr >> 21) & 0x1f;
1694
1695	switch (fault) {
1696	case T_MISALGNFLT:
1697		/* We only handle long but not long long aligned access here */
1698		if ((addr & 0x07) != 4)
1699			return SIGBUS;
1700		break;
1701	case T_ILLFLT:
1702		/* We only handle odd register pair number here */
1703		if ((regno & 0x01) == 0)
1704			return SIGILL;
1705		/* We only handle long aligned access here */
1706		if ((addr & 0x03) != 0)
1707			return SIGBUS;
1708		break;
1709	}
1710
1711	if (store) {
1712		/*
1713		 * Two word stores.
1714		 */
1715		if (regno == 0)
1716			value = 0;
1717		else
1718			value = frame->tf_r[regno];
1719		if (copyout(&value, (void *)addr, sizeof(u_int32_t)) != 0)
1720			return SIGSEGV;
1721		if (regno == 31)
1722			value = 0;
1723		else
1724			value = frame->tf_r[regno + 1];
1725		if (copyout(&value, (void *)(addr + 4), sizeof(u_int32_t)) != 0)
1726			return SIGSEGV;
1727	} else {
1728		/*
1729		 * Two word loads. r0 should be left unaltered, but the
1730		 * value should still be fetched even if it is discarded.
1731		 */
1732		if (copyin((void *)addr, &value, sizeof(u_int32_t)) != 0)
1733			return SIGSEGV;
1734		if (regno != 0)
1735			frame->tf_r[regno] = value;
1736		if (copyin((void *)(addr + 4), &value, sizeof(u_int32_t)) != 0)
1737			return SIGSEGV;
1738		if (regno != 31)
1739			frame->tf_r[regno + 1] = value;
1740	}
1741
1742	return 0;
1743}
1744
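/*
 * Handle a user request to synchronize the caches over the address range
 * passed in r2 (start) and r3 (length), typically after code has been
 * written to memory.  Requests outside the user address space are
 * silently ignored.
 */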
1745void
1746cache_flush(struct trapframe *tf)
1747{
1748	struct proc *p = curproc;
1749	struct pmap *pmap;
1750	paddr_t pa;
1751	vaddr_t va;
1752	vsize_t len, count;
1753
1754	p->p_md.md_tf = tf;
1755
1756	pmap = vm_map_pmap(&p->p_vmspace->vm_map);
1757	va = tf->tf_r[2];
1758	len = tf->tf_r[3];
1759
1760	if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS ||
1761	    va + len <= va || va + len >= VM_MAXUSER_ADDRESS)
1762		len = 0;
1763
1764	while (len != 0) {
1765		count = min(len, PAGE_SIZE - (va & PAGE_MASK));
1766		if (pmap_extract(pmap, va, &pa) != FALSE)
1767			dma_cachectl(pa, count, DMA_CACHE_SYNC);
1768		va += count;
1769		len -= count;
1770	}
1771
1772#ifdef M88100
1773	if (CPU_IS88100) {
1774		/* clear the error bit */
1775		tf->tf_sfip &= ~FIP_E;
1776		tf->tf_snip &= ~NIP_E;
1777	}
1778#endif
1779#ifdef M88110
1780	if (CPU_IS88110) {
1781		/* skip instruction */
1782		m88110_skip_insn(tf);
1783	}
1784#endif
1785
1786	userret(p);
1787}
1788