trap.c revision 1.33
1/*	$OpenBSD: trap.c,v 1.33 2006/05/08 14:36:09 miod Exp $	*/
2/*
3 * Copyright (c) 2004, Miodrag Vallat.
4 * Copyright (c) 1998 Steve Murphree, Jr.
5 * Copyright (c) 1996 Nivas Madhur
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *      This product includes software developed by Nivas Madhur.
19 * 4. The name of the author may not be used to endorse or promote products
20 *    derived from this software without specific prior written permission
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34/*
35 * Mach Operating System
36 * Copyright (c) 1991 Carnegie Mellon University
37 * Copyright (c) 1991 OMRON Corporation
38 * All Rights Reserved.
39 *
40 * Permission to use, copy, modify and distribute this software and its
41 * documentation is hereby granted, provided that both the copyright
42 * notice and this permission notice appear in all copies of the
43 * software, derivative works or modified versions, and any portions
44 * thereof, and that both notices appear in supporting documentation.
45 *
46 */
47
48#include <sys/types.h>
49#include <sys/param.h>
50#include <sys/proc.h>
51#include <sys/signalvar.h>
52#include <sys/user.h>
53#include <sys/syscall.h>
54#include <sys/systm.h>
55#include <sys/ktrace.h>
56
57#include "systrace.h"
58#include <dev/systrace.h>
59
60#include <uvm/uvm_extern.h>
61
62#include <machine/asm_macro.h>   /* enable/disable interrupts */
63#include <machine/cmmu.h>
64#include <machine/cpu.h>
65#ifdef M88100
66#include <machine/m88100.h>		/* DMT_xxx */
67#include <machine/m8820x.h>		/* CMMU_PFSR_xxx */
68#endif
69#ifdef M88110
70#include <machine/m88110.h>
71#endif
72#include <machine/pcb.h>		/* FIP_E, etc. */
73#include <machine/psl.h>		/* FIP_E, etc. */
74#include <machine/trap.h>
75
76#include <machine/db_machdep.h>
77
78#define SSBREAKPOINT (0xF000D1F8U) /* Single Step Breakpoint */
79
80#define USERMODE(PSR)   (((PSR) & PSR_MODE) == 0)
81#define SYSTEMMODE(PSR) (((PSR) & PSR_MODE) != 0)
82
83__dead void panictrap(int, struct trapframe *);
84__dead void error_fatal(struct trapframe *);
85int double_reg_fixup(struct trapframe *);
86int ss_put_value(struct proc *, vaddr_t, u_int);
87
88extern void regdump(struct trapframe *f);
89
90const char *trap_type[] = {
91	"Reset",
92	"Interrupt Exception",
93	"Instruction Access",
94	"Data Access Exception",
95	"Misaligned Access",
96	"Unimplemented Opcode",
97	"Privilege Violation",
98	"Bounds Check Violation",
99	"Illegal Integer Divide",
100	"Integer Overflow",
101	"Error Exception",
102	"Non-Maskable Exception",
103};
104
105const int trap_types = sizeof trap_type / sizeof trap_type[0];
106
107#ifdef M88100
108const char *pbus_exception_type[] = {
109	"Success (No Fault)",
110	"unknown 1",
111	"unknown 2",
112	"Bus Error",
113	"Segment Fault",
114	"Page Fault",
115	"Supervisor Violation",
116	"Write Violation",
117};
118#endif
119
120static inline void
121userret(struct proc *p, struct trapframe *frame, u_quad_t oticks)
122{
123	int sig;
124	struct cpu_info *cpu = curcpu();
125
126	/* take pending signals */
127	while ((sig = CURSIG(p)) != 0)
128		postsig(sig);
129	p->p_priority = p->p_usrpri;
130
131	if (cpu->ci_want_resched) {
132		/*
133		 * We're being preempted.
134		 */
135		preempt(NULL);
136		while ((sig = CURSIG(p)) != 0)
137			postsig(sig);
138	}
139
140	/*
141	 * If profiling, charge recent system time to the trapped pc.
142	 */
143	if (p->p_flag & P_PROFIL) {
144		extern int psratio;
145
146		addupc_task(p, frame->tf_sxip & XIP_ADDR,
147		    (int)(p->p_sticks - oticks) * psratio);
148	}
149	cpu->ci_schedstate.spc_curpriority = p->p_priority;
150}
151
152__dead void
153panictrap(int type, struct trapframe *frame)
154{
155	static int panicing = 0;
156
157	if (panicing++ == 0) {
158#ifdef M88100
159		if (CPU_IS88100) {
160			if (type == 2) {
161				/* instruction exception */
162				printf("\nInstr access fault (%s) v = %x, "
163				    "frame %p\n",
164				    pbus_exception_type[
165				      CMMU_PFSR_FAULT(frame->tf_ipfsr)],
166				    frame->tf_sxip & XIP_ADDR, frame);
167			} else if (type == 3) {
168				/* data access exception */
169				printf("\nData access fault (%s) v = %x, "
170				    "frame %p\n",
171				    pbus_exception_type[
172				      CMMU_PFSR_FAULT(frame->tf_dpfsr)],
173				    frame->tf_sxip & XIP_ADDR, frame);
174			} else
175				printf("\nTrap type %d, v = %x, frame %p\n",
176				    type, frame->tf_sxip & XIP_ADDR, frame);
177		}
178#endif
179#ifdef M88110
180		if (CPU_IS88110) {
181			printf("\nTrap type %d, v = %x, frame %p\n",
182			    type, frame->tf_exip, frame);
183		}
184#endif
185#ifdef DDB
186		regdump(frame);
187#endif
188	}
189	if ((u_int)type < trap_types)
190		panic(trap_type[type]);
191	else
192		panic("trap %d", type);
193	/*NOTREACHED*/
194}
195
196#ifdef M88100
197void
198m88100_trap(unsigned type, struct trapframe *frame)
199{
200	struct proc *p;
201	u_quad_t sticks = 0;
202	struct vm_map *map;
203	vaddr_t va, pcb_onfault;
204	vm_prot_t ftype;
205	int fault_type, pbus_type;
206	u_long fault_code;
207	unsigned fault_addr;
208	struct vmspace *vm;
209	union sigval sv;
210	int result;
211#ifdef DDB
212	int s;
213	u_int psr;
214#endif
215	int sig = 0;
216
217	extern struct vm_map *kernel_map;
218
219	uvmexp.traps++;
220	if ((p = curproc) == NULL)
221		p = &proc0;
222
223	if (USERMODE(frame->tf_epsr)) {
224		sticks = p->p_sticks;
225		type += T_USER;
226		p->p_md.md_tf = frame;	/* for ptrace/signals */
227	}
228	fault_type = 0;
229	fault_code = 0;
230	fault_addr = frame->tf_sxip & XIP_ADDR;
231
232	switch (type) {
233	default:
234		panictrap(frame->tf_vector, frame);
235		break;
236		/*NOTREACHED*/
237
238#if defined(DDB)
239	case T_KDB_BREAK:
240		s = splhigh();
241		set_psr((psr = get_psr()) & ~PSR_IND);
242		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
243		set_psr(psr);
244		splx(s);
245		return;
246	case T_KDB_ENTRY:
247		s = splhigh();
248		set_psr((psr = get_psr()) & ~PSR_IND);
249		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
250		set_psr(psr);
251		splx(s);
252		return;
253#endif /* DDB */
254	case T_ILLFLT:
255		printf("Unimplemented opcode!\n");
256		panictrap(frame->tf_vector, frame);
257		break;
258	case T_INT:
259	case T_INT+T_USER:
260		curcpu()->ci_intrdepth++;
261		md_interrupt_func(T_INT, frame);
262		curcpu()->ci_intrdepth--;
263		return;
264
265	case T_MISALGNFLT:
266		printf("kernel misaligned access exception @ 0x%08x\n",
267		    frame->tf_sxip);
268		panictrap(frame->tf_vector, frame);
269		break;
270
271	case T_INSTFLT:
272		/* kernel mode instruction access fault.
273		 * Should never, never happen for a non-paged kernel.
274		 */
275#ifdef TRAPDEBUG
276		pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
277		printf("Kernel Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
278		    pbus_type, pbus_exception_type[pbus_type],
279		    fault_addr, frame, frame->tf_cpu);
280#endif
281		panictrap(frame->tf_vector, frame);
282		break;
283
284	case T_DATAFLT:
285		/* kernel mode data fault */
286
287		/* data fault on the user address? */
288		if ((frame->tf_dmt0 & DMT_DAS) == 0) {
289			type = T_DATAFLT + T_USER;
290			goto user_fault;
291		}
292
293		fault_addr = frame->tf_dma0;
294		if (frame->tf_dmt0 & (DMT_WRITE|DMT_LOCKBAR)) {
295			ftype = VM_PROT_READ|VM_PROT_WRITE;
296			fault_code = VM_PROT_WRITE;
297		} else {
298			ftype = VM_PROT_READ;
299			fault_code = VM_PROT_READ;
300		}
301
302		va = trunc_page((vaddr_t)fault_addr);
303		if (va == 0) {
304			panic("trap: bad kernel access at %x", fault_addr);
305		}
306
307		KERNEL_LOCK(LK_CANRECURSE | LK_EXCLUSIVE);
308		vm = p->p_vmspace;
309		map = kernel_map;
310
311		pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
312#ifdef TRAPDEBUG
313		printf("Kernel Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
314		    pbus_type, pbus_exception_type[pbus_type],
315		    fault_addr, frame, frame->tf_cpu);
316#endif
317
318		switch (pbus_type) {
319		case CMMU_PFSR_SUCCESS:
320			/*
321			 * The fault was resolved. Call data_access_emulation
322			 * to drain the data unit pipe line and reset dmt0
323			 * so that trap won't get called again.
324			 */
325			data_access_emulation((unsigned *)frame);
326			frame->tf_dpfsr = 0;
327			frame->tf_dmt0 = 0;
328			KERNEL_UNLOCK();
329			return;
330		case CMMU_PFSR_SFAULT:
331		case CMMU_PFSR_PFAULT:
332			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
333				p->p_addr->u_pcb.pcb_onfault = 0;
334			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
335			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
336			if (result == 0) {
337				/*
338				 * We could resolve the fault. Call
339				 * data_access_emulation to drain the data
340				 * unit pipe line and reset dmt0 so that trap
341				 * won't get called again.
342				 */
343				data_access_emulation((unsigned *)frame);
344				frame->tf_dpfsr = 0;
345				frame->tf_dmt0 = 0;
346				KERNEL_UNLOCK();
347				return;
348			}
349			break;
350		}
351#ifdef TRAPDEBUG
352		printf("PBUS Fault %d (%s) va = 0x%x\n", pbus_type,
353		    pbus_exception_type[pbus_type], va);
354#endif
355		KERNEL_UNLOCK();
356		panictrap(frame->tf_vector, frame);
357		/* NOTREACHED */
358	case T_INSTFLT+T_USER:
359		/* User mode instruction access fault */
360		/* FALLTHROUGH */
361	case T_DATAFLT+T_USER:
362user_fault:
363		if (type == T_INSTFLT + T_USER) {
364			pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
365#ifdef TRAPDEBUG
366			printf("User Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
367			    pbus_type, pbus_exception_type[pbus_type],
368			    fault_addr, frame, frame->tf_cpu);
369#endif
370		} else {
371			fault_addr = frame->tf_dma0;
372			pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
373#ifdef TRAPDEBUG
374			printf("User Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
375			    pbus_type, pbus_exception_type[pbus_type],
376			    fault_addr, frame, frame->tf_cpu);
377#endif
378		}
379
380		if (frame->tf_dmt0 & (DMT_WRITE | DMT_LOCKBAR)) {
381			ftype = VM_PROT_READ | VM_PROT_WRITE;
382			fault_code = VM_PROT_WRITE;
383		} else {
384			ftype = VM_PROT_READ;
385			fault_code = VM_PROT_READ;
386		}
387
388		va = trunc_page((vaddr_t)fault_addr);
389
390		KERNEL_PROC_LOCK(p);
391		vm = p->p_vmspace;
392		map = &vm->vm_map;
393		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
394			p->p_addr->u_pcb.pcb_onfault = 0;
395
396		/* Call uvm_fault() to resolve non-bus error faults */
397		switch (pbus_type) {
398		case CMMU_PFSR_SUCCESS:
399			result = 0;
400			break;
401		case CMMU_PFSR_BERROR:
402			result = EACCES;
403			break;
404		default:
405			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
406			break;
407		}
408
409		p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
410
411		if ((caddr_t)va >= vm->vm_maxsaddr) {
412			if (result == 0)
413				uvm_grow(p, va);
414			else if (result == EACCES)
415				result = EFAULT;
416		}
417		KERNEL_PROC_UNLOCK(p);
418
419		/*
420		 * This could be a fault caused in copyin*()
421		 * while accessing user space.
422		 */
423		if (result != 0 && pcb_onfault != 0) {
424			frame->tf_snip = pcb_onfault | NIP_V;
425			frame->tf_sfip = (pcb_onfault + 4) | FIP_V;
426			frame->tf_sxip = 0;
427			/*
428			 * Continue as if the fault had been resolved, but
429			 * do not try to complete the faulting access.
430			 */
431			frame->tf_dmt0 |= DMT_SKIP;
432			result = 0;
433		}
434
435		if (result == 0) {
436			if (type == T_DATAFLT+T_USER) {
437				/*
438			 	 * We could resolve the fault. Call
439			 	 * data_access_emulation to drain the data unit
440			 	 * pipe line and reset dmt0 so that trap won't
441			 	 * get called again.
442			 	 */
443				data_access_emulation((unsigned *)frame);
444				frame->tf_dpfsr = 0;
445				frame->tf_dmt0 = 0;
446			} else {
447				/*
448				 * back up SXIP, SNIP,
449				 * clearing the Error bit
450				 */
451				frame->tf_sfip = frame->tf_snip & ~FIP_E;
452				frame->tf_snip = frame->tf_sxip & ~NIP_E;
453				frame->tf_ipfsr = 0;
454			}
455		} else {
456			sig = result == EACCES ? SIGBUS : SIGSEGV;
457			fault_type = result == EACCES ?
458			    BUS_ADRERR : SEGV_MAPERR;
459		}
460		break;
461	case T_MISALGNFLT+T_USER:
462		/* Fix any misaligned ld.d or st.d instructions */
463		sig = double_reg_fixup(frame);
464		fault_type = BUS_ADRALN;
465		break;
466	case T_PRIVINFLT+T_USER:
467	case T_ILLFLT+T_USER:
468#ifndef DDB
469	case T_KDB_BREAK:
470	case T_KDB_ENTRY:
471#endif
472	case T_KDB_BREAK+T_USER:
473	case T_KDB_ENTRY+T_USER:
474	case T_KDB_TRACE:
475	case T_KDB_TRACE+T_USER:
476		sig = SIGILL;
477		break;
478	case T_BNDFLT+T_USER:
479		sig = SIGFPE;
480		break;
481	case T_ZERODIV+T_USER:
482		sig = SIGFPE;
483		fault_type = FPE_INTDIV;
484		break;
485	case T_OVFFLT+T_USER:
486		sig = SIGFPE;
487		fault_type = FPE_INTOVF;
488		break;
489	case T_FPEPFLT+T_USER:
490		sig = SIGFPE;
491		break;
492	case T_SIGSYS+T_USER:
493		sig = SIGSYS;
494		break;
495	case T_STEPBPT+T_USER:
496#ifdef PTRACE
497		/*
498		 * This trap is used by the kernel to support single-step
499		 * debugging (although any user could generate this trap
500		 * which should probably be handled differently). When a
501		 * process is continued by a debugger with the PT_STEP
502		 * function of ptrace (single step), the kernel inserts
503		 * one or two breakpoints in the user process so that only
504		 * one instruction (or two in the case of a delayed branch)
505		 * is executed.  When this breakpoint is hit, we get the
506		 * T_STEPBPT trap.
507		 */
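		/*
		 * The breakpoint addresses checked below (md_bp0va and
		 * md_bp1va) and the saved original instructions
		 * (md_bp0save and md_bp1save) are set up by process_sstep()
		 * later in this file: one breakpoint at pc + 4 and, for
		 * branches and system calls, a second one at the branch
		 * target.
		 */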
508		{
509			u_int instr;
510			vaddr_t pc = PC_REGS(&frame->tf_regs);
511
512			/* read break instruction */
513			copyin((caddr_t)pc, &instr, sizeof(u_int));
514
515			/* check and see if we got here by accident */
516			if ((p->p_md.md_bp0va != pc &&
517			     p->p_md.md_bp1va != pc) ||
518			    instr != SSBREAKPOINT) {
519				sig = SIGTRAP;
520				fault_type = TRAP_TRACE;
521				break;
522			}
523
524			/* restore original instruction and clear breakpoint */
525			if (p->p_md.md_bp0va == pc) {
526				ss_put_value(p, pc, p->p_md.md_bp0save);
527				p->p_md.md_bp0va = 0;
528			}
529			if (p->p_md.md_bp1va == pc) {
530				ss_put_value(p, pc, p->p_md.md_bp1save);
531				p->p_md.md_bp1va = 0;
532			}
533
534#if 1
535			frame->tf_sfip = frame->tf_snip;
536			frame->tf_snip = pc | NIP_V;
537#endif
538			sig = SIGTRAP;
539			fault_type = TRAP_BRKPT;
540		}
541#else
542		sig = SIGTRAP;
543		fault_type = TRAP_TRACE;
544#endif
545		break;
546
547	case T_USERBPT+T_USER:
548		/*
549		 * This trap is meant to be used by debuggers to implement
550		 * breakpoint debugging.  When we get this trap, we just
551		 * return a signal which gets caught by the debugger.
552		 */
553		frame->tf_sfip = frame->tf_snip;
554		frame->tf_snip = frame->tf_sxip;
555		sig = SIGTRAP;
556		fault_type = TRAP_BRKPT;
557		break;
558
559	case T_ASTFLT+T_USER:
560		uvmexp.softs++;
561		p->p_md.md_astpending = 0;
562		if (p->p_flag & P_OWEUPC) {
563			p->p_flag &= ~P_OWEUPC;
564			KERNEL_PROC_LOCK(p);
565			ADDUPROF(p);
566			KERNEL_PROC_UNLOCK(p);
567		}
568		break;
569	}
570
571	/*
572	 * If trap from supervisor mode, just return
573	 */
574	if (type < T_USER)
575		return;
576
577	if (sig) {
578		sv.sival_int = fault_addr;
579		KERNEL_PROC_LOCK(p);
580		trapsignal(p, sig, fault_code, fault_type, sv);
581		KERNEL_PROC_UNLOCK(p);
582		/*
583		 * don't want multiple faults - we are going to
584		 * deliver signal.
585		 */
586		frame->tf_dmt0 = 0;
587		frame->tf_ipfsr = frame->tf_dpfsr = 0;
588	}
589
590	userret(p, frame, sticks);
591}
592#endif /* M88100 */
593
594#ifdef M88110
595void
596m88110_trap(unsigned type, struct trapframe *frame)
597{
598	struct proc *p;
599	u_quad_t sticks = 0;
600	struct vm_map *map;
601	vaddr_t va, pcb_onfault;
602	vm_prot_t ftype;
603	int fault_type;
604	u_long fault_code;
605	unsigned fault_addr;
606	struct vmspace *vm;
607	union sigval sv;
608	int result;
609#ifdef DDB
610	int s;
611	u_int psr;
612#endif
613	int sig = 0;
614	pt_entry_t *pte;
615
616	extern struct vm_map *kernel_map;
617	extern pt_entry_t *pmap_pte(pmap_t, vaddr_t);
618
619	uvmexp.traps++;
620	if ((p = curproc) == NULL)
621		p = &proc0;
622
623	if (USERMODE(frame->tf_epsr)) {
624		sticks = p->p_sticks;
625		type += T_USER;
626		p->p_md.md_tf = frame;	/* for ptrace/signals */
627	}
628	fault_type = 0;
629	fault_code = 0;
630	fault_addr = frame->tf_exip & XIP_ADDR;
631
632	switch (type) {
633	default:
634		panictrap(frame->tf_vector, frame);
635		break;
636		/*NOTREACHED*/
637
638	case T_110_DRM+T_USER:
639	case T_110_DRM:
640#ifdef DEBUG
641		printf("DMMU read miss: Hardware Table Searches should be enabled!\n");
642#endif
643		panictrap(frame->tf_vector, frame);
644		break;
645		/*NOTREACHED*/
646	case T_110_DWM+T_USER:
647	case T_110_DWM:
648#ifdef DEBUG
649		printf("DMMU write miss: Hardware Table Searches should be enabled!\n");
650#endif
651		panictrap(frame->tf_vector, frame);
652		break;
653		/*NOTREACHED*/
654	case T_110_IAM+T_USER:
655	case T_110_IAM:
656#ifdef DEBUG
657		printf("IMMU miss: Hardware Table Searches should be enabled!\n");
658#endif
659		panictrap(frame->tf_vector, frame);
660		break;
661		/*NOTREACHED*/
662
663#ifdef DDB
664	case T_KDB_TRACE:
665		s = splhigh();
666		set_psr((psr = get_psr()) & ~PSR_IND);
667		ddb_break_trap(T_KDB_TRACE, (db_regs_t*)frame);
668		set_psr(psr);
669		splx(s);
670		return;
671	case T_KDB_BREAK:
672		s = splhigh();
673		set_psr((psr = get_psr()) & ~PSR_IND);
674		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
675		set_psr(psr);
676		splx(s);
677		return;
678	case T_KDB_ENTRY:
679		s = splhigh();
680		set_psr((psr = get_psr()) & ~PSR_IND);
681		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
682		set_psr(psr);
683		/* skip one instruction */
684		if (frame->tf_exip & 1)
685			frame->tf_exip = frame->tf_enip;
686		else
687			frame->tf_exip += 4;
688		splx(s);
689		return;
690#if 0
691	case T_ILLFLT:
692		s = splhigh();
693		set_psr((psr = get_psr()) & ~PSR_IND);
694		ddb_error_trap(type == T_ILLFLT ? "unimplemented opcode" :
695		       "error fault", (db_regs_t*)frame);
696		set_psr(psr);
697		splx(s);
698		return;
699#endif /* 0 */
700#endif /* DDB */
701	case T_ILLFLT:
702		printf("Unimplemented opcode!\n");
703		panictrap(frame->tf_vector, frame);
704		break;
705	case T_NON_MASK:
706	case T_NON_MASK+T_USER:
707		curcpu()->ci_intrdepth++;
708		md_interrupt_func(T_NON_MASK, frame);
709		curcpu()->ci_intrdepth--;
710		return;
711	case T_INT:
712	case T_INT+T_USER:
713		curcpu()->ci_intrdepth++;
714		md_interrupt_func(T_INT, frame);
715		curcpu()->ci_intrdepth--;
716		return;
717	case T_MISALGNFLT:
718		printf("kernel mode misaligned access exception @ 0x%08x\n",
719		    frame->tf_exip);
720		panictrap(frame->tf_vector, frame);
721		break;
722		/*NOTREACHED*/
723
724	case T_INSTFLT:
725		/* kernel mode instruction access fault.
726		 * Should never, never happen for a non-paged kernel.
727		 */
728#ifdef TRAPDEBUG
729		printf("Kernel Instruction fault exip %x isr %x ilar %x\n",
730		    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
731#endif
732		panictrap(frame->tf_vector, frame);
733		break;
734		/*NOTREACHED*/
735
736	case T_DATAFLT:
737		/* kernel mode data fault */
738
739		/* data fault on the user address? */
740		if ((frame->tf_dsr & CMMU_DSR_SU) == 0) {
741			type = T_DATAFLT + T_USER;
742			goto m88110_user_fault;
743		}
744
745#ifdef TRAPDEBUG
746		printf("Kernel Data access fault exip %x dsr %x dlar %x\n",
747		    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
748#endif
749
750		fault_addr = frame->tf_dlar;
751		if (frame->tf_dsr & CMMU_DSR_RW) {
752			ftype = VM_PROT_READ;
753			fault_code = VM_PROT_READ;
754		} else {
755			ftype = VM_PROT_READ|VM_PROT_WRITE;
756			fault_code = VM_PROT_WRITE;
757		}
758
759		va = trunc_page((vaddr_t)fault_addr);
760		if (va == 0) {
761			panic("trap: bad kernel access at %x", fault_addr);
762		}
763
764		KERNEL_LOCK(LK_CANRECURSE | LK_EXCLUSIVE);
765		vm = p->p_vmspace;
766		map = kernel_map;
767
768		if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
769			frame->tf_dsr &= ~CMMU_DSR_WE;	/* undefined */
770			/*
771			 * On a segment or a page fault, call uvm_fault() to
772			 * resolve the fault.
773			 */
774			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
775				p->p_addr->u_pcb.pcb_onfault = 0;
776			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
777			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
778			if (result == 0) {
779				KERNEL_UNLOCK();
780				return;
781			}
782		}
783		if (frame->tf_dsr & CMMU_DSR_WE) {	/* write fault  */
784			/*
785			 * This could be a write protection fault or an
786			 * exception to set the used and modified bits
787			 * in the pte. Basically, if we got a write error,
788			 * then we already have a pte entry that faulted
789			 * in from a previous seg fault or page fault.
790			 * Get the pte and check the status of the
791			 * modified and valid bits to determine if this
792			 * is indeed a real write fault.  XXX smurph
793			 */
794			pte = pmap_pte(map->pmap, va);
795#ifdef DEBUG
796			if (pte == NULL) {
797				KERNEL_UNLOCK();
798				panic("NULL pte on write fault??");
799			}
800#endif
801			if (!(*pte & PG_M) && !(*pte & PG_RO)) {
802				/* Set modified bit and try the write again. */
803#ifdef TRAPDEBUG
804				printf("Corrected kernel write fault, map %x pte %x\n",
805				    map->pmap, *pte);
806#endif
807				*pte |= PG_M;
808				KERNEL_UNLOCK();
809				return;
810#if 1	/* shouldn't happen */
811			} else {
812				/* must be a real wp fault */
813#ifdef TRAPDEBUG
814				printf("Uncorrected kernel write fault, map %x pte %x\n",
815				    map->pmap, *pte);
816#endif
817				if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
818					p->p_addr->u_pcb.pcb_onfault = 0;
819				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
820				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
821				if (result == 0) {
822					KERNEL_UNLOCK();
823					return;
824				}
825#endif
826			}
827		}
828		KERNEL_UNLOCK();
829		panictrap(frame->tf_vector, frame);
830		/* NOTREACHED */
831	case T_INSTFLT+T_USER:
832		/* User mode instruction access fault */
833		/* FALLTHROUGH */
834	case T_DATAFLT+T_USER:
835m88110_user_fault:
836		if (type == T_INSTFLT+T_USER) {
837			ftype = VM_PROT_READ;
838			fault_code = VM_PROT_READ;
839#ifdef TRAPDEBUG
840			printf("User Instruction fault exip %x isr %x ilar %x\n",
841			    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
842#endif
843		} else {
844			fault_addr = frame->tf_dlar;
845			if (frame->tf_dsr & CMMU_DSR_RW) {
846				ftype = VM_PROT_READ;
847				fault_code = VM_PROT_READ;
848			} else {
849				ftype = VM_PROT_READ|VM_PROT_WRITE;
850				fault_code = VM_PROT_WRITE;
851			}
852#ifdef TRAPDEBUG
853			printf("User Data access fault exip %x dsr %x dlar %x\n",
854			    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
855#endif
856		}
857
858		va = trunc_page((vaddr_t)fault_addr);
859
860		KERNEL_PROC_LOCK(p);
861		vm = p->p_vmspace;
862		map = &vm->vm_map;
863		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
864			p->p_addr->u_pcb.pcb_onfault = 0;
865
866		/*
867		 * Call uvm_fault() to resolve non-bus error faults
868		 * whenever possible.
869		 */
870		if (type == T_DATAFLT+T_USER) {
871			/* data faults */
872			if (frame->tf_dsr & CMMU_DSR_BE) {
873				/* bus error */
874				result = EACCES;
875			} else
876			if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
877				/* segment or page fault */
878				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
879				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
880			} else
881			if (frame->tf_dsr & (CMMU_DSR_CP | CMMU_DSR_WA)) {
882				/* copyback or write allocate error */
883				result = EACCES;
884			} else
885			if (frame->tf_dsr & CMMU_DSR_WE) {
886				/* write fault  */
887				/* This could be a write protection fault or an
888				 * exception to set the used and modified bits
889				 * in the pte. Basically, if we got a write
890				 * error, then we already have a pte entry that
891				 * faulted in from a previous seg fault or page
892				 * fault.
893				 * Get the pte and check the status of the
894				 * modified and valid bits to determine if this
895				 * is indeed a real write fault.  XXX smurph
896				 */
897				pte = pmap_pte(vm_map_pmap(map), va);
898#ifdef DEBUG
899				if (pte == NULL) {
900					KERNEL_PROC_UNLOCK(p);
901					panic("NULL pte on write fault??");
902				}
903#endif
904				if (!(*pte & PG_M) && !(*pte & PG_RO)) {
905					/*
906					 * Set modified bit and try the
907					 * write again.
908					 */
909#ifdef TRAPDEBUG
910					printf("Corrected userland write fault, map %x pte %x\n",
911					    map->pmap, *pte);
912#endif
913					*pte |= PG_M;
914					/*
915					 * invalidate ATCs to force
916					 * table search
917					 */
918					set_dcmd(CMMU_DCMD_INV_UATC);
919					KERNEL_PROC_UNLOCK(p);
920					return;
921				} else {
922					/* must be a real wp fault */
923#ifdef TRAPDEBUG
924					printf("Uncorrected userland write fault, map %x pte %x\n",
925					    map->pmap, *pte);
926#endif
927					result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
928					p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
929				}
930			} else {
931#ifdef TRAPDEBUG
932				printf("Unexpected Data access fault dsr %x\n",
933				    frame->tf_dsr);
934#endif
935				KERNEL_PROC_UNLOCK(p);
936				panictrap(frame->tf_vector, frame);
937			}
938		} else {
939			/* instruction faults */
940			if (frame->tf_isr &
941			    (CMMU_ISR_BE | CMMU_ISR_SP | CMMU_ISR_TBE)) {
942				/* bus error, supervisor protection */
943				result = EACCES;
944			} else
945			if (frame->tf_isr & (CMMU_ISR_SI | CMMU_ISR_PI)) {
946				/* segment or page fault */
947				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
948				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
949			} else {
950#ifdef TRAPDEBUG
951				printf("Unexpected Instruction fault isr %x\n",
952				    frame->tf_isr);
953#endif
954				KERNEL_PROC_UNLOCK(p);
955				panictrap(frame->tf_vector, frame);
956			}
957		}
958
959		if ((caddr_t)va >= vm->vm_maxsaddr) {
960			if (result == 0)
961				uvm_grow(p, va);
962			else if (result == EACCES)
963				result = EFAULT;
964		}
965		KERNEL_PROC_UNLOCK(p);
966
967		/*
968		 * This could be a fault caused in copyin*()
969		 * while accessing user space.
970		 */
971		if (result != 0 && pcb_onfault != 0) {
972			frame->tf_exip = pcb_onfault;
973			/*
974			 * Continue as if the fault had been resolved.
975			 */
976			result = 0;
977		}
978
979		if (result != 0) {
980			sig = result == EACCES ? SIGBUS : SIGSEGV;
981			fault_type = result == EACCES ?
982			    BUS_ADRERR : SEGV_MAPERR;
983		}
984		break;
985	case T_MISALGNFLT+T_USER:
986		/* Fix any misaligned ld.d or st.d instructions */
987		sig = double_reg_fixup(frame);
988		fault_type = BUS_ADRALN;
989		break;
990	case T_PRIVINFLT+T_USER:
991	case T_ILLFLT+T_USER:
992#ifndef DDB
993	case T_KDB_BREAK:
994	case T_KDB_ENTRY:
995	case T_KDB_TRACE:
996#endif
997	case T_KDB_BREAK+T_USER:
998	case T_KDB_ENTRY+T_USER:
999	case T_KDB_TRACE+T_USER:
1000		sig = SIGILL;
1001		break;
1002	case T_BNDFLT+T_USER:
1003		sig = SIGFPE;
1004		break;
1005	case T_ZERODIV+T_USER:
1006		sig = SIGFPE;
1007		fault_type = FPE_INTDIV;
1008		break;
1009	case T_OVFFLT+T_USER:
1010		sig = SIGFPE;
1011		fault_type = FPE_INTOVF;
1012		break;
1013	case T_FPEPFLT+T_USER:
1014		sig = SIGFPE;
1015		break;
1016	case T_SIGSYS+T_USER:
1017		sig = SIGSYS;
1018		break;
1019	case T_STEPBPT+T_USER:
1020#ifdef PTRACE
1021		/*
1022		 * This trap is used by the kernel to support single-step
1023		 * debugging (although any user could generate this trap
1024		 * which should probably be handled differently). When a
1025		 * process is continued by a debugger with the PT_STEP
1026		 * function of ptrace (single step), the kernel inserts
1027		 * one or two breakpoints in the user process so that only
1028		 * one instruction (or two in the case of a delayed branch)
1029		 * is executed.  When this breakpoint is hit, we get the
1030		 * T_STEPBPT trap.
1031		 */
1032		{
1033			u_int instr;
1034			vaddr_t pc = PC_REGS(&frame->tf_regs);
1035
1036			/* read break instruction */
1037			copyin((caddr_t)pc, &instr, sizeof(u_int));
1038
1039			/* check and see if we got here by accident */
1040			if ((p->p_md.md_bp0va != pc &&
1041			     p->p_md.md_bp1va != pc) ||
1042			    instr != SSBREAKPOINT) {
1043				sig = SIGTRAP;
1044				fault_type = TRAP_TRACE;
1045				break;
1046			}
1047
1048			/* restore original instruction and clear breakpoint */
1049			if (p->p_md.md_bp0va == pc) {
1050				ss_put_value(p, pc, p->p_md.md_bp0save);
1051				p->p_md.md_bp0va = 0;
1052			}
1053			if (p->p_md.md_bp1va == pc) {
1054				ss_put_value(p, pc, p->p_md.md_bp1save);
1055				p->p_md.md_bp1va = 0;
1056			}
1057
1058			sig = SIGTRAP;
1059			fault_type = TRAP_BRKPT;
1060		}
1061#else
1062		sig = SIGTRAP;
1063		fault_type = TRAP_TRACE;
1064#endif
1065		break;
1066	case T_USERBPT+T_USER:
1067		/*
1068		 * This trap is meant to be used by debuggers to implement
1069		 * breakpoint debugging.  When we get this trap, we just
1070		 * return a signal which gets caught by the debugger.
1071		 */
1072		sig = SIGTRAP;
1073		fault_type = TRAP_BRKPT;
1074		break;
1075
1076	case T_ASTFLT+T_USER:
1077		uvmexp.softs++;
1078		p->p_md.md_astpending = 0;
1079		if (p->p_flag & P_OWEUPC) {
1080			p->p_flag &= ~P_OWEUPC;
1081			KERNEL_PROC_LOCK(p);
1082			ADDUPROF(p);
1083			KERNEL_PROC_UNLOCK(p);
1084		}
1085		break;
1086	}
1087
1088	/*
1089	 * If trap from supervisor mode, just return
1090	 */
1091	if (type < T_USER)
1092		return;
1093
1094	if (sig) {
1095		sv.sival_int = fault_addr;
1096		KERNEL_PROC_LOCK(p);
1097		trapsignal(p, sig, fault_code, fault_type, sv);
1098		KERNEL_PROC_UNLOCK(p);
1099	}
1100
1101	userret(p, frame, sticks);
1102}
1103#endif /* M88110 */
1104
1105__dead void
1106error_fatal(struct trapframe *frame)
1107{
1108	if (frame->tf_vector == 0)
1109		printf("\nCPU %d Reset Exception\n", cpu_number());
1110	else
1111		printf("\nCPU %d Error Exception\n", cpu_number());
1112
1113#ifdef DDB
1114	regdump((struct trapframe*)frame);
1115#endif
1116	panic("unrecoverable exception %d", frame->tf_vector);
1117}
1118
1119#ifdef M88100
1120void
1121m88100_syscall(register_t code, struct trapframe *tf)
1122{
1123	int i, nsys, nap;
1124	struct sysent *callp;
1125	struct proc *p;
1126	int error;
1127	register_t args[11], rval[2], *ap;
1128	u_quad_t sticks;
1129
1130	uvmexp.syscalls++;
1131
1132	p = curproc;
1133
1134	callp = p->p_emul->e_sysent;
1135	nsys  = p->p_emul->e_nsysent;
1136
1137	sticks = p->p_sticks;
1138	p->p_md.md_tf = tf;
1139
1140	/*
1141	 * For 88k, all the arguments are passed in the registers (r2-r12)
1142	 * For syscall (and __syscall), r2 (and r3) has the actual code.
1143	 * __syscall  takes a quad syscall number, so that other
1144	 * arguments are at their natural alignments.
1145	 */
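	/*
	 * For example (illustrative), a direct write(2) arrives here with
	 * code == SYS_write and its arguments in r2-r4, while an indirect
	 * syscall(SYS_write, ...) arrives with code == SYS_syscall, the
	 * real code in r2 and the arguments shifted up one register; the
	 * switch below consumes r2 in that case.
	 */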
1146	ap = &tf->tf_r[2];
1147	nap = 11; /* r2-r12 */
1148
1149	switch (code) {
1150	case SYS_syscall:
1151		code = *ap++;
1152		nap--;
1153		break;
1154	case SYS___syscall:
1155		if (callp != sysent)
1156			break;
1157		code = ap[_QUAD_LOWWORD];
1158		ap += 2;
1159		nap -= 2;
1160		break;
1161	}
1162
1163	/* Callp currently points to syscall, which returns ENOSYS. */
1164	if (code < 0 || code >= nsys)
1165		callp += p->p_emul->e_nosys;
1166	else {
1167		callp += code;
1168		i = callp->sy_argsize / sizeof(register_t);
1169		if (i > nap)
1170			panic("syscall nargs");
1171		/*
1172		 * just copy them; syscall stub made sure all the
1173		 * args are moved from user stack to registers.
1174		 */
1175		bcopy((caddr_t)ap, (caddr_t)args, i * sizeof(register_t));
1176	}
1177
1178	KERNEL_PROC_LOCK(p);
1179#ifdef SYSCALL_DEBUG
1180	scdebug_call(p, code, args);
1181#endif
1182#ifdef KTRACE
1183	if (KTRPOINT(p, KTR_SYSCALL))
1184		ktrsyscall(p, code, callp->sy_argsize, args);
1185#endif
1186	rval[0] = 0;
1187	rval[1] = tf->tf_r[3];
1188#if NSYSTRACE > 0
1189	if (ISSET(p->p_flag, P_SYSTRACE))
1190		error = systrace_redirect(code, p, args, rval);
1191	else
1192#endif
1193		error = (*callp->sy_call)(p, args, rval);
1194	/*
1195	 * system call will look like:
1196	 *	 ld r10, r31, 32; r10,r11,r12 might be garbage.
1197	 *	 ld r11, r31, 36
1198	 *	 ld r12, r31, 40
1199	 *	 or r13, r0, <code>
1200	 *       tb0 0, r0, <128> <- sxip
1201	 *	 br err 	  <- snip
1202	 *       jmp r1 	  <- sfip
1203	 *  err: or.u r3, r0, hi16(errno)
1204	 *	 st r2, r3, lo16(errno)
1205	 *	 subu r2, r0, 1
1206	 *	 jmp r1
1207	 *
1208	 * So, when we take syscall trap, sxip/snip/sfip will be as
1209	 * shown above.
1210	 * Given this,
1211	 * 1. If the system call returned 0, need to skip nip.
1212	 *	nip = fip, fip += 4
1213	 *    (doesn't matter what fip + 4 will be but we will never
1214	 *    execute this since jmp r1 at nip will change the execution flow.)
1215	 * 2. If the system call returned an errno > 0, plug the value
1216	 *    in r2, and leave nip and fip unchanged. This will have us
1217	 *    executing "br err" on return to user space.
1218	 * 3. If the system call code returned ERESTART,
1219	 *    we need to reexecute the trap instruction. Back up the pipe
1220	 *    line.
1221	 *     fip = nip, nip = xip
1222	 * 4. If the system call returned EJUSTRETURN, don't need to adjust
1223	 *    any pointers.
1224	 */
1225
1226	KERNEL_PROC_UNLOCK(p);
1227	switch (error) {
1228	case 0:
1229		tf->tf_r[2] = rval[0];
1230		tf->tf_r[3] = rval[1];
1231		tf->tf_epsr &= ~PSR_C;
1232		tf->tf_snip = tf->tf_sfip & ~NIP_E;
1233		tf->tf_sfip = tf->tf_snip + 4;
1234		break;
1235	case ERESTART:
1236		tf->tf_epsr &= ~PSR_C;
1237		tf->tf_sfip = tf->tf_snip & ~FIP_E;
1238		tf->tf_snip = tf->tf_sxip & ~NIP_E;
1239		break;
1240	case EJUSTRETURN:
1241		tf->tf_epsr &= ~PSR_C;
1242		break;
1243	default:
1244		if (p->p_emul->e_errno)
1245			error = p->p_emul->e_errno[error];
1246		tf->tf_r[2] = error;
1247		tf->tf_epsr |= PSR_C;   /* fail */
1248		tf->tf_snip = tf->tf_snip & ~NIP_E;
1249		tf->tf_sfip = tf->tf_sfip & ~FIP_E;
1250		break;
1251	}
1252#ifdef SYSCALL_DEBUG
1253	KERNEL_PROC_LOCK(p);
1254	scdebug_ret(p, code, error, rval);
1255	KERNEL_PROC_UNLOCK(p);
1256#endif
1257	userret(p, tf, sticks);
1258#ifdef KTRACE
1259	if (KTRPOINT(p, KTR_SYSRET)) {
1260		KERNEL_PROC_LOCK(p);
1261		ktrsysret(p, code, error, rval[0]);
1262		KERNEL_PROC_UNLOCK(p);
1263	}
1264#endif
1265}
1266#endif /* M88100 */
1267
1268#ifdef M88110
1269/* Instruction pointers operate differently on mc88110 */
1270void
1271m88110_syscall(register_t code, struct trapframe *tf)
1272{
1273	int i, nsys, nap;
1274	struct sysent *callp;
1275	struct proc *p;
1276	int error;
1277	register_t args[11], rval[2], *ap;
1278	u_quad_t sticks;
1279
1280	uvmexp.syscalls++;
1281
1282	p = curproc;
1283
1284	callp = p->p_emul->e_sysent;
1285	nsys  = p->p_emul->e_nsysent;
1286
1287	sticks = p->p_sticks;
1288	p->p_md.md_tf = tf;
1289
1290	/*
1291	 * For 88k, all the arguments are passed in the registers (r2-r12)
1292	 * For syscall (and __syscall), r2 (and r3) has the actual code.
1293	 * __syscall  takes a quad syscall number, so that other
1294	 * arguments are at their natural alignments.
1295	 */
1296	ap = &tf->tf_r[2];
1297	nap = 11;	/* r2-r12 */
1298
1299	switch (code) {
1300	case SYS_syscall:
1301		code = *ap++;
1302		nap--;
1303		break;
1304	case SYS___syscall:
1305		if (callp != sysent)
1306			break;
1307		code = ap[_QUAD_LOWWORD];
1308		ap += 2;
1309		nap -= 2;
1310		break;
1311	}
1312
1313	/* Callp currently points to syscall, which returns ENOSYS. */
1314	if (code < 0 || code >= nsys)
1315		callp += p->p_emul->e_nosys;
1316	else {
1317		callp += code;
1318		i = callp->sy_argsize / sizeof(register_t);
1319		if (i > nap)
1320			panic("syscall nargs");
1321		/*
1322		 * just copy them; syscall stub made sure all the
1323		 * args are moved from user stack to registers.
1324		 */
1325		bcopy((caddr_t)ap, (caddr_t)args, i * sizeof(register_t));
1326	}
1327	KERNEL_PROC_LOCK(p);
1328#ifdef SYSCALL_DEBUG
1329	scdebug_call(p, code, args);
1330#endif
1331#ifdef KTRACE
1332	if (KTRPOINT(p, KTR_SYSCALL))
1333		ktrsyscall(p, code, callp->sy_argsize, args);
1334#endif
1335	rval[0] = 0;
1336	rval[1] = tf->tf_r[3];
1337#if NSYSTRACE > 0
1338	if (ISSET(p->p_flag, P_SYSTRACE))
1339		error = systrace_redirect(code, p, args, rval);
1340	else
1341#endif
1342		error = (*callp->sy_call)(p, args, rval);
1343	/*
1344	 * system call will look like:
1345	 *	 ld r10, r31, 32; r10,r11,r12 might be garbage.
1346	 *	 ld r11, r31, 36
1347	 *	 ld r12, r31, 40
1348	 *	 or r13, r0, <code>
1349	 *       tb0 0, r0, <128> <- exip
1350	 *	 br err 	  <- enip
1351	 *       jmp r1
1352	 *  err: or.u r3, r0, hi16(errno)
1353	 *	 st r2, r3, lo16(errno)
1354	 *	 subu r2, r0, 1
1355	 *	 jmp r1
1356	 *
1357	 * So, when we take syscall trap, exip/enip will be as
1358	 * shown above.
1359	 * Given this,
1360	 * 1. If the system call returned 0, need to jmp r1.
1361	 *    exip += 8
1362	 * 2. If the system call returned an errno > 0, increment
1363	 *    exip += 4 and plug the value in r2. This will have us
1364	 *    executing "br err" on return to user space.
1365	 * 3. If the system call code returned ERESTART,
1366	 *    we need to reexecute the trap instruction. Leave exip as is.
1367	 * 4. If the system call returned EJUSTRETURN, just return.
1368	 *    exip += 4
1369	 */
1370
1371	KERNEL_PROC_UNLOCK(p);
1372	switch (error) {
1373	case 0:
1374		tf->tf_r[2] = rval[0];
1375		tf->tf_r[3] = rval[1];
1376		tf->tf_epsr &= ~PSR_C;
1377		/* skip two instructions */
1378		if (tf->tf_exip & 1)
1379			tf->tf_exip = tf->tf_enip + 4;
1380		else
1381			tf->tf_exip += 4 + 4;
1382		break;
1383	case ERESTART:
1384		/*
1385		 * Reexecute the trap.
1386		 * exip is already at the trap instruction, so
1387		 * there is nothing to do.
1388		 */
1389		tf->tf_epsr &= ~PSR_C;
1390		break;
1391	case EJUSTRETURN:
1392		tf->tf_epsr &= ~PSR_C;
1393		/* skip one instruction */
1394		if (tf->tf_exip & 1)
1395			tf->tf_exip = tf->tf_enip;
1396		else
1397			tf->tf_exip += 4;
1398		break;
1399	default:
1400		if (p->p_emul->e_errno)
1401			error = p->p_emul->e_errno[error];
1402		tf->tf_r[2] = error;
1403		tf->tf_epsr |= PSR_C;   /* fail */
1404		/* skip one instruction */
1405		if (tf->tf_exip & 1)
1406			tf->tf_exip = tf->tf_enip;
1407		else
1408			tf->tf_exip += 4;
1409		break;
1410	}
1411
1412#ifdef SYSCALL_DEBUG
1413	KERNEL_PROC_LOCK(p);
1414	scdebug_ret(p, code, error, rval);
1415	KERNEL_PROC_UNLOCK(p);
1416#endif
1417	userret(p, tf, sticks);
1418#ifdef KTRACE
1419	if (KTRPOINT(p, KTR_SYSRET)) {
1420		KERNEL_PROC_LOCK(p);
1421		ktrsysret(p, code, error, rval[0]);
1422		KERNEL_PROC_UNLOCK(p);
1423	}
1424#endif
1425}
1426#endif	/* M88110 */
1427
1428/*
1429 * Set up return-value registers as fork() libc stub expects,
1430 * and do normal return-to-user-mode stuff.
1431 */
1432void
1433child_return(arg)
1434	void *arg;
1435{
1436	struct proc *p = arg;
1437	struct trapframe *tf;
1438
1439	tf = (struct trapframe *)USER_REGS(p);
1440	tf->tf_r[2] = 0;
1441	tf->tf_r[3] = 0;
1442	tf->tf_epsr &= ~PSR_C;
1443	/* skip br instruction as in syscall() */
1444#ifdef M88100
1445	if (CPU_IS88100) {
1446		tf->tf_snip = tf->tf_sfip & XIP_ADDR;
1447		tf->tf_sfip = tf->tf_snip + 4;
1448	}
1449#endif
1450#ifdef M88110
1451	if (CPU_IS88110) {
1452		/* skip two instructions */
1453		if (tf->tf_exip & 1)
1454			tf->tf_exip = tf->tf_enip + 4;
1455		else
1456			tf->tf_exip += 4 + 4;
1457	}
1458#endif
1459
1460	KERNEL_PROC_UNLOCK(p);
1461	userret(p, tf, p->p_sticks);
1462
1463#ifdef KTRACE
1464	if (KTRPOINT(p, KTR_SYSRET)) {
1465		KERNEL_PROC_LOCK(p);
1466		ktrsysret(p,
1467		    (p->p_flag & P_PPWAIT) ? SYS_vfork : SYS_fork, 0, 0);
1468		KERNEL_PROC_UNLOCK(p);
1469	}
1470#endif
1471}
1472
1473#ifdef PTRACE
1474
1475/*
1476 * User Single Step Debugging Support
1477 */
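/*
 * Single stepping is implemented entirely with breakpoints: process_sstep()
 * below saves the instruction following the current pc (and, when the
 * current instruction may branch or trap, the one at the computed target)
 * and replaces them with SSBREAKPOINT.  The resulting T_STEPBPT traps are
 * handled in m88100_trap()/m88110_trap() above, which restore the saved
 * instructions.
 */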
1478
1479#include <sys/ptrace.h>
1480
1481vaddr_t	ss_branch_taken(u_int, vaddr_t, struct reg *);
1482int	ss_get_value(struct proc *, vaddr_t, u_int *);
1483int	ss_inst_branch_or_call(u_int);
1484int	ss_put_breakpoint(struct proc *, vaddr_t, vaddr_t *, u_int *);
1485
1486#define	SYSCALL_INSTR	0xf000d080	/* tb0 0,r0,128 */
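/*
 * SYSCALL_INSTR matches the "tb0 0, r0, 128" trap used by the system call
 * stubs (see the syscall comments above); process_sstep() treats it like a
 * branch, so that a second breakpoint is planted at pc + 8 (the address
 * ss_branch_taken() returns for system calls), where successful system
 * calls resume.
 */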
1487
1488int
1489ss_get_value(struct proc *p, vaddr_t addr, u_int *value)
1490{
1491	struct uio uio;
1492	struct iovec iov;
1493
1494	iov.iov_base = (caddr_t)value;
1495	iov.iov_len = sizeof(u_int);
1496	uio.uio_iov = &iov;
1497	uio.uio_iovcnt = 1;
1498	uio.uio_offset = (off_t)addr;
1499	uio.uio_resid = sizeof(u_int);
1500	uio.uio_segflg = UIO_SYSSPACE;
1501	uio.uio_rw = UIO_READ;
1502	uio.uio_procp = curproc;
1503	return (process_domem(curproc, p, &uio, PT_READ_I));
1504}
1505
1506int
1507ss_put_value(struct proc *p, vaddr_t addr, u_int value)
1508{
1509	struct uio uio;
1510	struct iovec iov;
1511
1512	iov.iov_base = (caddr_t)&value;
1513	iov.iov_len = sizeof(u_int);
1514	uio.uio_iov = &iov;
1515	uio.uio_iovcnt = 1;
1516	uio.uio_offset = (off_t)addr;
1517	uio.uio_resid = sizeof(u_int);
1518	uio.uio_segflg = UIO_SYSSPACE;
1519	uio.uio_rw = UIO_WRITE;
1520	uio.uio_procp = curproc;
1521	return (process_domem(curproc, p, &uio, PT_WRITE_I));
1522}
1523
1524/*
1525 * ss_branch_taken(instruction, pc, regs)
1526 *
1527 * instruction will be a control flow instruction location at address pc.
1528 * Branch taken is supposed to return the address to which the instruction
1529 * would jump if the branch is taken.
1530 *
1531 * This is different from branch_taken() in ddb, as we also need to process
1532 * system calls.
1533 */
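/*
 * As a worked example of the decoding below: a "br" back four words
 * encodes a 26-bit displacement of 0x3fffffc; masking and shifting gives
 * 0x0ffffff0, the test on bit 27 (0x08000000) detects the negative value,
 * and or'ing in 0xf0000000 sign-extends it to -16, so the routine returns
 * pc - 16.
 */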
1534vaddr_t
1535ss_branch_taken(u_int inst, vaddr_t pc, struct reg *regs)
1536{
1537	u_int regno;
1538
1539	/*
1540	 * Quick check of the instruction. Note that we know we are only
1541	 * invoked if ss_inst_branch_or_call() returns TRUE, so we do not
1542	 * need to repeat the stricter jmp, jsr and syscall checks here.
1543	 */
1544	switch (inst >> (32 - 5)) {
1545	case 0x18:	/* br */
1546	case 0x19:	/* bsr */
1547		/* signed 26 bit pc relative displacement, shift left 2 bits */
1548		inst = (inst & 0x03ffffff) << 2;
1549		/* check if sign extension is needed */
1550		if (inst & 0x08000000)
1551			inst |= 0xf0000000;
1552		return (pc + inst);
1553
1554	case 0x1a:	/* bb0 */
1555	case 0x1b:	/* bb1 */
1556	case 0x1d:	/* bcnd */
1557		/* signed 16 bit pc relative displacement, shift left 2 bits */
1558		inst = (inst & 0x0000ffff) << 2;
1559		/* check if sign extension is needed */
1560		if (inst & 0x00020000)
1561			inst |= 0xfffc0000;
1562		return (pc + inst);
1563
1564	case 0x1e:	/* jmp or jsr */
1565		regno = inst & 0x1f;	/* get the register value */
1566		return (regno == 0 ? 0 : regs->r[regno]);
1567
1568	default:	/* system call */
1569		/*
1570		 * The regular (pc + 4) breakpoint will match the error
1571		 * return. Successful system calls return at (pc + 8),
1572		 * so we'll set up a branch breakpoint there.
1573		 */
1574		return (pc + 8);
1575	}
1576}
1577
1578int
1579ss_inst_branch_or_call(u_int ins)
1580{
1581	/* check high five bits */
1582	switch (ins >> (32 - 5)) {
1583	case 0x18: /* br */
1584	case 0x19: /* bsr */
1585	case 0x1a: /* bb0 */
1586	case 0x1b: /* bb1 */
1587	case 0x1d: /* bcnd */
1588		return (TRUE);
1589	case 0x1e: /* could be jmp or jsr */
1590		if ((ins & 0xfffff3e0) == 0xf400c000)
1591			return (TRUE);
1592	}
1593
1594	return (FALSE);
1595}
1596
1597int
1598ss_put_breakpoint(struct proc *p, vaddr_t va, vaddr_t *bpva, u_int *bpsave)
1599{
1600	int rc;
1601
1602	/* Restore previous breakpoint if we did not trigger it. */
1603	if (*bpva != 0) {
1604		ss_put_value(p, *bpva, *bpsave);
1605		*bpva = 0;
1606	}
1607
1608	/* Save instruction. */
1609	if ((rc = ss_get_value(p, va, bpsave)) != 0)
1610		return (rc);
1611
1612	/* Store breakpoint instruction at the location now. */
1613	*bpva = va;
1614	return (ss_put_value(p, va, SSBREAKPOINT));
1615}
1616
1617int
1618process_sstep(struct proc *p, int sstep)
1619{
1620	struct reg *sstf = USER_REGS(p);
1621	unsigned pc, brpc;
1622	unsigned instr;
1623	int rc;
1624
1625	if (sstep == 0) {
1626		/* Restore previous breakpoints if any. */
1627		if (p->p_md.md_bp0va != 0) {
1628			ss_put_value(p, p->p_md.md_bp0va, p->p_md.md_bp0save);
1629			p->p_md.md_bp0va = 0;
1630		}
1631		if (p->p_md.md_bp1va != 0) {
1632			ss_put_value(p, p->p_md.md_bp1va, p->p_md.md_bp1save);
1633			p->p_md.md_bp1va = 0;
1634		}
1635
1636		return (0);
1637	}
1638
1639	/*
1640	 * User was stopped at pc, i.e. the instruction at pc was not executed.
1641	 * Fetch what's at the current location.
1642	 */
1643	pc = PC_REGS(sstf);
1644	if ((rc = ss_get_value(p, pc, &instr)) != 0)
1645		return (rc);
1646
1647	/*
1648	 * Find if this instruction may cause a branch, and set up a breakpoint
1649	 * at the branch location.
1650	 */
1651	if (ss_inst_branch_or_call(instr) || instr == SYSCALL_INSTR) {
1652		brpc = ss_branch_taken(instr, pc, sstf);
1653
1654		/* self-branches are hopeless */
1655		if (brpc != pc && brpc != 0) {
1656			if ((rc = ss_put_breakpoint(p, brpc,
1657			    &p->p_md.md_bp1va, &p->p_md.md_bp1save)) != 0)
1658				return (rc);
1659		}
1660	}
1661
1662	if ((rc = ss_put_breakpoint(p, pc + 4,
1663	    &p->p_md.md_bp0va, &p->p_md.md_bp0save)) != 0)
1664		return (rc);
1665
1666	return (0);
1667}
1668
1669#endif	/* PTRACE */
1670
1671#ifdef DIAGNOSTIC
1672void
1673splassert_check(int wantipl, const char *func)
1674{
1675	int oldipl;
1676
1677	/*
1678	 * This will raise the spl if too low,
1679	 * in a feeble attempt to reduce further damage.
1680	 */
1681	oldipl = raiseipl(wantipl);
1682
1683	if (oldipl < wantipl) {
1684		splassert_fail(wantipl, oldipl, func);
1685	}
1686}
1687#endif
1688
1689/*
1690 * ld.d and st.d instructions referencing long aligned but not long long
1691 * aligned addresses will trigger a misaligned address exception.
1692 *
1693 * This routine attempts to recover these (valid) statements, by simulating
1694 * the split form of the instruction. If it fails, it returns the appropriate
1695 * signal number to deliver.
1696 *
1697 * Note that we do not attempt to do anything for .d.usr instructions - the
1698 * kernel never issues such instructions, and they cause a privileged
1699 * instruction exception from userland.
1700 */
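/*
 * For instance (illustrative values), "ld.d r2, r4, 0" with r4 = 0x1004
 * faults because the address is only word aligned; the code below decodes
 * rD = 2, accepts the address since (addr & 7) == 4, and performs two
 * 4-byte copyin()s, placing the words at 0x1004 and 0x1008 into r2 and r3.
 */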
1701int
1702double_reg_fixup(struct trapframe *frame)
1703{
1704	u_int32_t pc, instr, value;
1705	int regno, store;
1706	vaddr_t addr;
1707
1708	/*
1709	 * Decode the faulting instruction.
1710	 */
1711
1712	pc = PC_REGS(&frame->tf_regs);
1713	if (copyin((void *)pc, &instr, sizeof(u_int32_t)) != 0)
1714		return SIGSEGV;
1715
1716	switch (instr & 0xfc00ff00) {
1717	case 0xf4001000:	/* ld.d rD, rS1, rS2 */
1718		addr = frame->tf_r[(instr >> 16) & 0x1f]
1719		    + frame->tf_r[(instr & 0x1f)];
1720		store = 0;
1721		break;
1722	case 0xf4002000:	/* st.d rD, rS1, rS2 */
1723		addr = frame->tf_r[(instr >> 16) & 0x1f]
1724		    + frame->tf_r[(instr & 0x1f)];
1725		store = 1;
1726		break;
1727	default:
1728		switch (instr & 0xfc000000) {
1729		case 0x10000000:	/* ld.d rD, rS, imm16 */
1730			addr = (instr & 0x0000ffff) +
1731			    frame->tf_r[(instr >> 16) & 0x1f];
1732			store = 0;
1733			break;
1734		case 0x20000000:	/* st.d rD, rS, imm16 */
1735			addr = (instr & 0x0000ffff) +
1736			    frame->tf_r[(instr >> 16) & 0x1f];
1737			store = 1;
1738			break;
1739		default:
1740			return SIGBUS;
1741		}
1742		break;
1743	}
1744
1745	/* We only handle long but not long long aligned access here */
1746	if ((addr & 0x07) != 4)
1747		return SIGBUS;
1748
1749	regno = (instr >> 21) & 0x1f;
1750
1751	if (store) {
1752		/*
1753		 * Two word stores.
1754		 */
1755		value = frame->tf_r[regno++];
1756		if (copyout(&value, (void *)addr, sizeof(u_int32_t)) != 0)
1757			return SIGSEGV;
1758		if (regno == 32)
1759			value = 0;
1760		else
1761			value = frame->tf_r[regno];
1762		if (copyout(&value, (void *)(addr + 4), sizeof(u_int32_t)) != 0)
1763			return SIGSEGV;
1764	} else {
1765		/*
1766		 * Two word loads. r0 should be left unaltered, but the
1767		 * value should still be fetched even if it is discarded.
1768		 */
1769		if (copyin((void *)addr, &value, sizeof(u_int32_t)) != 0)
1770			return SIGSEGV;
1771		if (regno != 0)
1772			frame->tf_r[regno] = value;
1773		if (copyin((void *)(addr + 4), &value, sizeof(u_int32_t)) != 0)
1774			return SIGSEGV;
1775		if (regno != 31)
1776			frame->tf_r[regno + 1] = value;
1777	}
1778
1779	return 0;
1780}
1781
1782void
1783cache_flush(struct trapframe *tf)
1784{
1785	struct proc *p;
1786	struct pmap *pmap;
1787	u_quad_t sticks;
1788
1789	if ((p = curproc) == NULL)
1790		p = &proc0;
1791
1792	sticks = p->p_sticks;
1793	p->p_md.md_tf = tf;
1794
1795	pmap = vm_map_pmap(&p->p_vmspace->vm_map);
1796	dma_cachectl(pmap, tf->tf_r[2], tf->tf_r[3], DMA_CACHE_SYNC);
1797
1798	tf->tf_snip = tf->tf_snip & ~NIP_E;
1799	tf->tf_sfip = tf->tf_sfip & ~FIP_E;
1800
1801	userret(p, tf, sticks);
1802}
1803