trap.c revision 1.30
1/*	$OpenBSD: trap.c,v 1.30 2006/05/02 21:44:39 miod Exp $	*/
2/*
3 * Copyright (c) 2004, Miodrag Vallat.
4 * Copyright (c) 1998 Steve Murphree, Jr.
5 * Copyright (c) 1996 Nivas Madhur
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *      This product includes software developed by Nivas Madhur.
19 * 4. The name of the author may not be used to endorse or promote products
20 *    derived from this software without specific prior written permission
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34/*
35 * Mach Operating System
36 * Copyright (c) 1991 Carnegie Mellon University
37 * Copyright (c) 1991 OMRON Corporation
38 * All Rights Reserved.
39 *
40 * Permission to use, copy, modify and distribute this software and its
41 * documentation is hereby granted, provided that both the copyright
42 * notice and this permission notice appear in all copies of the
43 * software, derivative works or modified versions, and any portions
44 * thereof, and that both notices appear in supporting documentation.
45 *
46 */
47
48#include <sys/types.h>
49#include <sys/param.h>
50#include <sys/proc.h>
51#include <sys/signalvar.h>
52#include <sys/user.h>
53#include <sys/syscall.h>
54#include <sys/systm.h>
55#include <sys/ktrace.h>
56
57#include "systrace.h"
58#include <dev/systrace.h>
59
60#include <uvm/uvm_extern.h>
61
62#include <machine/asm_macro.h>   /* enable/disable interrupts */
63#include <machine/cmmu.h>
64#include <machine/cpu.h>
65#include <machine/locore.h>
66#ifdef M88100
67#include <machine/m88100.h>		/* DMT_xxx */
68#include <machine/m8820x.h>		/* CMMU_PFSR_xxx */
69#endif
70#ifdef M88110
71#include <machine/m88110.h>
72#endif
73#include <machine/pcb.h>		/* FIP_E, etc. */
74#include <machine/psl.h>		/* FIP_E, etc. */
75#include <machine/trap.h>
76
77#include <machine/db_machdep.h>
78
79#define SSBREAKPOINT (0xF000D1F8U) /* Single Step Breakpoint: tb0 0, r0, 504 */
80
81#define USERMODE(PSR)   (((PSR) & PSR_MODE) == 0)
82#define SYSTEMMODE(PSR) (((PSR) & PSR_MODE) != 0)
83
84__dead void panictrap(int, struct trapframe *);
85__dead void error_fatal(struct trapframe *);
86int double_reg_fixup(struct trapframe *);
87int ss_put_value(struct proc *, vaddr_t, u_int);
88
89extern void regdump(struct trapframe *f);
90
91const char *trap_type[] = {
92	"Reset",
93	"Interrupt Exception",
94	"Instruction Access",
95	"Data Access Exception",
96	"Misaligned Access",
97	"Unimplemented Opcode",
98	"Privilege Violation",
99	"Bounds Check Violation",
100	"Illegal Integer Divide",
101	"Integer Overflow",
102	"Error Exception",
103	"Non-Maskable Exception",
104};
105const int trap_types = sizeof trap_type / sizeof trap_type[0];
106
107#ifdef M88100
108const char *pbus_exception_type[] = {
109	"Success (No Fault)",
110	"unknown 1",
111	"unknown 2",
112	"Bus Error",
113	"Segment Fault",
114	"Page Fault",
115	"Supervisor Violation",
116	"Write Violation",
117};
118#endif
119
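/*
 * Common return-to-user-mode path: deliver pending signals, honor a
 * pending preemption request and, when profiling, charge recent system
 * time to the trapped pc.
 */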
120static inline void
121userret(struct proc *p, struct trapframe *frame, u_quad_t oticks)
122{
123	int sig;
124	struct cpu_info *cpu = curcpu();
125
126	/* take pending signals */
127	while ((sig = CURSIG(p)) != 0)
128		postsig(sig);
129	p->p_priority = p->p_usrpri;
130
131	if (cpu->ci_want_resched) {
132		/*
133		 * We're being preempted.
134		 */
135		preempt(NULL);
136		while ((sig = CURSIG(p)) != 0)
137			postsig(sig);
138	}
139
140	/*
141	 * If profiling, charge recent system time to the trapped pc.
142	 */
143	if (p->p_flag & P_PROFIL) {
144		extern int psratio;
145
146		addupc_task(p, frame->tf_sxip & XIP_ADDR,
147		    (int)(p->p_sticks - oticks) * psratio);
148	}
149	cpu->ci_schedstate.spc_curpriority = p->p_priority;
150}
151
152__dead void
153panictrap(int type, struct trapframe *frame)
154{
155	static int panicing = 0;
156
157	if (panicing++ == 0) {
158#ifdef M88100
159		if (CPU_IS88100) {
160			if (type == 2) {
161				/* instruction exception */
162				printf("\nInstr access fault (%s) v = %x, "
163				    "frame %p\n",
164				    pbus_exception_type[
165				      CMMU_PFSR_FAULT(frame->tf_ipfsr)],
166				    frame->tf_sxip & XIP_ADDR, frame);
167			} else if (type == 3) {
168				/* data access exception */
169				printf("\nData access fault (%s) v = %x, "
170				    "frame %p\n",
171				    pbus_exception_type[
172				      CMMU_PFSR_FAULT(frame->tf_dpfsr)],
173				    frame->tf_sxip & XIP_ADDR, frame);
174			} else
175				printf("\nTrap type %d, v = %x, frame %p\n",
176				    type, frame->tf_sxip & XIP_ADDR, frame);
177		}
178#endif
179#ifdef M88110
180		if (CPU_IS88110) {
181			printf("\nTrap type %d, v = %x, frame %p\n",
182			    type, frame->tf_exip, frame);
183		}
184#endif
185#ifdef DDB
186		regdump(frame);
187#endif
188	}
189	if ((u_int)type < trap_types)
190		panic(trap_type[type]);
191	else
192		panic("trap %d", type);
193	/*NOTREACHED*/
194}
195
196#ifdef M88100
197void
198m88100_trap(unsigned type, struct trapframe *frame)
199{
200	struct proc *p;
201	u_quad_t sticks = 0;
202	struct vm_map *map;
203	vaddr_t va, pcb_onfault;
204	vm_prot_t ftype;
205	int fault_type, pbus_type;
206	u_long fault_code;
207	unsigned fault_addr;
208	struct vmspace *vm;
209	union sigval sv;
210	int result;
211#ifdef DDB
212	int s;
213	u_int psr;
214#endif
215	int sig = 0;
216
217	extern struct vm_map *kernel_map;
218
219	uvmexp.traps++;
220	if ((p = curproc) == NULL)
221		p = &proc0;
222
223	if (USERMODE(frame->tf_epsr)) {
224		sticks = p->p_sticks;
225		type += T_USER;
226		p->p_md.md_tf = frame;	/* for ptrace/signals */
227	}
228	fault_type = 0;
229	fault_code = 0;
230	fault_addr = frame->tf_sxip & XIP_ADDR;
231
232	switch (type) {
233	default:
234		panictrap(frame->tf_vector, frame);
235		break;
236		/*NOTREACHED*/
237
238#if defined(DDB)
239	case T_KDB_BREAK:
240		s = splhigh();
241		set_psr((psr = get_psr()) & ~PSR_IND);
242		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
243		set_psr(psr);
244		splx(s);
245		return;
246	case T_KDB_ENTRY:
247		s = splhigh();
248		set_psr((psr = get_psr()) & ~PSR_IND);
249		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
250		set_psr(psr);
251		splx(s);
252		return;
253#endif /* DDB */
254	case T_ILLFLT:
255		printf("Unimplemented opcode!\n");
256		panictrap(frame->tf_vector, frame);
257		break;
258	case T_INT:
259	case T_INT+T_USER:
260		curcpu()->ci_intrdepth++;
261		md_interrupt_func(T_INT, frame);
262		curcpu()->ci_intrdepth--;
263		return;
264
265	case T_MISALGNFLT:
266		printf("kernel misaligned access exception @ 0x%08x\n",
267		    frame->tf_sxip);
268		panictrap(frame->tf_vector, frame);
269		break;
270
271	case T_INSTFLT:
272		/* kernel mode instruction access fault.
273		 * Should never, never happen for a non-paged kernel.
274		 */
275#ifdef TRAPDEBUG
276		pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
277		printf("Kernel Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
278		    pbus_type, pbus_exception_type[pbus_type],
279		    fault_addr, frame, frame->tf_cpu);
280#endif
281		panictrap(frame->tf_vector, frame);
282		break;
283
284	case T_DATAFLT:
285		/* kernel mode data fault */
286
287		/* data fault on the user address? */
288		if ((frame->tf_dmt0 & DMT_DAS) == 0) {
289			type = T_DATAFLT + T_USER;
290			goto user_fault;
291		}
292
293		fault_addr = frame->tf_dma0;
294		if (frame->tf_dmt0 & (DMT_WRITE|DMT_LOCKBAR)) {
295			ftype = VM_PROT_READ|VM_PROT_WRITE;
296			fault_code = VM_PROT_WRITE;
297		} else {
298			ftype = VM_PROT_READ;
299			fault_code = VM_PROT_READ;
300		}
301
302		va = trunc_page((vaddr_t)fault_addr);
303		if (va == 0) {
304			panic("trap: bad kernel access at %x", fault_addr);
305		}
306
307		KERNEL_LOCK(LK_CANRECURSE | LK_EXCLUSIVE);
308		vm = p->p_vmspace;
309		map = kernel_map;
310
311		pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
312#ifdef TRAPDEBUG
313		printf("Kernel Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
314		    pbus_type, pbus_exception_type[pbus_type],
315		    fault_addr, frame, frame->tf_cpu);
316#endif
317
318		switch (pbus_type) {
319		case CMMU_PFSR_SUCCESS:
320			/*
321			 * The fault was resolved. Call data_access_emulation
322			 * to drain the data unit pipe line and reset dmt0
323			 * so that trap won't get called again.
324			 */
325			data_access_emulation((unsigned *)frame);
326			frame->tf_dpfsr = 0;
327			frame->tf_dmt0 = 0;
328			KERNEL_UNLOCK();
329			return;
330		case CMMU_PFSR_SFAULT:
331		case CMMU_PFSR_PFAULT:
332			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
333				p->p_addr->u_pcb.pcb_onfault = 0;
334			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
335			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
336			if (result == 0) {
337				/*
338				 * We could resolve the fault. Call
339				 * data_access_emulation to drain the data
340				 * unit pipe line and reset dmt0 so that trap
341				 * won't get called again.
342				 */
343				data_access_emulation((unsigned *)frame);
344				frame->tf_dpfsr = 0;
345				frame->tf_dmt0 = 0;
346				KERNEL_UNLOCK();
347				return;
348			}
349			break;
350		}
351#ifdef TRAPDEBUG
352		printf("PBUS Fault %d (%s) va = 0x%x\n", pbus_type,
353		    pbus_exception_type[pbus_type], va);
354#endif
355		KERNEL_UNLOCK();
356		panictrap(frame->tf_vector, frame);
357		/* NOTREACHED */
358	case T_INSTFLT+T_USER:
359		/* User mode instruction access fault */
360		/* FALLTHROUGH */
361	case T_DATAFLT+T_USER:
362user_fault:
363		if (type == T_INSTFLT + T_USER) {
364			pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
365#ifdef TRAPDEBUG
366			printf("User Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
367			    pbus_type, pbus_exception_type[pbus_type],
368			    fault_addr, frame, frame->tf_cpu);
369#endif
370		} else {
371			fault_addr = frame->tf_dma0;
372			pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
373#ifdef TRAPDEBUG
374			printf("User Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
375			    pbus_type, pbus_exception_type[pbus_type],
376			    fault_addr, frame, frame->tf_cpu);
377#endif
378		}
379
380		if (frame->tf_dmt0 & (DMT_WRITE | DMT_LOCKBAR)) {
381			ftype = VM_PROT_READ | VM_PROT_WRITE;
382			fault_code = VM_PROT_WRITE;
383		} else {
384			ftype = VM_PROT_READ;
385			fault_code = VM_PROT_READ;
386		}
387
388		va = trunc_page((vaddr_t)fault_addr);
389
390		KERNEL_PROC_LOCK(p);
391		vm = p->p_vmspace;
392		map = &vm->vm_map;
393		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
394			p->p_addr->u_pcb.pcb_onfault = 0;
395
396		/* Call uvm_fault() to resolve non-bus error faults */
397		switch (pbus_type) {
398		case CMMU_PFSR_SUCCESS:
399			result = 0;
400			break;
401		case CMMU_PFSR_BERROR:
402			result = EACCES;
403			break;
404		default:
405			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
406			break;
407		}
408
409		p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
410
411		if ((caddr_t)va >= vm->vm_maxsaddr) {
412			if (result == 0)
413				uvm_grow(p, va);
414			else if (result == EACCES)
415				result = EFAULT;
416		}
417		KERNEL_PROC_UNLOCK(p);
418
419		/*
420		 * This could be a fault caused in copyin*()
421		 * while accessing user space.
422		 */
423		if (result != 0 && pcb_onfault != 0) {
424			frame->tf_snip = pcb_onfault | NIP_V;
425			frame->tf_sfip = (pcb_onfault + 4) | FIP_V;
426			frame->tf_sxip = 0;
427			/*
428			 * Continue as if the fault had been resolved, but
429			 * do not try to complete the faulting access.
430			 */
431			frame->tf_dmt0 |= DMT_SKIP;
432			result = 0;
433		}
434
435		if (result == 0) {
436			if (type == T_DATAFLT+T_USER) {
437				/*
438			 	 * We could resolve the fault. Call
439			 	 * data_access_emulation to drain the data unit
440			 	 * pipe line and reset dmt0 so that trap won't
441			 	 * get called again.
442			 	 */
443				data_access_emulation((unsigned *)frame);
444				frame->tf_dpfsr = 0;
445				frame->tf_dmt0 = 0;
446			} else {
447				/*
448				 * back up SXIP, SNIP,
449				 * clearing the Error bit
450				 */
451				frame->tf_sfip = frame->tf_snip & ~FIP_E;
452				frame->tf_snip = frame->tf_sxip & ~NIP_E;
453				frame->tf_ipfsr = 0;
454			}
455		} else {
456			sig = result == EACCES ? SIGBUS : SIGSEGV;
457			fault_type = result == EACCES ?
458			    BUS_ADRERR : SEGV_MAPERR;
459		}
460		break;
461	case T_MISALGNFLT+T_USER:
462		/* Fix any misaligned ld.d or st.d instructions */
463		sig = double_reg_fixup(frame);
464		fault_type = BUS_ADRALN;
465		break;
466	case T_PRIVINFLT+T_USER:
467	case T_ILLFLT+T_USER:
468#ifndef DDB
469	case T_KDB_BREAK:
470	case T_KDB_ENTRY:
471#endif
472	case T_KDB_BREAK+T_USER:
473	case T_KDB_ENTRY+T_USER:
474	case T_KDB_TRACE:
475	case T_KDB_TRACE+T_USER:
476		sig = SIGILL;
477		break;
478	case T_BNDFLT+T_USER:
479		sig = SIGFPE;
480		break;
481	case T_ZERODIV+T_USER:
482		sig = SIGFPE;
483		fault_type = FPE_INTDIV;
484		break;
485	case T_OVFFLT+T_USER:
486		sig = SIGFPE;
487		fault_type = FPE_INTOVF;
488		break;
489	case T_FPEPFLT+T_USER:
490	case T_FPEIFLT+T_USER:
491		sig = SIGFPE;
492		break;
493	case T_SIGSYS+T_USER:
494		sig = SIGSYS;
495		break;
496	case T_SIGTRAP+T_USER:
497		sig = SIGTRAP;
498		fault_type = TRAP_TRACE;
499		break;
500	case T_STEPBPT+T_USER:
501#ifdef PTRACE
502		/*
503		 * This trap is used by the kernel to support single-step
504		 * debugging (although any user could generate this trap
505		 * which should probably be handled differently). When a
506		 * process is continued by a debugger with the PT_STEP
507		 * function of ptrace (single step), the kernel inserts
508		 * one or two breakpoints in the user process so that only
509		 * one instruction (or two in the case of a delayed branch)
510		 * is executed.  When this breakpoint is hit, we get the
511		 * T_STEPBPT trap.
512		 */
513		{
514			u_int instr;
515			vaddr_t pc = PC_REGS(&frame->tf_regs);
516
517			/* read break instruction */
518			copyin((caddr_t)pc, &instr, sizeof(u_int));
519
520			/* check and see if we got here by accident */
521			if ((p->p_md.md_bp0va != pc &&
522			     p->p_md.md_bp1va != pc) ||
523			    instr != SSBREAKPOINT) {
524				sig = SIGTRAP;
525				fault_type = TRAP_TRACE;
526				break;
527			}
528
529			/* restore original instruction and clear breakpoint */
530			if (p->p_md.md_bp0va == pc) {
531				ss_put_value(p, pc, p->p_md.md_bp0save);
532				p->p_md.md_bp0va = 0;
533			}
534			if (p->p_md.md_bp1va == pc) {
535				ss_put_value(p, pc, p->p_md.md_bp1save);
536				p->p_md.md_bp1va = 0;
537			}
538
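			/* resume execution at the restored instruction */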
539#if 1
540			frame->tf_sfip = frame->tf_snip;
541			frame->tf_snip = pc | NIP_V;
542#endif
543			sig = SIGTRAP;
544			fault_type = TRAP_BRKPT;
545		}
546#else
547		sig = SIGTRAP;
548		fault_type = TRAP_TRACE;
549#endif
550		break;
551
552	case T_USERBPT+T_USER:
553		/*
554		 * This trap is meant to be used by debuggers to implement
555		 * breakpoint debugging.  When we get this trap, we just
556		 * return a signal which gets caught by the debugger.
557		 */
558		frame->tf_sfip = frame->tf_snip;
559		frame->tf_snip = frame->tf_sxip;
560		sig = SIGTRAP;
561		fault_type = TRAP_BRKPT;
562		break;
563
564	case T_ASTFLT+T_USER:
565		uvmexp.softs++;
566		p->p_md.md_astpending = 0;
567		if (p->p_flag & P_OWEUPC) {
568			p->p_flag &= ~P_OWEUPC;
569			KERNEL_PROC_LOCK(p);
570			ADDUPROF(p);
571			KERNEL_PROC_UNLOCK(p);
572		}
573		break;
574	}
575
576	/*
577	 * If trap from supervisor mode, just return
578	 */
579	if (type < T_USER)
580		return;
581
582	if (sig) {
583		sv.sival_int = fault_addr;
584		KERNEL_PROC_LOCK(p);
585		trapsignal(p, sig, fault_code, fault_type, sv);
586		KERNEL_PROC_UNLOCK(p);
587		/*
588		 * don't want multiple faults - we are going to
589		 * deliver signal.
590		 */
591		frame->tf_dmt0 = 0;
592		frame->tf_ipfsr = frame->tf_dpfsr = 0;
593	}
594
595	userret(p, frame, sticks);
596}
597#endif /* M88100 */
598
599#ifdef M88110
600void
601m88110_trap(unsigned type, struct trapframe *frame)
602{
603	struct proc *p;
604	u_quad_t sticks = 0;
605	struct vm_map *map;
606	vaddr_t va, pcb_onfault;
607	vm_prot_t ftype;
608	int fault_type;
609	u_long fault_code;
610	unsigned fault_addr;
611	struct vmspace *vm;
612	union sigval sv;
613	int result;
614#ifdef DDB
615	int s;
616	u_int psr;
617#endif
618	int sig = 0;
619	pt_entry_t *pte;
620
621	extern struct vm_map *kernel_map;
622	extern pt_entry_t *pmap_pte(pmap_t, vaddr_t);
623
624	uvmexp.traps++;
625	if ((p = curproc) == NULL)
626		p = &proc0;
627
628	if (USERMODE(frame->tf_epsr)) {
629		sticks = p->p_sticks;
630		type += T_USER;
631		p->p_md.md_tf = frame;	/* for ptrace/signals */
632	}
633	fault_type = 0;
634	fault_code = 0;
635	fault_addr = frame->tf_exip & XIP_ADDR;
636
637	switch (type) {
638	default:
639		panictrap(frame->tf_vector, frame);
640		break;
641		/*NOTREACHED*/
642
643	case T_197_READ+T_USER:
644	case T_197_READ:
645		printf("DMMU read miss: Hardware Table Searches should be enabled!\n");
646		panictrap(frame->tf_vector, frame);
647		break;
648		/*NOTREACHED*/
649	case T_197_WRITE+T_USER:
650	case T_197_WRITE:
651		printf("DMMU write miss: Hardware Table Searches should be enabled!\n");
652		panictrap(frame->tf_vector, frame);
653		break;
654		/*NOTREACHED*/
655	case T_197_INST+T_USER:
656	case T_197_INST:
657		printf("IMMU miss: Hardware Table Searches should be enabled!\n");
658		panictrap(frame->tf_vector, frame);
659		break;
660		/*NOTREACHED*/
661#ifdef DDB
662	case T_KDB_TRACE:
663		s = splhigh();
664		set_psr((psr = get_psr()) & ~PSR_IND);
665		ddb_break_trap(T_KDB_TRACE, (db_regs_t*)frame);
666		set_psr(psr);
667		splx(s);
668		return;
669	case T_KDB_BREAK:
670		s = splhigh();
671		set_psr((psr = get_psr()) & ~PSR_IND);
672		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
673		set_psr(psr);
674		splx(s);
675		return;
676	case T_KDB_ENTRY:
677		s = splhigh();
678		set_psr((psr = get_psr()) & ~PSR_IND);
679		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
680		set_psr(psr);
681		/* skip one instruction */
682		if (frame->tf_exip & 1)
683			frame->tf_exip = frame->tf_enip;
684		else
685			frame->tf_exip += 4;
686		splx(s);
687		return;
688#if 0
689	case T_ILLFLT:
690		s = splhigh();
691		set_psr((psr = get_psr()) & ~PSR_IND);
692		ddb_error_trap(type == T_ILLFLT ? "unimplemented opcode" :
693		       "error fault", (db_regs_t*)frame);
694		set_psr(psr);
695		splx(s);
696		return;
697#endif /* 0 */
698#endif /* DDB */
699	case T_ILLFLT:
700		printf("Unimplemented opcode!\n");
701		panictrap(frame->tf_vector, frame);
702		break;
703	case T_NON_MASK:
704	case T_NON_MASK+T_USER:
705		curcpu()->ci_intrdepth++;
706		md_interrupt_func(T_NON_MASK, frame);
707		curcpu()->ci_intrdepth--;
708		return;
709	case T_INT:
710	case T_INT+T_USER:
711		curcpu()->ci_intrdepth++;
712		md_interrupt_func(T_INT, frame);
713		curcpu()->ci_intrdepth--;
714		return;
715	case T_MISALGNFLT:
716		printf("kernel mode misaligned access exception @ 0x%08x\n",
717		    frame->tf_exip);
718		panictrap(frame->tf_vector, frame);
719		break;
720		/*NOTREACHED*/
721
722	case T_INSTFLT:
723		/* kernel mode instruction access fault.
724		 * Should never, never happen for a non-paged kernel.
725		 */
726#ifdef TRAPDEBUG
727		printf("Kernel Instruction fault exip %x isr %x ilar %x\n",
728		    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
729#endif
730		panictrap(frame->tf_vector, frame);
731		break;
732		/*NOTREACHED*/
733
734	case T_DATAFLT:
735		/* kernel mode data fault */
736
737		/* data fault on the user address? */
738		if ((frame->tf_dsr & CMMU_DSR_SU) == 0) {
739			type = T_DATAFLT + T_USER;
740			goto m88110_user_fault;
741		}
742
743#ifdef TRAPDEBUG
744		printf("Kernel Data access fault exip %x dsr %x dlar %x\n",
745		    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
746#endif
747
748		fault_addr = frame->tf_dlar;
749		if (frame->tf_dsr & CMMU_DSR_RW) {
750			ftype = VM_PROT_READ;
751			fault_code = VM_PROT_READ;
752		} else {
753			ftype = VM_PROT_READ|VM_PROT_WRITE;
754			fault_code = VM_PROT_WRITE;
755		}
756
757		va = trunc_page((vaddr_t)fault_addr);
758		if (va == 0) {
759			panic("trap: bad kernel access at %x", fault_addr);
760		}
761
762		KERNEL_LOCK(LK_CANRECURSE | LK_EXCLUSIVE);
763		vm = p->p_vmspace;
764		map = kernel_map;
765
766		if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
767			frame->tf_dsr &= ~CMMU_DSR_WE;	/* undefined */
768			/*
769			 * On a segment or a page fault, call uvm_fault() to
770			 * resolve the fault.
771			 */
772			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
773				p->p_addr->u_pcb.pcb_onfault = 0;
774			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
775			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
776			if (result == 0) {
777				KERNEL_UNLOCK();
778				return;
779			}
780		}
781		if (frame->tf_dsr & CMMU_DSR_WE) {	/* write fault  */
782			/*
783			 * This could be a write protection fault or an
784			 * exception to set the used and modified bits
785			 * in the pte. Basically, if we got a write error,
786			 * then we already have a pte entry that faulted
787			 * in from a previous seg fault or page fault.
788			 * Get the pte and check the status of the
789			 * modified and valid bits to determine if this
790			 * is indeed a real write fault.  XXX smurph
791			 */
792			pte = pmap_pte(map->pmap, va);
793#ifdef DEBUG
794			if (pte == NULL) {
795				KERNEL_UNLOCK();
796				panic("NULL pte on write fault??");
797			}
798#endif
799			if (!(*pte & PG_M) && !(*pte & PG_RO)) {
800				/* Set modified bit and try the write again. */
801#ifdef TRAPDEBUG
802				printf("Corrected kernel write fault, map %x pte %x\n",
803				    map->pmap, *pte);
804#endif
805				*pte |= PG_M;
806				KERNEL_UNLOCK();
807				return;
808#if 1	/* shouldn't happen */
809			} else {
810				/* must be a real wp fault */
811#ifdef TRAPDEBUG
812				printf("Uncorrected kernel write fault, map %x pte %x\n",
813				    map->pmap, *pte);
814#endif
815				if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
816					p->p_addr->u_pcb.pcb_onfault = 0;
817				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
818				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
819				if (result == 0) {
820					KERNEL_UNLOCK();
821					return;
822				}
823#endif
824			}
825		}
826		KERNEL_UNLOCK();
827		panictrap(frame->tf_vector, frame);
828		/* NOTREACHED */
829	case T_INSTFLT+T_USER:
830		/* User mode instruction access fault */
831		/* FALLTHROUGH */
832	case T_DATAFLT+T_USER:
833m88110_user_fault:
834		if (type == T_INSTFLT+T_USER) {
835			ftype = VM_PROT_READ;
836			fault_code = VM_PROT_READ;
837#ifdef TRAPDEBUG
838			printf("User Instruction fault exip %x isr %x ilar %x\n",
839			    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
840#endif
841		} else {
842			fault_addr = frame->tf_dlar;
843			if (frame->tf_dsr & CMMU_DSR_RW) {
844				ftype = VM_PROT_READ;
845				fault_code = VM_PROT_READ;
846			} else {
847				ftype = VM_PROT_READ|VM_PROT_WRITE;
848				fault_code = VM_PROT_WRITE;
849			}
850#ifdef TRAPDEBUG
851			printf("User Data access fault exip %x dsr %x dlar %x\n",
852			    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
853#endif
854		}
855
856		va = trunc_page((vaddr_t)fault_addr);
857
858		KERNEL_PROC_LOCK(p);
859		vm = p->p_vmspace;
860		map = &vm->vm_map;
861		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
862			p->p_addr->u_pcb.pcb_onfault = 0;
863
864		/*
865		 * Call uvm_fault() to resolve non-bus error faults
866		 * whenever possible.
867		 */
868		if (type == T_DATAFLT+T_USER) {
869			/* data faults */
870			if (frame->tf_dsr & CMMU_DSR_BE) {
871				/* bus error */
872				result = EACCES;
873			} else
874			if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
875				/* segment or page fault */
876				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
877				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
878			} else
879			if (frame->tf_dsr & (CMMU_DSR_CP | CMMU_DSR_WA)) {
880				/* copyback or write allocate error */
881				result = EACCES;
882			} else
883			if (frame->tf_dsr & CMMU_DSR_WE) {
884				/* write fault  */
885				/* This could be a write protection fault or an
886				 * exception to set the used and modified bits
887				 * in the pte. Basically, if we got a write
888				 * error, then we already have a pte entry that
889				 * faulted in from a previous seg fault or page
890				 * fault.
891				 * Get the pte and check the status of the
892				 * modified and valid bits to determine if this
893				 * is indeed a real write fault.  XXX smurph
894				 */
895				pte = pmap_pte(vm_map_pmap(map), va);
896#ifdef DEBUG
897				if (pte == NULL) {
898					KERNEL_PROC_UNLOCK(p);
899					panic("NULL pte on write fault??");
900				}
901#endif
902				if (!(*pte & PG_M) && !(*pte & PG_RO)) {
903					/*
904					 * Set modified bit and try the
905					 * write again.
906					 */
907#ifdef TRAPDEBUG
908					printf("Corrected userland write fault, map %x pte %x\n",
909					    map->pmap, *pte);
910#endif
911					*pte |= PG_M;
912					/*
913					 * invalidate ATCs to force
914					 * table search
915					 */
916					set_dcmd(CMMU_DCMD_INV_UATC);
917					KERNEL_PROC_UNLOCK(p);
918					return;
919				} else {
920					/* must be a real wp fault */
921#ifdef TRAPDEBUG
922					printf("Uncorrected userland write fault, map %x pte %x\n",
923					    map->pmap, *pte);
924#endif
925					result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
926					p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
927				}
928			} else {
929#ifdef TRAPDEBUG
930				printf("Unexpected Data access fault dsr %x\n",
931				    frame->tf_dsr);
932#endif
933				KERNEL_PROC_UNLOCK(p);
934				panictrap(frame->tf_vector, frame);
935			}
936		} else {
937			/* instruction faults */
938			if (frame->tf_isr &
939			    (CMMU_ISR_BE | CMMU_ISR_SP | CMMU_ISR_TBE)) {
940				/* bus error, supervisor protection */
941				result = EACCES;
942			} else
943			if (frame->tf_isr & (CMMU_ISR_SI | CMMU_ISR_PI)) {
944				/* segment or page fault */
945				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
946				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
947			} else {
948#ifdef TRAPDEBUG
949				printf("Unexpected Instruction fault isr %x\n",
950				    frame->tf_isr);
951#endif
952				KERNEL_PROC_UNLOCK(p);
953				panictrap(frame->tf_vector, frame);
954			}
955		}
956
957		if ((caddr_t)va >= vm->vm_maxsaddr) {
958			if (result == 0)
959				uvm_grow(p, va);
960			else if (result == EACCES)
961				result = EFAULT;
962		}
963		KERNEL_PROC_UNLOCK(p);
964
965		/*
966		 * This could be a fault caused in copyin*()
967		 * while accessing user space.
968		 */
969		if (result != 0 && pcb_onfault != 0) {
970			frame->tf_exip = pcb_onfault;
971			/*
972			 * Continue as if the fault had been resolved.
973			 */
974			result = 0;
975		}
976
977		if (result != 0) {
978			sig = result == EACCES ? SIGBUS : SIGSEGV;
979			fault_type = result == EACCES ?
980			    BUS_ADRERR : SEGV_MAPERR;
981		}
982		break;
983	case T_MISALGNFLT+T_USER:
984		/* Fix any misaligned ld.d or st.d instructions */
985		sig = double_reg_fixup(frame);
986		fault_type = BUS_ADRALN;
987		break;
988	case T_PRIVINFLT+T_USER:
989	case T_ILLFLT+T_USER:
990#ifndef DDB
991	case T_KDB_BREAK:
992	case T_KDB_ENTRY:
993	case T_KDB_TRACE:
994#endif
995	case T_KDB_BREAK+T_USER:
996	case T_KDB_ENTRY+T_USER:
997	case T_KDB_TRACE+T_USER:
998		sig = SIGILL;
999		break;
1000	case T_BNDFLT+T_USER:
1001		sig = SIGFPE;
1002		break;
1003	case T_ZERODIV+T_USER:
1004		sig = SIGFPE;
1005		fault_type = FPE_INTDIV;
1006		break;
1007	case T_OVFFLT+T_USER:
1008		sig = SIGFPE;
1009		fault_type = FPE_INTOVF;
1010		break;
1011	case T_FPEPFLT+T_USER:
1012	case T_FPEIFLT+T_USER:
1013		sig = SIGFPE;
1014		break;
1015	case T_SIGSYS+T_USER:
1016		sig = SIGSYS;
1017		break;
1018	case T_SIGTRAP+T_USER:
1019		sig = SIGTRAP;
1020		fault_type = TRAP_TRACE;
1021		break;
1022	case T_STEPBPT+T_USER:
1023#ifdef PTRACE
1024		/*
1025		 * This trap is used by the kernel to support single-step
1026		 * debugging (although any user could generate this trap
1027		 * which should probably be handled differently). When a
1028		 * process is continued by a debugger with the PT_STEP
1029		 * function of ptrace (single step), the kernel inserts
1030		 * one or two breakpoints in the user process so that only
1031		 * one instruction (or two in the case of a delayed branch)
1032		 * is executed.  When this breakpoint is hit, we get the
1033		 * T_STEPBPT trap.
1034		 */
1035		{
1036			u_int instr;
1037			vaddr_t pc = PC_REGS(&frame->tf_regs);
1038
1039			/* read break instruction */
1040			copyin((caddr_t)pc, &instr, sizeof(u_int));
1041
1042			/* check and see if we got here by accident */
1043			if ((p->p_md.md_bp0va != pc &&
1044			     p->p_md.md_bp1va != pc) ||
1045			    instr != SSBREAKPOINT) {
1046				sig = SIGTRAP;
1047				fault_type = TRAP_TRACE;
1048				break;
1049			}
1050
1051			/* restore original instruction and clear breakpoint */
1052			if (p->p_md.md_bp0va == pc) {
1053				ss_put_value(p, pc, p->p_md.md_bp0save);
1054				p->p_md.md_bp0va = 0;
1055			}
1056			if (p->p_md.md_bp1va == pc) {
1057				ss_put_value(p, pc, p->p_md.md_bp1save);
1058				p->p_md.md_bp1va = 0;
1059			}
1060
1061			sig = SIGTRAP;
1062			fault_type = TRAP_BRKPT;
1063		}
1064#else
1065		sig = SIGTRAP;
1066		fault_type = TRAP_TRACE;
1067#endif
1068		break;
1069	case T_USERBPT+T_USER:
1070		/*
1071		 * This trap is meant to be used by debuggers to implement
1072		 * breakpoint debugging.  When we get this trap, we just
1073		 * return a signal which gets caught by the debugger.
1074		 */
1075		sig = SIGTRAP;
1076		fault_type = TRAP_BRKPT;
1077		break;
1078
1079	case T_ASTFLT+T_USER:
1080		uvmexp.softs++;
1081		p->p_md.md_astpending = 0;
1082		if (p->p_flag & P_OWEUPC) {
1083			p->p_flag &= ~P_OWEUPC;
1084			KERNEL_PROC_LOCK(p);
1085			ADDUPROF(p);
1086			KERNEL_PROC_UNLOCK(p);
1087		}
1088		break;
1089	}
1090
1091	/*
1092	 * If trap from supervisor mode, just return
1093	 */
1094	if (type < T_USER)
1095		return;
1096
1097	if (sig) {
1098		sv.sival_int = fault_addr;
1099		KERNEL_PROC_LOCK(p);
1100		trapsignal(p, sig, fault_code, fault_type, sv);
1101		KERNEL_PROC_UNLOCK(p);
1102	}
1103
1104	userret(p, frame, sticks);
1105}
1106#endif /* M88110 */
1107
1108__dead void
1109error_fatal(struct trapframe *frame)
1110{
1111	if (frame->tf_vector == 0)
1112		printf("\nCPU %d Reset Exception\n", cpu_number());
1113	else
1114		printf("\nCPU %d Error Exception\n", cpu_number());
1115
1116#ifdef DDB
1117	regdump((struct trapframe*)frame);
1118#endif
1119	panic("unrecoverable exception %d", frame->tf_vector);
1120}
1121
1122#ifdef M88100
1123void
1124m88100_syscall(register_t code, struct trapframe *tf)
1125{
1126	int i, nsys, nap;
1127	struct sysent *callp;
1128	struct proc *p;
1129	int error;
1130	register_t args[11], rval[2], *ap;
1131	u_quad_t sticks;
1132
1133	uvmexp.syscalls++;
1134
1135	p = curproc;
1136
1137	callp = p->p_emul->e_sysent;
1138	nsys  = p->p_emul->e_nsysent;
1139
1140	sticks = p->p_sticks;
1141	p->p_md.md_tf = tf;
1142
1143	/*
1144	 * For 88k, all the arguments are passed in the registers (r2-r12)
1145	 * For syscall (and __syscall), r2 (and r3) has the actual code.
1146	 * __syscall  takes a quad syscall number, so that other
1147	 * arguments are at their natural alignments.
1148	 */
1149	ap = &tf->tf_r[2];
1150	nap = 11; /* r2-r12 */
1151
1152	switch (code) {
1153	case SYS_syscall:
1154		code = *ap++;
1155		nap--;
1156		break;
1157	case SYS___syscall:
1158		if (callp != sysent)
1159			break;
1160		code = ap[_QUAD_LOWWORD];
1161		ap += 2;
1162		nap -= 2;
1163		break;
1164	}
1165
1166	/* Callp currently points to syscall, which returns ENOSYS. */
1167	if (code < 0 || code >= nsys)
1168		callp += p->p_emul->e_nosys;
1169	else {
1170		callp += code;
1171		i = callp->sy_argsize / sizeof(register_t);
1172		if (i > nap)
1173			panic("syscall nargs");
1174		/*
1175		 * just copy them; syscall stub made sure all the
1176		 * args are moved from user stack to registers.
1177		 */
1178		bcopy((caddr_t)ap, (caddr_t)args, i * sizeof(register_t));
1179	}
1180
1181	KERNEL_PROC_LOCK(p);
1182#ifdef SYSCALL_DEBUG
1183	scdebug_call(p, code, args);
1184#endif
1185#ifdef KTRACE
1186	if (KTRPOINT(p, KTR_SYSCALL))
1187		ktrsyscall(p, code, callp->sy_argsize, args);
1188#endif
1189	rval[0] = 0;
1190	rval[1] = tf->tf_r[3];
1191#if NSYSTRACE > 0
1192	if (ISSET(p->p_flag, P_SYSTRACE))
1193		error = systrace_redirect(code, p, args, rval);
1194	else
1195#endif
1196		error = (*callp->sy_call)(p, args, rval);
1197	/*
1198	 * system call will look like:
1199	 *	 ld r10, r31, 32; r10,r11,r12 might be garbage.
1200	 *	 ld r11, r31, 36
1201	 *	 ld r12, r31, 40
1202	 *	 or r13, r0, <code>
1203	 *       tb0 0, r0, <128> <- sxip
1204	 *	 br err 	  <- snip
1205	 *       jmp r1 	  <- sfip
1206	 *  err: or.u r3, r0, hi16(errno)
1207	 *	 st r2, r3, lo16(errno)
1208	 *	 subu r2, r0, 1
1209	 *	 jmp r1
1210	 *
1211	 * So, when we take syscall trap, sxip/snip/sfip will be as
1212	 * shown above.
1213	 * Given this,
1214	 * 1. If the system call returned 0, need to skip nip.
1215	 *	nip = fip, fip += 4
1216	 *    (doesn't matter what fip + 4 will be but we will never
1217	 *    execute this since jmp r1 at nip will change the execution flow.)
1218	 * 2. If the system call returned an errno > 0, plug the value
1219	 *    in r2, and leave nip and fip unchanged. This will have us
1220	 *    executing "br err" on return to user space.
1221	 * 3. If the system call code returned ERESTART,
1222	 *    we need to rexecute the trap instruction. Back up the pipe
1223	 *    we need to re-execute the trap instruction. Back up the pipe
1224	 *     fip = nip, nip = xip
1225	 * 4. If the system call returned EJUSTRETURN, don't need to adjust
1226	 *    any pointers.
1227	 */
1228
1229	KERNEL_PROC_UNLOCK(p);
1230	switch (error) {
1231	case 0:
1232		tf->tf_r[2] = rval[0];
1233		tf->tf_r[3] = rval[1];
1234		tf->tf_epsr &= ~PSR_C;
1235		tf->tf_snip = tf->tf_sfip & ~NIP_E;
1236		tf->tf_sfip = tf->tf_snip + 4;
1237		break;
1238	case ERESTART:
1239		tf->tf_epsr &= ~PSR_C;
1240		tf->tf_sfip = tf->tf_snip & ~FIP_E;
1241		tf->tf_snip = tf->tf_sxip & ~NIP_E;
1242		break;
1243	case EJUSTRETURN:
1244		tf->tf_epsr &= ~PSR_C;
1245		break;
1246	default:
1247		if (p->p_emul->e_errno)
1248			error = p->p_emul->e_errno[error];
1249		tf->tf_r[2] = error;
1250		tf->tf_epsr |= PSR_C;   /* fail */
1251		tf->tf_snip = tf->tf_snip & ~NIP_E;
1252		tf->tf_sfip = tf->tf_sfip & ~FIP_E;
1253		break;
1254	}
1255#ifdef SYSCALL_DEBUG
1256	KERNEL_PROC_LOCK(p);
1257	scdebug_ret(p, code, error, rval);
1258	KERNEL_PROC_UNLOCK(p);
1259#endif
1260	userret(p, tf, sticks);
1261#ifdef KTRACE
1262	if (KTRPOINT(p, KTR_SYSRET)) {
1263		KERNEL_PROC_LOCK(p);
1264		ktrsysret(p, code, error, rval[0]);
1265		KERNEL_PROC_UNLOCK(p);
1266	}
1267#endif
1268}
1269#endif /* M88100 */
1270
1271#ifdef M88110
1272/* Instruction pointers operate differently on mc88110 */
1273void
1274m88110_syscall(register_t code, struct trapframe *tf)
1275{
1276	int i, nsys, nap;
1277	struct sysent *callp;
1278	struct proc *p;
1279	int error;
1280	register_t args[11], rval[2], *ap;
1281	u_quad_t sticks;
1282
1283	uvmexp.syscalls++;
1284
1285	p = curproc;
1286
1287	callp = p->p_emul->e_sysent;
1288	nsys  = p->p_emul->e_nsysent;
1289
1290	sticks = p->p_sticks;
1291	p->p_md.md_tf = tf;
1292
1293	/*
1294	 * For 88k, all the arguments are passed in the registers (r2-r12)
1295	 * For syscall (and __syscall), r2 (and r3) has the actual code.
1296	 * __syscall  takes a quad syscall number, so that other
1297	 * arguments are at their natural alignments.
1298	 */
1299	ap = &tf->tf_r[2];
1300	nap = 11;	/* r2-r12 */
1301
1302	switch (code) {
1303	case SYS_syscall:
1304		code = *ap++;
1305		nap--;
1306		break;
1307	case SYS___syscall:
1308		if (callp != sysent)
1309			break;
1310		code = ap[_QUAD_LOWWORD];
1311		ap += 2;
1312		nap -= 2;
1313		break;
1314	}
1315
1316	/* Callp currently points to syscall, which returns ENOSYS. */
1317	if (code < 0 || code >= nsys)
1318		callp += p->p_emul->e_nosys;
1319	else {
1320		callp += code;
1321		i = callp->sy_argsize / sizeof(register_t);
1322		if (i > nap)
1323			panic("syscall nargs");
1324		/*
1325		 * just copy them; syscall stub made sure all the
1326		 * args are moved from user stack to registers.
1327		 */
1328		bcopy((caddr_t)ap, (caddr_t)args, i * sizeof(register_t));
1329	}
1330	KERNEL_PROC_LOCK(p);
1331#ifdef SYSCALL_DEBUG
1332	scdebug_call(p, code, args);
1333#endif
1334#ifdef KTRACE
1335	if (KTRPOINT(p, KTR_SYSCALL))
1336		ktrsyscall(p, code, callp->sy_argsize, args);
1337#endif
1338	rval[0] = 0;
1339	rval[1] = tf->tf_r[3];
1340#if NSYSTRACE > 0
1341	if (ISSET(p->p_flag, P_SYSTRACE))
1342		error = systrace_redirect(code, p, args, rval);
1343	else
1344#endif
1345		error = (*callp->sy_call)(p, args, rval);
1346	/*
1347	 * system call will look like:
1348	 *	 ld r10, r31, 32; r10,r11,r12 might be garbage.
1349	 *	 ld r11, r31, 36
1350	 *	 ld r12, r31, 40
1351	 *	 or r13, r0, <code>
1352	 *       tb0 0, r0, <128> <- exip
1353	 *	 br err 	  <- enip
1354	 *       jmp r1
1355	 *  err: or.u r3, r0, hi16(errno)
1356	 *	 st r2, r3, lo16(errno)
1357	 *	 subu r2, r0, 1
1358	 *	 jmp r1
1359	 *
1360	 * So, when we take syscall trap, exip/enip will be as
1361	 * shown above.
1362	 * Given this,
1363	 * 1. If the system call returned 0, need to jmp r1.
1364	 *    exip += 8
1365	 * 2. If the system call returned an errno > 0, increment
1366	 *    exip += 4 and plug the value in r2. This will have us
1367	 *    executing "br err" on return to user space.
1368	 * 3. If the system call code returned ERESTART,
1369	 *    we need to re-execute the trap instruction. Leave exip as is.
1370	 * 4. If the system call returned EJUSTRETURN, just return.
1371	 *    exip += 4
1372	 */
1373
1374	KERNEL_PROC_UNLOCK(p);
1375	switch (error) {
1376	case 0:
1377		tf->tf_r[2] = rval[0];
1378		tf->tf_r[3] = rval[1];
1379		tf->tf_epsr &= ~PSR_C;
1380		/* skip two instructions */
1381		if (tf->tf_exip & 1)
1382			tf->tf_exip = tf->tf_enip + 4;
1383		else
1384			tf->tf_exip += 4 + 4;
1385		break;
1386	case ERESTART:
1387		/*
1388		 * Reexecute the trap.
1389		 * exip is already at the trap instruction, so
1390		 * there is nothing to do.
1391		 */
1392		tf->tf_epsr &= ~PSR_C;
1393		break;
1394	case EJUSTRETURN:
1395		tf->tf_epsr &= ~PSR_C;
1396		/* skip one instruction */
1397		if (tf->tf_exip & 1)
1398			tf->tf_exip = tf->tf_enip;
1399		else
1400			tf->tf_exip += 4;
1401		break;
1402	default:
1403		if (p->p_emul->e_errno)
1404			error = p->p_emul->e_errno[error];
1405		tf->tf_r[2] = error;
1406		tf->tf_epsr |= PSR_C;   /* fail */
1407		/* skip one instruction */
1408		if (tf->tf_exip & 1)
1409			tf->tf_exip = tf->tf_enip;
1410		else
1411			tf->tf_exip += 4;
1412		break;
1413	}
1414
1415#ifdef SYSCALL_DEBUG
1416	KERNEL_PROC_LOCK(p);
1417	scdebug_ret(p, code, error, rval);
1418	KERNEL_PROC_UNLOCK(p);
1419#endif
1420	userret(p, tf, sticks);
1421#ifdef KTRACE
1422	if (KTRPOINT(p, KTR_SYSRET)) {
1423		KERNEL_PROC_LOCK(p);
1424		ktrsysret(p, code, error, rval[0]);
1425		KERNEL_PROC_UNLOCK(p);
1426	}
1427#endif
1428}
1429#endif	/* M88110 */
1430
1431/*
1432 * Set up return-value registers as fork() libc stub expects,
1433 * and do normal return-to-user-mode stuff.
1434 */
1435void
1436child_return(arg)
1437	void *arg;
1438{
1439	struct proc *p = arg;
1440	struct trapframe *tf;
1441
1442	tf = (struct trapframe *)USER_REGS(p);
1443	tf->tf_r[2] = 0;
1444	tf->tf_r[3] = 0;
1445	tf->tf_epsr &= ~PSR_C;
1446	/* skip br instruction as in syscall() */
1447#ifdef M88100
1448	if (CPU_IS88100) {
1449		tf->tf_snip = tf->tf_sfip & XIP_ADDR;
1450		tf->tf_sfip = tf->tf_snip + 4;
1451	}
1452#endif
1453#ifdef M88110
1454	if (CPU_IS88110) {
1455		/* skip two instructions */
1456		if (tf->tf_exip & 1)
1457			tf->tf_exip = tf->tf_enip + 4;
1458		else
1459			tf->tf_exip += 4 + 4;
1460	}
1461#endif
1462
1463	KERNEL_PROC_UNLOCK(p);
1464	userret(p, tf, p->p_sticks);
1465
1466#ifdef KTRACE
1467	if (KTRPOINT(p, KTR_SYSRET)) {
1468		KERNEL_PROC_LOCK(p);
1469		ktrsysret(p,
1470		    (p->p_flag & P_PPWAIT) ? SYS_vfork : SYS_fork, 0, 0);
1471		KERNEL_PROC_UNLOCK(p);
1472	}
1473#endif
1474}
1475
1476#ifdef PTRACE
1477
1478/*
1479 * User Single Step Debugging Support
1480 */
1481
1482#include <sys/ptrace.h>
1483
1484vaddr_t	ss_branch_taken(u_int, vaddr_t, struct reg *);
1485int	ss_get_value(struct proc *, vaddr_t, u_int *);
1486int	ss_inst_branch_or_call(u_int);
1487int	ss_put_breakpoint(struct proc *, vaddr_t, vaddr_t *, u_int *);
1488
1489#define	SYSCALL_INSTR	0xf000d080	/* tb0 0,r0,128 */
1490
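/*
 * Read a word from the traced process' address space, through
 * process_domem() as ptrace(PT_READ_I) does.
 */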
1491int
1492ss_get_value(struct proc *p, vaddr_t addr, u_int *value)
1493{
1494	struct uio uio;
1495	struct iovec iov;
1496
1497	iov.iov_base = (caddr_t)value;
1498	iov.iov_len = sizeof(u_int);
1499	uio.uio_iov = &iov;
1500	uio.uio_iovcnt = 1;
1501	uio.uio_offset = (off_t)addr;
1502	uio.uio_resid = sizeof(u_int);
1503	uio.uio_segflg = UIO_SYSSPACE;
1504	uio.uio_rw = UIO_READ;
1505	uio.uio_procp = curproc;
1506	return (process_domem(curproc, p, &uio, PT_READ_I));
1507}
1508
1509int
1510ss_put_value(struct proc *p, vaddr_t addr, u_int value)
1511{
1512	struct uio uio;
1513	struct iovec iov;
1514
1515	iov.iov_base = (caddr_t)&value;
1516	iov.iov_len = sizeof(u_int);
1517	uio.uio_iov = &iov;
1518	uio.uio_iovcnt = 1;
1519	uio.uio_offset = (off_t)addr;
1520	uio.uio_resid = sizeof(u_int);
1521	uio.uio_segflg = UIO_SYSSPACE;
1522	uio.uio_rw = UIO_WRITE;
1523	uio.uio_procp = curproc;
1524	return (process_domem(curproc, p, &uio, PT_WRITE_I));
1525}
1526
1527/*
1528 * ss_branch_taken(instruction, pc, regs)
1529 *
1530 * instruction will be a control flow instruction located at address pc.
1531 * Branch taken is supposed to return the address to which the instruction
1532 * would jump if the branch is taken.
1533 *
1534 * This is different from branch_taken() in ddb, as we also need to process
1535 * system calls.
1536 */
1537vaddr_t
1538ss_branch_taken(u_int inst, vaddr_t pc, struct reg *regs)
1539{
1540	u_int regno;
1541
1542	/*
1543	 * Quick check of the instruction. Note that we know we are only
1544	 * invoked if ss_inst_branch_or_call() returns TRUE, so we do not
1545	 * need to repeat the jpm, jsr and syscall stricter checks here.
1546	 * need to repeat the stricter jmp, jsr and syscall checks here.
1547	switch (inst >> (32 - 5)) {
1548	case 0x18:	/* br */
1549	case 0x19:	/* bsr */
1550		/* signed 26 bit pc relative displacement, shift left 2 bits */
1551		inst = (inst & 0x03ffffff) << 2;
1552		/* check if sign extension is needed */
1553		if (inst & 0x08000000)
1554			inst |= 0xf0000000;
1555		return (pc + inst);
1556
1557	case 0x1a:	/* bb0 */
1558	case 0x1b:	/* bb1 */
1559	case 0x1d:	/* bcnd */
1560		/* signed 16 bit pc relative displacement, shift left 2 bits */
1561		inst = (inst & 0x0000ffff) << 2;
1562		/* check if sign extension is needed */
1563		if (inst & 0x00020000)
1564			inst |= 0xfffc0000;
1565		return (pc + inst);
1566
1567	case 0x1e:	/* jmp or jsr */
1568		regno = inst & 0x1f;	/* get the register value */
1569		return (regno == 0 ? 0 : regs->r[regno]);
1570
1571	default:	/* system call */
1572		/*
1573		 * The regular (pc + 4) breakpoint will match the error
1574		 * return. Successful system calls return at (pc + 8),
1575		 * so we'll set up a branch breakpoint there.
1576		 */
1577		return (pc + 8);
1578	}
1579}
1580
1581int
1582ss_inst_branch_or_call(u_int ins)
1583{
1584	/* check high five bits */
1585	switch (ins >> (32 - 5)) {
1586	case 0x18: /* br */
1587	case 0x19: /* bsr */
1588	case 0x1a: /* bb0 */
1589	case 0x1b: /* bb1 */
1590	case 0x1d: /* bcnd */
1591		return (TRUE);
1592	case 0x1e: /* could be jmp or jsr */
1593		if ((ins & 0xfffff3e0) == 0xf400c000)
1594			return (TRUE);
1595	}
1596
1597	return (FALSE);
1598}
1599
1600int
1601ss_put_breakpoint(struct proc *p, vaddr_t va, vaddr_t *bpva, u_int *bpsave)
1602{
1603	int rc;
1604
1605	/* Restore previous breakpoint if we did not trigger it. */
1606	if (*bpva != 0) {
1607		ss_put_value(p, *bpva, *bpsave);
1608		*bpva = 0;
1609	}
1610
1611	/* Save instruction. */
1612	if ((rc = ss_get_value(p, va, bpsave)) != 0)
1613		return (rc);
1614
1615	/* Store breakpoint instruction at the location now. */
1616	*bpva = va;
1617	return (ss_put_value(p, va, SSBREAKPOINT));
1618}
1619
1620int
1621process_sstep(struct proc *p, int sstep)
1622{
1623	struct reg *sstf = USER_REGS(p);
1624	unsigned pc, brpc;
1625	unsigned instr;
1626	int rc;
1627
1628	if (sstep == 0) {
1629		/* Restore previous breakpoints if any. */
1630		if (p->p_md.md_bp0va != 0) {
1631			ss_put_value(p, p->p_md.md_bp0va, p->p_md.md_bp0save);
1632			p->p_md.md_bp0va = 0;
1633		}
1634		if (p->p_md.md_bp1va != 0) {
1635			ss_put_value(p, p->p_md.md_bp1va, p->p_md.md_bp1save);
1636			p->p_md.md_bp1va = 0;
1637		}
1638
1639		return (0);
1640	}
1641
1642	/*
1643	 * User was stopped at pc, i.e. the instruction at pc was not executed.
1644	 * Fetch what's at the current location.
1645	 */
1646	pc = PC_REGS(sstf);
1647	if ((rc = ss_get_value(p, pc, &instr)) != 0)
1648		return (rc);
1649
1650	/*
1651	 * Find if this instruction may cause a branch, and set up a breakpoint
1652	 * at the branch location.
1653	 */
1654	if (ss_inst_branch_or_call(instr) || instr == SYSCALL_INSTR) {
1655		brpc = ss_branch_taken(instr, pc, sstf);
1656
1657		/* self-branches are hopeless */
1658		if (brpc != pc && brpc != 0) {
1659			if ((rc = ss_put_breakpoint(p, brpc,
1660			    &p->p_md.md_bp1va, &p->p_md.md_bp1save)) != 0)
1661				return (rc);
1662		}
1663	}
1664
1665	if ((rc = ss_put_breakpoint(p, pc + 4,
1666	    &p->p_md.md_bp0va, &p->p_md.md_bp0save)) != 0)
1667		return (rc);
1668
1669	return (0);
1670}
1671
1672#endif	/* PTRACE */
1673
1674#ifdef DIAGNOSTIC
1675void
1676splassert_check(int wantipl, const char *func)
1677{
1678	int oldipl;
1679
1680	/*
1681	 * This will raise the spl if too low,
1682	 * in a feeble attempt to reduce further damage.
1683	 */
1684	oldipl = raiseipl(wantipl);
1685
1686	if (oldipl < wantipl) {
1687		splassert_fail(wantipl, oldipl, func);
1688	}
1689}
1690#endif
1691
1692/*
1693 * ld.d and st.d instructions referencing long aligned but not long long
1694 * aligned addresses will trigger a misaligned address exception.
1695 *
1696 * This routine attempts to recover these (valid) statements, by simulating
1697 * the split form of the instruction. If it fails, it returns the appropriate
1698 * signal number to deliver.
1699 */
1700int
1701double_reg_fixup(struct trapframe *frame)
1702{
1703	u_int32_t pc, instr, value;
1704	int regno, store;
1705	vaddr_t addr;
1706
1707	/*
1708	 * Decode the faulting instruction.
1709	 */
1710
1711	pc = PC_REGS(&frame->tf_regs);
1712	if (copyin((void *)pc, &instr, sizeof(u_int32_t)) != 0)
1713		return SIGSEGV;
1714
1715	switch (instr & 0xfc00ff00) {
1716	case 0xf4001000:	/* ld.d rD, rS1, rS2 */
1717		addr = frame->tf_r[(instr >> 16) & 0x1f]
1718		    + frame->tf_r[(instr & 0x1f)];
1719		store = 0;
1720		break;
1721	case 0xf4001200:	/* ld.d rD, rS1[rS2] */
1722		addr = frame->tf_r[(instr >> 16) & 0x1f]
1723		    + (frame->tf_r[(instr & 0x1f)] << 3);
1724		store = 0;
1725		break;
1726	case 0xf4002000:	/* st.d rD, rS1, rS2 */
1727		addr = frame->tf_r[(instr >> 16) & 0x1f]
1728		    + frame->tf_r[(instr & 0x1f)];
1729		store = 1;
1730		break;
1731	case 0xf4002200:	/* st.d rD, rS1[rS2] */
1732		addr = frame->tf_r[(instr >> 16) & 0x1f]
1733		    + (frame->tf_r[(instr & 0x1f)] << 3);
1734		store = 1;
1735		break;
1736	default:
1737		switch (instr & 0xfc000000) {
1738		case 0x10000000:	/* ld.d rD, rS, imm16 */
1739			addr = (instr & 0x0000ffff) +
1740			    frame->tf_r[(instr >> 16) & 0x1f];
1741			store = 0;
1742			break;
1743		case 0x20000000:	/* st.d rD, rS, imm16 */
1744			addr = (instr & 0x0000ffff) +
1745			    frame->tf_r[(instr >> 16) & 0x1f];
1746			store = 1;
1747			break;
1748		default:
1749			return SIGBUS;
1750		}
1751		break;
1752	}
1753
1754	/* We only handle long but not long long aligned access here */
1755	if ((addr & 0x07) != 4)
1756		return SIGBUS;
1757
1758	regno = (instr >> 21) & 0x1f;
1759
1760	if (store) {
1761		/*
1762		 * Two word stores.
1763		 */
1764		value = frame->tf_r[regno++];
1765		if (copyout(&value, (void *)addr, sizeof(u_int32_t)) != 0)
1766			return SIGSEGV;
1767		if (regno == 32)
1768			value = 0;
1769		else
1770			value = frame->tf_r[regno];
1771		if (copyout(&value, (void *)(addr + 4), sizeof(u_int32_t)) != 0)
1772			return SIGSEGV;
1773	} else {
1774		/*
1775		 * Two word loads. r0 should be left unaltered, but the
1776		 * value should still be fetched even if it is discarded.
1777		 */
1778		if (copyin((void *)addr, &value, sizeof(u_int32_t)) != 0)
1779			return SIGSEGV;
1780		if (regno != 0)
1781			frame->tf_r[regno] = value;
1782		if (copyin((void *)(addr + 4), &value, sizeof(u_int32_t)) != 0)
1783			return SIGSEGV;
1784		if (regno != 31)
1785			frame->tf_r[regno + 1] = value;
1786	}
1787
1788	return 0;
1789}
1790
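/*
 * Handle a user-requested cache flush: synchronize the caches over the
 * range described by the r2/r3 trap arguments, then return to user mode.
 */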
1791void
1792cache_flush(struct trapframe *tf)
1793{
1794	struct proc *p;
1795	struct pmap *pmap;
1796	u_quad_t sticks;
1797
1798	if ((p = curproc) == NULL)
1799		p = &proc0;
1800
1801	sticks = p->p_sticks;
1802	p->p_md.md_tf = tf;
1803
1804	pmap = vm_map_pmap(&p->p_vmspace->vm_map);
1805	dma_cachectl(pmap, tf->tf_r[2], tf->tf_r[3], DMA_CACHE_SYNC);
1806
1807	tf->tf_snip = tf->tf_snip & ~NIP_E;
1808	tf->tf_sfip = tf->tf_sfip & ~FIP_E;
1809
1810	userret(p, tf, sticks);
1811}
1812