/* trap.c revision 1.81 */
1/*	$OpenBSD: trap.c,v 1.81 2012/08/07 05:16:53 guenther Exp $	*/
2/*
3 * Copyright (c) 2004, Miodrag Vallat.
4 * Copyright (c) 1998 Steve Murphree, Jr.
5 * Copyright (c) 1996 Nivas Madhur
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *      This product includes software developed by Nivas Madhur.
19 * 4. The name of the author may not be used to endorse or promote products
20 *    derived from this software without specific prior written permission
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34/*
35 * Mach Operating System
36 * Copyright (c) 1991 Carnegie Mellon University
37 * Copyright (c) 1991 OMRON Corporation
38 * All Rights Reserved.
39 *
40 * Permission to use, copy, modify and distribute this software and its
41 * documentation is hereby granted, provided that both the copyright
42 * notice and this permission notice appear in all copies of the
43 * software, derivative works or modified versions, and any portions
44 * thereof, and that both notices appear in supporting documentation.
45 *
46 */
47
48#include <sys/types.h>
49#include <sys/param.h>
50#include <sys/proc.h>
51#include <sys/signalvar.h>
52#include <sys/user.h>
53#include <sys/syscall.h>
54#include <sys/syscall_mi.h>
55#include <sys/systm.h>
56#include <sys/ktrace.h>
57
58#include <uvm/uvm_extern.h>
59
60#include <machine/asm_macro.h>
61#include <machine/cmmu.h>
62#include <machine/cpu.h>
63#ifdef M88100
64#include <machine/m88100.h>
65#include <machine/m8820x.h>
66#endif
67#ifdef M88110
68#include <machine/m88110.h>
69#endif
70#include <machine/fpu.h>
71#include <machine/pcb.h>
72#include <machine/psl.h>
73#include <machine/trap.h>
74
75#include <machine/db_machdep.h>
76
77#define SSBREAKPOINT (0xF000D1F8U) /* Single Step Breakpoint */
78
79#define USERMODE(PSR)   (((PSR) & PSR_MODE) == 0)
80#define SYSTEMMODE(PSR) (((PSR) & PSR_MODE) != 0)
81
82void printtrap(int, struct trapframe *);
83__dead void panictrap(int, struct trapframe *);
84__dead void error_fatal(struct trapframe *);
85int double_reg_fixup(struct trapframe *);
86int ss_put_value(struct proc *, vaddr_t, u_int);
87
88extern void regdump(struct trapframe *f);
89
/*
 * Human-readable names for the hardware trap vectors, indexed by trap
 * type.  Consumed by panictrap() when reporting a fatal trap.
 */
const char *trap_type[] = {
	"Reset",
	"Interrupt Exception",
	"Instruction Access",
	"Data Access Exception",
	"Misaligned Access",
	"Unimplemented Opcode",
	"Privilege Violation",	/* was missing ',': merged with next entry */
	"Bounds Check Violation",
	"Illegal Integer Divide",
	"Integer Overflow",
	"Error Exception",
	"Non-Maskable Exception",
};

const int trap_types = sizeof trap_type / sizeof trap_type[0];
106
107#ifdef M88100
/*
 * Names for the 8821x P-bus fault status codes, indexed by the value
 * extracted with CMMU_PFSR_FAULT() from an instruction or data PFSR
 * (see printtrap() and the TRAPDEBUG printfs in m88100_trap()).
 */
const char *pbus_exception_type[] = {
	"Success (No Fault)",
	"unknown 1",
	"unknown 2",
	"Bus Error",
	"Segment Fault",
	"Page Fault",
	"Supervisor Violation",
	"Write Violation",
};
118#endif
119
/*
 * Print a description of a trap, using the CPU-specific trapframe fields
 * (sxip and the P-bus fault status registers on 88100, exip on 88110),
 * then dump the full register state when DDB is configured.
 */
void
printtrap(int type, struct trapframe *frame)
{
#ifdef M88100
	if (CPU_IS88100) {
		/*
		 * NOTE(review): 2 and 3 appear to be the raw vector numbers
		 * for instruction and data access faults — confirm against
		 * <machine/trap.h> (T_INSTFLT/T_DATAFLT).
		 */
		if (type == 2) {
			/* instruction exception */
			printf("\nInstr access fault (%s) v = %x, frame %p\n",
			    pbus_exception_type[
			      CMMU_PFSR_FAULT(frame->tf_ipfsr)],
			    frame->tf_sxip & XIP_ADDR, frame);
		} else if (type == 3) {
			/* data access exception */
			printf("\nData access fault (%s) v = %x, frame %p\n",
			    pbus_exception_type[
			      CMMU_PFSR_FAULT(frame->tf_dpfsr)],
			    frame->tf_sxip & XIP_ADDR, frame);
		} else
			printf("\nTrap type %d, v = %x, frame %p\n",
			    type, frame->tf_sxip & XIP_ADDR, frame);
	}
#endif
#ifdef M88110
	if (CPU_IS88110) {
		printf("\nTrap type %d, v = %x, frame %p\n",
		    type, frame->tf_exip, frame);
	}
#endif
#ifdef DDB
	regdump(frame);
#endif
}
152
153__dead void
154panictrap(int type, struct trapframe *frame)
155{
156	static int panicing = 0;
157
158	if (panicing++ == 0)
159		printtrap(type, frame);
160	if ((u_int)type < trap_types)
161		panic(trap_type[type]);
162	else
163		panic("trap %d", type);
164	/*NOTREACHED*/
165}
166
167/*
168 * Handle external interrupts.
169 */
170void
171interrupt(struct trapframe *frame)
172{
173	struct cpu_info *ci = curcpu();
174
175	ci->ci_intrdepth++;
176	md_interrupt_func(frame);
177	ci->ci_intrdepth--;
178}
179
180#ifdef M88110
181/*
182 * Handle non-maskable interrupts.
183 */
int
nmi(struct trapframe *frame)
{
	/* Defer entirely to the board-specific NMI handler; propagate its
	 * return value to the caller. */
	return md_nmi_func(frame);
}
189
190/*
191 * Reenable non-maskable interrupts.
192 */
void
nmi_wrapup(struct trapframe *frame)
{
	/* Board-specific hook run after NMI processing to reenable NMIs. */
	md_nmi_wrapup_func(frame);
}
198#endif
199
200/*
201 * Handle asynchronous software traps.
202 */
void
ast(struct trapframe *frame)
{
	struct cpu_info *ci = curcpu();
	struct proc *p = ci->ci_curproc;

	uvmexp.softs++;
	/* Clear the pending flag first, so a fresh AST can be posted while
	 * we service this one. */
	p->p_md.md_astpending = 0;
	if (p->p_flag & P_OWEUPC) {
		/* Charge deferred profiling ticks to the process. */
		KERNEL_LOCK();
		ADDUPROF(p);
		KERNEL_UNLOCK();
	}
	if (ci->ci_want_resched)
		preempt(NULL);

	userret(p);
}
221
222#ifdef M88100
/*
 * Handle an MC88100 processor exception.
 *
 * `type' is the hardware trap vector, biased by T_USER when the trap was
 * taken from user mode; `frame' is the saved register state.  Memory
 * faults are resolved through uvm_fault() when possible (honoring the
 * pcb_onfault recovery address for copyin/copyout); unresolved user-mode
 * traps are converted to signals via trapsignal(), and unresolved
 * kernel-mode traps panic through panictrap().
 */
void
m88100_trap(u_int type, struct trapframe *frame)
{
	struct proc *p;
	struct vm_map *map;
	vaddr_t va, pcb_onfault;
	vm_prot_t ftype;
	int fault_type, pbus_type;
	u_long fault_code;
	vaddr_t fault_addr;
	struct vmspace *vm;
	union sigval sv;
	int result;
#ifdef DDB
	int s;
	u_int psr;
#endif
	int sig = 0;

	uvmexp.traps++;
	if ((p = curproc) == NULL)
		p = &proc0;

	if (USERMODE(frame->tf_epsr)) {
		type += T_USER;
		p->p_md.md_tf = frame;	/* for ptrace/signals */
	}
	fault_type = SI_NOINFO;
	fault_code = 0;
	/* Default fault address: the faulting instruction (sxip). */
	fault_addr = frame->tf_sxip & XIP_ADDR;

	switch (type) {
	default:
	case T_ILLFLT:
lose:
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

#if defined(DDB)
	case T_KDB_BREAK:
		/* Enter the kernel debugger with interrupts disabled. */
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_ENTRY:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
#endif /* DDB */
	case T_MISALGNFLT:
		printf("kernel misaligned access exception @%p\n",
		    frame->tf_sxip);
		goto lose;
	case T_INSTFLT:
		/* kernel mode instruction access fault.
		 * Should never, never happen for a non-paged kernel.
		 */
#ifdef TRAPDEBUG
		pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
		printf("Kernel Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
		    pbus_type, pbus_exception_type[pbus_type],
		    fault_addr, frame, frame->tf_cpu);
#endif
		goto lose;
	case T_DATAFLT:
		/* kernel mode data fault */

		/* data fault on the user address? */
		if ((frame->tf_dmt0 & DMT_DAS) == 0) {
			KERNEL_LOCK();
			goto user_fault;
		}

		fault_addr = frame->tf_dma0;
		/* Write or xmem (lock) accesses need write permission. */
		if (frame->tf_dmt0 & (DMT_WRITE|DMT_LOCKBAR)) {
			ftype = VM_PROT_READ|VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		} else {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		}

		va = trunc_page((vaddr_t)fault_addr);

		KERNEL_LOCK();
		vm = p->p_vmspace;
		map = kernel_map;

		pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
#ifdef TRAPDEBUG
		printf("Kernel Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
		    pbus_type, pbus_exception_type[pbus_type],
		    fault_addr, frame, frame->tf_cpu);
#endif

		switch (pbus_type) {
		case CMMU_PFSR_SUCCESS:
			/*
			 * The fault was resolved. Call data_access_emulation
			 * to drain the data unit pipe line and reset dmt0
			 * so that trap won't get called again.
			 */
			data_access_emulation((u_int *)frame);
			frame->tf_dpfsr = 0;
			frame->tf_dmt0 = 0;
			KERNEL_UNLOCK();
			return;
		case CMMU_PFSR_SFAULT:
		case CMMU_PFSR_PFAULT:
			/*
			 * Clear pcb_onfault around uvm_fault() so a nested
			 * fault does not take the recovery path by mistake.
			 */
			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
				p->p_addr->u_pcb.pcb_onfault = 0;
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			/*
			 * This could be a fault caused in copyout*()
			 * while accessing kernel space.
			 */
			if (result != 0 && pcb_onfault != 0) {
				frame->tf_snip = pcb_onfault | NIP_V;
				frame->tf_sfip = (pcb_onfault + 4) | FIP_V;
				frame->tf_sxip = 0;
				/*
				 * Continue as if the fault had been resolved,
				 * but do not try to complete the faulting
				 * access.
				 */
				frame->tf_dmt0 |= DMT_SKIP;
				result = 0;
			}
			if (result == 0) {
				/*
				 * We could resolve the fault. Call
				 * data_access_emulation to drain the data
				 * unit pipe line and reset dmt0 so that trap
				 * won't get called again.
				 */
				data_access_emulation((u_int *)frame);
				frame->tf_dpfsr = 0;
				frame->tf_dmt0 = 0;
				KERNEL_UNLOCK();
				return;
			}
			break;
		}
#ifdef TRAPDEBUG
		printf("PBUS Fault %d (%s) va = 0x%x\n", pbus_type,
		    pbus_exception_type[pbus_type], va);
#endif
		KERNEL_UNLOCK();
		goto lose;
		/* NOTREACHED */
	case T_INSTFLT+T_USER:
		/* User mode instruction access fault */
		/* FALLTHROUGH */
	case T_DATAFLT+T_USER:
		KERNEL_LOCK();
user_fault:
		/*
		 * Common user-address fault path; also reached from
		 * T_DATAFLT above when the access targeted user space
		 * (DMT_DAS clear).  Kernel lock is held here.
		 */
		if (type == T_INSTFLT + T_USER) {
			pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
#ifdef TRAPDEBUG
			printf("User Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
			    pbus_type, pbus_exception_type[pbus_type],
			    fault_addr, frame, frame->tf_cpu);
#endif
		} else {
			fault_addr = frame->tf_dma0;
			pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
#ifdef TRAPDEBUG
			printf("User Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
			    pbus_type, pbus_exception_type[pbus_type],
			    fault_addr, frame, frame->tf_cpu);
#endif
		}

		if (frame->tf_dmt0 & (DMT_WRITE | DMT_LOCKBAR)) {
			ftype = VM_PROT_READ | VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		} else {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		}

		va = trunc_page((vaddr_t)fault_addr);

		vm = p->p_vmspace;
		map = &vm->vm_map;
		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
			p->p_addr->u_pcb.pcb_onfault = 0;

		/* Call uvm_fault() to resolve non-bus error faults */
		switch (pbus_type) {
		case CMMU_PFSR_SUCCESS:
			result = 0;
			break;
		case CMMU_PFSR_BERROR:
			result = EACCES;
			break;
		default:
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			break;
		}

		p->p_addr->u_pcb.pcb_onfault = pcb_onfault;

		/* Grow the stack segment on a successful fault above it. */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (result == 0)
				uvm_grow(p, va);
			else if (result == EACCES)
				result = EFAULT;
		}

		/*
		 * This could be a fault caused in copyin*()
		 * while accessing user space.
		 */
		if (result != 0 && pcb_onfault != 0) {
			frame->tf_snip = pcb_onfault | NIP_V;
			frame->tf_sfip = (pcb_onfault + 4) | FIP_V;
			frame->tf_sxip = 0;
			/*
			 * Continue as if the fault had been resolved, but
			 * do not try to complete the faulting access.
			 */
			frame->tf_dmt0 |= DMT_SKIP;
			result = 0;
		}

		if (result == 0) {
			if (type == T_INSTFLT + T_USER) {
				/*
				 * back up SXIP, SNIP,
				 * clearing the Error bit
				 */
				frame->tf_sfip = frame->tf_snip & ~FIP_E;
				frame->tf_snip = frame->tf_sxip & ~NIP_E;
				frame->tf_ipfsr = 0;
			} else {
				/*
				 * We could resolve the fault. Call
				 * data_access_emulation to drain the data unit
				 * pipe line and reset dmt0 so that trap won't
				 * get called again.
				 */
				data_access_emulation((u_int *)frame);
				frame->tf_dpfsr = 0;
				frame->tf_dmt0 = 0;
			}
		} else {
			sig = result == EACCES ? SIGBUS : SIGSEGV;
			fault_type = result == EACCES ?
			    BUS_ADRERR : SEGV_MAPERR;
		}
		KERNEL_UNLOCK();
		break;
	case T_MISALGNFLT+T_USER:
		/* Fix any misaligned ld.d or st.d instructions */
		sig = double_reg_fixup(frame);
		fault_type = BUS_ADRALN;
		break;
	case T_PRIVINFLT+T_USER:
	case T_ILLFLT+T_USER:
#ifndef DDB
	case T_KDB_BREAK:
	case T_KDB_ENTRY:
#endif
	case T_KDB_BREAK+T_USER:
	case T_KDB_ENTRY+T_USER:
	case T_KDB_TRACE:
	case T_KDB_TRACE+T_USER:
		sig = SIGILL;
		break;
	case T_BNDFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_ZERODIV+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTDIV;
		break;
	case T_OVFFLT+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTOVF;
		break;
	case T_FPEPFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_SIGSYS+T_USER:
		sig = SIGSYS;
		break;
	case T_STEPBPT+T_USER:
#ifdef PTRACE
		/*
		 * This trap is used by the kernel to support single-step
		 * debugging (although any user could generate this trap
		 * which should probably be handled differently). When a
		 * process is continued by a debugger with the PT_STEP
		 * function of ptrace (single step), the kernel inserts
		 * one or two breakpoints in the user process so that only
		 * one instruction (or two in the case of a delayed branch)
		 * is executed.  When this breakpoint is hit, we get the
		 * T_STEPBPT trap.
		 */
		{
			u_int instr;
			vaddr_t pc = PC_REGS(&frame->tf_regs);

			/* read break instruction */
			copyin((caddr_t)pc, &instr, sizeof(u_int));

			/* check and see if we got here by accident */
			if ((p->p_md.md_bp0va != pc &&
			     p->p_md.md_bp1va != pc) ||
			    instr != SSBREAKPOINT) {
				sig = SIGTRAP;
				fault_type = TRAP_TRACE;
				break;
			}

			/* restore original instruction and clear breakpoint */
			if (p->p_md.md_bp0va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp0save);
				p->p_md.md_bp0va = 0;
			}
			if (p->p_md.md_bp1va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp1save);
				p->p_md.md_bp1va = 0;
			}

#if 1
			frame->tf_sfip = frame->tf_snip;
			frame->tf_snip = pc | NIP_V;
#endif
			sig = SIGTRAP;
			fault_type = TRAP_BRKPT;
		}
#else
		sig = SIGTRAP;
		fault_type = TRAP_TRACE;
#endif
		break;

	case T_USERBPT+T_USER:
		/*
		 * This trap is meant to be used by debuggers to implement
		 * breakpoint debugging.  When we get this trap, we just
		 * return a signal which gets caught by the debugger.
		 */
		frame->tf_sfip = frame->tf_snip;
		frame->tf_snip = frame->tf_sxip;
		sig = SIGTRAP;
		fault_type = TRAP_BRKPT;
		break;

	}

	/*
	 * If trap from supervisor mode, just return
	 */
	if (type < T_USER)
		return;

	if (sig) {
		sv.sival_ptr = (void *)fault_addr;
		KERNEL_LOCK();
		trapsignal(p, sig, fault_code, fault_type, sv);
		KERNEL_UNLOCK();
		/*
		 * don't want multiple faults - we are going to
		 * deliver signal.
		 */
		frame->tf_dmt0 = 0;
		frame->tf_ipfsr = frame->tf_dpfsr = 0;
	}

	userret(p);
}
604#endif /* M88100 */
605
606#ifdef M88110
/*
 * Handle an MC88110 processor exception.
 *
 * `type' is the hardware trap vector, biased by T_USER when the trap was
 * taken from user mode; `frame' is the saved register state.  Includes a
 * workaround for the mc88110 errata #16 (delayed-branch exip off-by-4).
 * Faults are resolved through uvm_fault() where possible (honoring the
 * pcb_onfault recovery address); unresolved user-mode traps become
 * signals, unresolved kernel-mode traps panic through panictrap().
 */
void
m88110_trap(u_int type, struct trapframe *frame)
{
	struct proc *p;
	struct vm_map *map;
	vaddr_t va, pcb_onfault;
	vm_prot_t ftype;
	int fault_type;
	u_long fault_code;
	vaddr_t fault_addr;
	struct vmspace *vm;
	union sigval sv;
	int result;
#ifdef DDB
        int s;
	u_int psr;
#endif
	int sig = 0;

	uvmexp.traps++;
	if ((p = curproc) == NULL)
		p = &proc0;

	fault_type = SI_NOINFO;
	fault_code = 0;
	/* Default fault address: the faulting instruction (exip). */
	fault_addr = frame->tf_exip & XIP_ADDR;

	/*
	 * 88110 errata #16 (4.2) or #3 (5.1.1):
	 * ``bsr, br, bcnd, jsr and jmp instructions with the .n extension
	 *   can cause the enip value to be incremented by 4 incorrectly
	 *   if the instruction in the delay slot is the first word of a
	 *   page which misses in the mmu and results in a hardware
	 *   tablewalk which encounters an exception or an invalid
	 *   descriptor.  The exip value in this case will point to the
	 *   first word of the page, and the D bit will be set.
	 *
	 *   Note: if the instruction is a jsr.n r1, r1 will be overwritten
	 *   with erroneous data.  Therefore, no recovery is possible. Do
	 *   not allow this instruction to occupy the last word of a page.
	 *
	 *   Suggested fix: recover in general by backing up the exip by 4
	 *   and clearing the delay bit before an rte when the lower 3 hex
	 *   digits of the exip are 001.''
	 */
	if ((frame->tf_exip & PAGE_MASK) == 0x00000001 && type == T_INSTFLT) {
		u_int instr;

		/*
		 * Note that we have initialized fault_addr above, so that
		 * signals provide the correct address if necessary.
		 */
		frame->tf_exip = (frame->tf_exip & ~1) - 4;

		/*
		 * Check the instruction at the (backed up) exip.
		 * If it is a jsr.n, abort.
		 */
		if (!USERMODE(frame->tf_epsr)) {
			instr = *(u_int *)fault_addr;
			if (instr == 0xf400cc01)	/* jsr.n r1 opcode */
				panic("mc88110 errata #16, exip %p enip %p",
				    (frame->tf_exip + 4) | 1, frame->tf_enip);
		} else {
			/* copyin here should not fail */
			if (copyin((const void *)frame->tf_exip, &instr,
			    sizeof instr) == 0 &&
			    instr == 0xf400cc01) {
				uprintf("mc88110 errata #16, exip %p enip %p",
				    (frame->tf_exip + 4) | 1, frame->tf_enip);
				sig = SIGILL;
			}
		}
	}

	if (USERMODE(frame->tf_epsr)) {
		type += T_USER;
		p->p_md.md_tf = frame;	/* for ptrace/signals */
	}

	/* Errata workaround above may already have chosen a signal. */
	if (sig != 0)
		goto deliver;

	switch (type) {
	default:
lose:
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

#ifdef DEBUG
	case T_110_DRM+T_USER:
	case T_110_DRM:
		printf("DMMU read miss: Hardware Table Searches should be enabled!\n");
		goto lose;
	case T_110_DWM+T_USER:
	case T_110_DWM:
		printf("DMMU write miss: Hardware Table Searches should be enabled!\n");
		goto lose;
	case T_110_IAM+T_USER:
	case T_110_IAM:
		printf("IMMU miss: Hardware Table Searches should be enabled!\n");
		goto lose;
#endif

#ifdef DDB
	case T_KDB_TRACE:
		/* Enter the kernel debugger with interrupts disabled. */
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_TRACE, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_BREAK:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_ENTRY:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
		set_psr(psr);
		/* skip trap instruction */
		m88110_skip_insn(frame);
		splx(s);
		return;
#endif /* DDB */
	case T_ILLFLT:
		/*
		 * The 88110 seems to trigger an instruction fault in
		 * supervisor mode when running the following sequence:
		 *
		 *	bcnd.n cond, reg, 1f
		 *	arithmetic insn
		 *	...
		 *  	the same exact arithmetic insn
		 *  1:	another arithmetic insn stalled by the previous one
		 *	...
		 *
		 * The exception is reported with exip pointing to the
		 * branch address. I don't know, at this point, if there
		 * is any better workaround than the aggressive one
		 * implemented below; I don't see how this could relate to
		 * any of the 88110 errata (although it might be related to
		 * branch prediction).
		 *
		 * For the record, the exact sequence triggering the
		 * spurious exception is:
		 *
		 *	bcnd.n	eq0, r2,  1f
		 *	 or	r25, r0,  r22
		 *	bsr	somewhere
		 *	or	r25, r0,  r22
		 *  1:	cmp	r13, r25, r20
		 *
		 * within the same cache line.
		 *
		 * Simply ignoring the exception and returning does not
		 * cause the exception to disappear. Clearing the
		 * instruction cache works, but on 88110+88410 systems,
		 * the 88410 needs to be invalidated as well. (note that
		 * the size passed to the flush routines does not matter
		 * since there is no way to flush a subset of the 88110
		 * I$ anyway)
		 */
	    {
		extern void *kernel_text, *etext;

		if (fault_addr >= (vaddr_t)&kernel_text &&
		    fault_addr < (vaddr_t)&etext) {
			cmmu_icache_inv(curcpu()->ci_cpuid,
			    trunc_page(fault_addr), PAGE_SIZE);
			cmmu_cache_wbinv(curcpu()->ci_cpuid,
			    trunc_page(fault_addr), PAGE_SIZE);
			return;
		}
	    }
		goto lose;
	case T_MISALGNFLT:
		printf("kernel misaligned access exception @%p\n",
		    frame->tf_exip);
		goto lose;
	case T_INSTFLT:
		/* kernel mode instruction access fault.
		 * Should never, never happen for a non-paged kernel.
		 */
#ifdef TRAPDEBUG
		printf("Kernel Instruction fault exip %x isr %x ilar %x\n",
		    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
#endif
		goto lose;

	case T_DATAFLT:
		/* kernel mode data fault */

		/* data fault on the user address? */
		if ((frame->tf_dsr & CMMU_DSR_SU) == 0) {
			KERNEL_LOCK();
			goto m88110_user_fault;
		}

#ifdef TRAPDEBUG
		printf("Kernel Data access fault exip %x dsr %x dlar %x\n",
		    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
#endif

		fault_addr = frame->tf_dlar;
		/* CMMU_DSR_RW set means the faulted access was a read. */
		if (frame->tf_dsr & CMMU_DSR_RW) {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		} else {
			ftype = VM_PROT_READ|VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		}

		va = trunc_page((vaddr_t)fault_addr);

		KERNEL_LOCK();
		vm = p->p_vmspace;
		map = kernel_map;

		if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
			/*
			 * On a segment or a page fault, call uvm_fault() to
			 * resolve the fault.
			 */
			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
				p->p_addr->u_pcb.pcb_onfault = 0;
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			/*
			 * This could be a fault caused in copyout*()
			 * while accessing kernel space.
			 */
			if (result != 0 && pcb_onfault != 0) {
				frame->tf_exip = pcb_onfault;
				/*
				 * Continue as if the fault had been resolved.
				 */
				result = 0;
			}
			if (result == 0) {
				KERNEL_UNLOCK();
				return;
			}
		}
		KERNEL_UNLOCK();
		goto lose;
	case T_INSTFLT+T_USER:
		/* User mode instruction access fault */
		/* FALLTHROUGH */
	case T_DATAFLT+T_USER:
		KERNEL_LOCK();
m88110_user_fault:
		/*
		 * Common user-address fault path; also reached from
		 * T_DATAFLT above when the access targeted user space
		 * (CMMU_DSR_SU clear).  Kernel lock is held here.
		 */
		if (type == T_INSTFLT+T_USER) {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
#ifdef TRAPDEBUG
			printf("User Instruction fault exip %x isr %x ilar %x\n",
			    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
#endif
		} else {
			fault_addr = frame->tf_dlar;
			if (frame->tf_dsr & CMMU_DSR_RW) {
				ftype = VM_PROT_READ;
				fault_code = VM_PROT_READ;
			} else {
				ftype = VM_PROT_READ|VM_PROT_WRITE;
				fault_code = VM_PROT_WRITE;
			}
#ifdef TRAPDEBUG
			printf("User Data access fault exip %x dsr %x dlar %x\n",
			    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
#endif
		}

		va = trunc_page((vaddr_t)fault_addr);

		vm = p->p_vmspace;
		map = &vm->vm_map;
		/*
		 * Clear pcb_onfault around uvm_fault() so a nested fault
		 * does not take the recovery path by mistake.
		 */
		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
			p->p_addr->u_pcb.pcb_onfault = 0;

		/*
		 * Call uvm_fault() to resolve non-bus error faults
		 * whenever possible.
		 */
		if (type == T_INSTFLT+T_USER) {
			/* instruction faults */
			if (frame->tf_isr &
			    (CMMU_ISR_BE | CMMU_ISR_SP | CMMU_ISR_TBE)) {
				/* bus error, supervisor protection */
				result = EACCES;
			} else
			if (frame->tf_isr & (CMMU_ISR_SI | CMMU_ISR_PI)) {
				/* segment or page fault */
				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			} else {
#ifdef TRAPDEBUG
				printf("Unexpected Instruction fault isr %x\n",
				    frame->tf_isr);
#endif
				KERNEL_UNLOCK();
				goto lose;
			}
		} else {
			/* data faults */
			if (frame->tf_dsr & CMMU_DSR_BE) {
				/* bus error */
				result = EACCES;
			} else
			if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
				/* segment or page fault */
				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			} else
			if (frame->tf_dsr & (CMMU_DSR_CP | CMMU_DSR_WA)) {
				/* copyback or write allocate error */
				result = EACCES;
			} else
			if (frame->tf_dsr & CMMU_DSR_WE) {
				/* write fault  */
				/* This could be a write protection fault or an
				 * exception to set the used and modified bits
				 * in the pte. Basically, if we got a write
				 * error, then we already have a pte entry that
				 * faulted in from a previous seg fault or page
				 * fault.
				 * Get the pte and check the status of the
				 * modified and valid bits to determine if this
				 * indeed a real write fault.  XXX smurph
				 */
				if (pmap_set_modify(map->pmap, va)) {
#ifdef TRAPDEBUG
					printf("Corrected userland write fault, pmap %p va %p\n",
					    map->pmap, va);
#endif
					result = 0;
				} else {
					/* must be a real wp fault */
#ifdef TRAPDEBUG
					printf("Uncorrected userland write fault, pmap %p va %p\n",
					    map->pmap, va);
#endif
					result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
				}
			} else {
#ifdef TRAPDEBUG
				printf("Unexpected Data access fault dsr %x\n",
				    frame->tf_dsr);
#endif
				KERNEL_UNLOCK();
				goto lose;
			}
		}
		p->p_addr->u_pcb.pcb_onfault = pcb_onfault;

		/* Grow the stack segment on a successful fault above it. */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (result == 0)
				uvm_grow(p, va);
			else if (result == EACCES)
				result = EFAULT;
		}
		KERNEL_UNLOCK();

		/*
		 * This could be a fault caused in copyin*()
		 * while accessing user space.
		 */
		if (result != 0 && pcb_onfault != 0) {
			frame->tf_exip = pcb_onfault;
			/*
			 * Continue as if the fault had been resolved.
			 */
			result = 0;
		}

		if (result != 0) {
			sig = result == EACCES ? SIGBUS : SIGSEGV;
			fault_type = result == EACCES ?
			    BUS_ADRERR : SEGV_MAPERR;
		}
		break;
	case T_MISALGNFLT+T_USER:
		/* Fix any misaligned ld.d or st.d instructions */
		sig = double_reg_fixup(frame);
		fault_type = BUS_ADRALN;
		if (sig == 0) {
			/* skip recovered instruction */
			m88110_skip_insn(frame);
			goto userexit;
		}
		break;
	case T_PRIVINFLT+T_USER:
		fault_type = ILL_PRVREG;
		/* FALLTHROUGH */
	case T_ILLFLT+T_USER:
#ifndef DDB
	case T_KDB_BREAK:
	case T_KDB_ENTRY:
	case T_KDB_TRACE:
#endif
	case T_KDB_BREAK+T_USER:
	case T_KDB_ENTRY+T_USER:
	case T_KDB_TRACE+T_USER:
		sig = SIGILL;
		break;
	case T_BNDFLT+T_USER:
		sig = SIGFPE;
		/* skip trap instruction */
		m88110_skip_insn(frame);
		break;
	case T_ZERODIV+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTDIV;
		/* skip trap instruction */
		m88110_skip_insn(frame);
		break;
	case T_OVFFLT+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTOVF;
		/* skip trap instruction */
		m88110_skip_insn(frame);
		break;
	case T_FPEPFLT+T_USER:
		/* FPU exceptions are handled (and signals posted) there. */
		m88110_fpu_exception(frame);
		goto userexit;
	case T_SIGSYS+T_USER:
		sig = SIGSYS;
		break;
	case T_STEPBPT+T_USER:
#ifdef PTRACE
		/*
		 * This trap is used by the kernel to support single-step
		 * debugging (although any user could generate this trap
		 * which should probably be handled differently). When a
		 * process is continued by a debugger with the PT_STEP
		 * function of ptrace (single step), the kernel inserts
		 * one or two breakpoints in the user process so that only
		 * one instruction (or two in the case of a delayed branch)
		 * is executed.  When this breakpoint is hit, we get the
		 * T_STEPBPT trap.
		 */
		{
			u_int instr;
			vaddr_t pc = PC_REGS(&frame->tf_regs);

			/* read break instruction */
			copyin((caddr_t)pc, &instr, sizeof(u_int));

			/* check and see if we got here by accident */
			if ((p->p_md.md_bp0va != pc &&
			     p->p_md.md_bp1va != pc) ||
			    instr != SSBREAKPOINT) {
				sig = SIGTRAP;
				fault_type = TRAP_TRACE;
				break;
			}

			/* restore original instruction and clear breakpoint */
			if (p->p_md.md_bp0va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp0save);
				p->p_md.md_bp0va = 0;
			}
			if (p->p_md.md_bp1va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp1save);
				p->p_md.md_bp1va = 0;
			}

			sig = SIGTRAP;
			fault_type = TRAP_BRKPT;
		}
#else
		sig = SIGTRAP;
		fault_type = TRAP_TRACE;
#endif
		break;
	case T_USERBPT+T_USER:
		/*
		 * This trap is meant to be used by debuggers to implement
		 * breakpoint debugging.  When we get this trap, we just
		 * return a signal which gets caught by the debugger.
		 */
		sig = SIGTRAP;
		fault_type = TRAP_BRKPT;
		break;
	}

	/*
	 * If trap from supervisor mode, just return
	 */
	if (type < T_USER)
		return;

	if (sig) {
deliver:
		sv.sival_ptr = (void *)fault_addr;
		KERNEL_LOCK();
		trapsignal(p, sig, fault_code, fault_type, sv);
		KERNEL_UNLOCK();
	}

userexit:
	userret(p);
}
1114#endif /* M88110 */
1115
1116__dead void
1117error_fatal(struct trapframe *frame)
1118{
1119	if (frame->tf_vector == 0)
1120		printf("\nCPU %d Reset Exception\n", cpu_number());
1121	else
1122		printf("\nCPU %d Error Exception\n", cpu_number());
1123
1124#ifdef DDB
1125	regdump((struct trapframe*)frame);
1126#endif
1127	panic("unrecoverable exception %d", frame->tf_vector);
1128}
1129
1130#ifdef M88100
/*
 * System call handler for the 88100.
 *
 * Decodes the system call number and arguments from the trapframe,
 * dispatches through mi_syscall(), and fixes up the trapframe (return
 * values, PSR carry bit, and the sxip/snip/sfip pipeline pointers)
 * according to the outcome.  See the large comment below for the
 * userland syscall stub layout this relies upon.
 */
void
m88100_syscall(register_t code, struct trapframe *tf)
{
	int i, nsys, nap;
	struct sysent *callp;
	struct proc *p = curproc;
	int error;
	register_t args[8], rval[2], *ap;

	uvmexp.syscalls++;

	callp = p->p_emul->e_sysent;
	nsys  = p->p_emul->e_nsysent;

	p->p_md.md_tf = tf;

	/*
	 * For 88k, all the arguments are passed in the registers (r2-r9),
	 * and further arguments (if any) on stack.
	 * For syscall (and __syscall), r2 (and r3) has the actual code.
	 * __syscall  takes a quad syscall number, so that other
	 * arguments are at their natural alignments.
	 */
	ap = &tf->tf_r[2];
	nap = 8; /* r2-r9 */

	switch (code) {
	case SYS_syscall:
		code = *ap++;
		nap--;
		break;
	case SYS___syscall:
		/* the quad code convention only applies to the native emulation */
		if (callp != sysent)
			break;
		code = ap[_QUAD_LOWWORD];
		ap += 2;
		nap -= 2;
		break;
	}

	/* out-of-range syscall numbers are routed to the emulation's nosys */
	if (code < 0 || code >= nsys)
		callp += p->p_emul->e_nosys;
	else
		callp += code;

	i = callp->sy_argsize / sizeof(register_t);
	if (i > sizeof(args) / sizeof(register_t))
		panic("syscall nargs");
	if (i > nap) {
		/* arguments beyond r9 spill onto the user stack (r31) */
		bcopy((caddr_t)ap, (caddr_t)args, nap * sizeof(register_t));
		if ((error = copyin((caddr_t)tf->tf_r[31], args + nap,
		    (i - nap) * sizeof(register_t))))
			goto bad;
	} else
		bcopy((caddr_t)ap, (caddr_t)args, i * sizeof(register_t));

	rval[0] = 0;
	rval[1] = tf->tf_r[3];

	error = mi_syscall(p, code, callp, args, rval);

	/*
	 * system call will look like:
	 *	 or r13, r0, <code>
	 *       tb0 0, r0, <128> <- sxip
	 *	 br err 	  <- snip
	 *       jmp r1 	  <- sfip
	 *  err: or.u r3, r0, hi16(errno)
	 *	 st r2, r3, lo16(errno)
	 *	 subu r2, r0, 1
	 *	 jmp r1
	 *
	 * So, when we take syscall trap, sxip/snip/sfip will be as
	 * shown above.
	 * Given this,
	 * 1. If the system call returned 0, need to skip nip.
	 *	nip = fip, fip += 4
	 *    (doesn't matter what fip + 4 will be but we will never
	 *    execute this since jmp r1 at nip will change the execution flow.)
	 * 2. If the system call returned an errno > 0, plug the value
	 *    in r2, and leave nip and fip unchanged. This will have us
	 *    executing "br err" on return to user space.
	 * 3. If the system call code returned ERESTART,
	 *    we need to re-execute the trap instruction. Back up the pipe
	 *    line.
	 *     fip = nip, nip = xip
	 * 4. If the system call returned EJUSTRETURN, don't need to adjust
	 *    any pointers.
	 */

	switch (error) {
	case 0:
		/* success: clear carry, skip the "br err" instruction */
		tf->tf_r[2] = rval[0];
		tf->tf_r[3] = rval[1];
		tf->tf_epsr &= ~PSR_C;
		tf->tf_snip = tf->tf_sfip & ~NIP_E;
		tf->tf_sfip = tf->tf_snip + 4;
		break;
	case ERESTART:
		/* back the pipeline up so the trap instruction is re-executed */
		tf->tf_epsr &= ~PSR_C;
		tf->tf_sfip = tf->tf_snip & ~FIP_E;
		tf->tf_snip = tf->tf_sxip & ~NIP_E;
		break;
	case EJUSTRETURN:
		tf->tf_epsr &= ~PSR_C;
		break;
	default:
	bad:
		/* translate errno if this emulation has its own error table */
		if (p->p_emul->e_errno)
			error = p->p_emul->e_errno[error];
		tf->tf_r[2] = error;
		tf->tf_epsr |= PSR_C;   /* fail */
		tf->tf_snip = tf->tf_snip & ~NIP_E;
		tf->tf_sfip = tf->tf_sfip & ~FIP_E;
		break;
	}

	mi_syscall_return(p, code, error, rval);
}
1250#endif /* M88100 */
1251
1252#ifdef M88110
/* Instruction pointers operate differently on mc88110 */
/*
 * System call handler for the 88110.
 *
 * Same structure as m88100_syscall(), but the 88110 exposes exip/enip
 * instead of the three-stage sxip/snip/sfip pipeline, so the return
 * fixup is done by skipping whole instructions via m88110_skip_insn().
 */
void
m88110_syscall(register_t code, struct trapframe *tf)
{
	int i, nsys, nap;
	struct sysent *callp;
	struct proc *p = curproc;
	int error;
	register_t args[8], rval[2], *ap;

	uvmexp.syscalls++;

	callp = p->p_emul->e_sysent;
	nsys  = p->p_emul->e_nsysent;

	p->p_md.md_tf = tf;

	/*
	 * For 88k, all the arguments are passed in the registers (r2-r9),
	 * and further arguments (if any) on stack.
	 * For syscall (and __syscall), r2 (and r3) has the actual code.
	 * __syscall  takes a quad syscall number, so that other
	 * arguments are at their natural alignments.
	 */
	ap = &tf->tf_r[2];
	nap = 8;	/* r2-r9 */

	switch (code) {
	case SYS_syscall:
		code = *ap++;
		nap--;
		break;
	case SYS___syscall:
		/* the quad code convention only applies to the native emulation */
		if (callp != sysent)
			break;
		code = ap[_QUAD_LOWWORD];
		ap += 2;
		nap -= 2;
		break;
	}

	/* out-of-range syscall numbers are routed to the emulation's nosys */
	if (code < 0 || code >= nsys)
		callp += p->p_emul->e_nosys;
	else
		callp += code;

	i = callp->sy_argsize / sizeof(register_t);
	if (i > sizeof(args) / sizeof(register_t))
		panic("syscall nargs");
	if (i > nap) {
		/* arguments beyond r9 spill onto the user stack (r31) */
		bcopy((caddr_t)ap, (caddr_t)args, nap * sizeof(register_t));
		if ((error = copyin((caddr_t)tf->tf_r[31], args + nap,
		    (i - nap) * sizeof(register_t))))
			goto bad;
	} else
		bcopy((caddr_t)ap, (caddr_t)args, i * sizeof(register_t));

	rval[0] = 0;
	rval[1] = tf->tf_r[3];

	error = mi_syscall(p, code, callp, args, rval);

	/*
	 * system call will look like:
	 *	 or r13, r0, <code>
	 *       tb0 0, r0, <128> <- exip
	 *	 br err 	  <- enip
	 *       jmp r1
	 *  err: or.u r3, r0, hi16(errno)
	 *	 st r2, r3, lo16(errno)
	 *	 subu r2, r0, 1
	 *	 jmp r1
	 *
	 * So, when we take syscall trap, exip/enip will be as
	 * shown above.
	 * Given this,
	 * 1. If the system call returned 0, need to jmp r1.
	 *    exip += 8
	 * 2. If the system call returned an errno > 0, increment
	 *    exip += 4 and plug the value in r2. This will have us
	 *    executing "br err" on return to user space.
	 * 3. If the system call code returned ERESTART,
	 *    we need to re-execute the trap instruction. leave exip as is.
	 * 4. If the system call returned EJUSTRETURN, just return.
	 *    exip += 4
	 */

	switch (error) {
	case 0:
		/* success: clear carry and resume at the "jmp r1" */
		tf->tf_r[2] = rval[0];
		tf->tf_r[3] = rval[1];
		tf->tf_epsr &= ~PSR_C;
		/* skip two instructions */
		m88110_skip_insn(tf);
		m88110_skip_insn(tf);
		break;
	case ERESTART:
		/*
		 * Reexecute the trap.
		 * exip is already at the trap instruction, so
		 * there is nothing to do.
		 */
		tf->tf_epsr &= ~PSR_C;
		break;
	case EJUSTRETURN:
		tf->tf_epsr &= ~PSR_C;
		/* skip one instruction */
		m88110_skip_insn(tf);
		break;
	default:
	bad:
		/* translate errno if this emulation has its own error table */
		if (p->p_emul->e_errno)
			error = p->p_emul->e_errno[error];
		tf->tf_r[2] = error;
		tf->tf_epsr |= PSR_C;   /* fail */
		/* skip one instruction */
		m88110_skip_insn(tf);
		break;
	}

	mi_syscall_return(p, code, error, rval);
}
1375#endif	/* M88110 */
1376
1377/*
1378 * Set up return-value registers as fork() libc stub expects,
1379 * and do normal return-to-user-mode stuff.
1380 */
1381void
1382child_return(arg)
1383	void *arg;
1384{
1385	struct proc *p = arg;
1386	struct trapframe *tf;
1387
1388	tf = (struct trapframe *)USER_REGS(p);
1389	tf->tf_r[2] = 0;
1390	tf->tf_r[3] = 0;
1391	tf->tf_epsr &= ~PSR_C;
1392	/* skip br instruction as in syscall() */
1393#ifdef M88100
1394	if (CPU_IS88100) {
1395		tf->tf_snip = (tf->tf_sfip & XIP_ADDR) | XIP_V;
1396		tf->tf_sfip = tf->tf_snip + 4;
1397	}
1398#endif
1399#ifdef M88110
1400	if (CPU_IS88110) {
1401		/* skip two instructions */
1402		m88110_skip_insn(tf);
1403		m88110_skip_insn(tf);
1404	}
1405#endif
1406
1407	KERNEL_UNLOCK();
1408
1409	mi_child_return(p);
1410}
1411
1412#ifdef PTRACE
1413
1414/*
1415 * User Single Step Debugging Support
1416 */
1417
1418#include <sys/ptrace.h>
1419
1420vaddr_t	ss_branch_taken(u_int, vaddr_t, struct reg *);
1421int	ss_get_value(struct proc *, vaddr_t, u_int *);
1422int	ss_inst_branch_or_call(u_int);
1423int	ss_put_breakpoint(struct proc *, vaddr_t, vaddr_t *, u_int *);
1424
1425#define	SYSCALL_INSTR	0xf000d080	/* tb0 0,r0,128 */
1426
1427int
1428ss_get_value(struct proc *p, vaddr_t addr, u_int *value)
1429{
1430	struct uio uio;
1431	struct iovec iov;
1432
1433	iov.iov_base = (caddr_t)value;
1434	iov.iov_len = sizeof(u_int);
1435	uio.uio_iov = &iov;
1436	uio.uio_iovcnt = 1;
1437	uio.uio_offset = (off_t)addr;
1438	uio.uio_resid = sizeof(u_int);
1439	uio.uio_segflg = UIO_SYSSPACE;
1440	uio.uio_rw = UIO_READ;
1441	uio.uio_procp = curproc;
1442	return (process_domem(curproc, p, &uio, PT_READ_I));
1443}
1444
1445int
1446ss_put_value(struct proc *p, vaddr_t addr, u_int value)
1447{
1448	struct uio uio;
1449	struct iovec iov;
1450
1451	iov.iov_base = (caddr_t)&value;
1452	iov.iov_len = sizeof(u_int);
1453	uio.uio_iov = &iov;
1454	uio.uio_iovcnt = 1;
1455	uio.uio_offset = (off_t)addr;
1456	uio.uio_resid = sizeof(u_int);
1457	uio.uio_segflg = UIO_SYSSPACE;
1458	uio.uio_rw = UIO_WRITE;
1459	uio.uio_procp = curproc;
1460	return (process_domem(curproc, p, &uio, PT_WRITE_I));
1461}
1462
1463/*
1464 * ss_branch_taken(instruction, pc, regs)
1465 *
1466 * instruction will be a control flow instruction location at address pc.
1467 * Branch taken is supposed to return the address to which the instruction
1468 * would jump if the branch is taken.
1469 *
1470 * This is different from branch_taken() in ddb, as we also need to process
1471 * system calls.
1472 */
1473vaddr_t
1474ss_branch_taken(u_int inst, vaddr_t pc, struct reg *regs)
1475{
1476	u_int regno;
1477
1478	/*
1479	 * Quick check of the instruction. Note that we know we are only
1480	 * invoked if ss_inst_branch_or_call() returns TRUE, so we do not
1481	 * need to repeat the jpm, jsr and syscall stricter checks here.
1482	 */
1483	switch (inst >> (32 - 5)) {
1484	case 0x18:	/* br */
1485	case 0x19:	/* bsr */
1486		/* signed 26 bit pc relative displacement, shift left 2 bits */
1487		inst = (inst & 0x03ffffff) << 2;
1488		/* check if sign extension is needed */
1489		if (inst & 0x08000000)
1490			inst |= 0xf0000000;
1491		return (pc + inst);
1492
1493	case 0x1a:	/* bb0 */
1494	case 0x1b:	/* bb1 */
1495	case 0x1d:	/* bcnd */
1496		/* signed 16 bit pc relative displacement, shift left 2 bits */
1497		inst = (inst & 0x0000ffff) << 2;
1498		/* check if sign extension is needed */
1499		if (inst & 0x00020000)
1500			inst |= 0xfffc0000;
1501		return (pc + inst);
1502
1503	case 0x1e:	/* jmp or jsr */
1504		regno = inst & 0x1f;	/* get the register value */
1505		return (regno == 0 ? 0 : regs->r[regno]);
1506
1507	default:	/* system call */
1508		/*
1509		 * The regular (pc + 4) breakpoint will match the error
1510		 * return. Successful system calls return at (pc + 8),
1511		 * so we'll set up a branch breakpoint there.
1512		 */
1513		return (pc + 8);
1514	}
1515}
1516
1517int
1518ss_inst_branch_or_call(u_int ins)
1519{
1520	/* check high five bits */
1521	switch (ins >> (32 - 5)) {
1522	case 0x18: /* br */
1523	case 0x19: /* bsr */
1524	case 0x1a: /* bb0 */
1525	case 0x1b: /* bb1 */
1526	case 0x1d: /* bcnd */
1527		return (TRUE);
1528	case 0x1e: /* could be jmp or jsr */
1529		if ((ins & 0xfffff3e0) == 0xf400c000)
1530			return (TRUE);
1531	}
1532
1533	return (FALSE);
1534}
1535
1536int
1537ss_put_breakpoint(struct proc *p, vaddr_t va, vaddr_t *bpva, u_int *bpsave)
1538{
1539	int rc;
1540
1541	/* Restore previous breakpoint if we did not trigger it. */
1542	if (*bpva != 0) {
1543		ss_put_value(p, *bpva, *bpsave);
1544		*bpva = 0;
1545	}
1546
1547	/* Save instruction. */
1548	if ((rc = ss_get_value(p, va, bpsave)) != 0)
1549		return (rc);
1550
1551	/* Store breakpoint instruction at the location now. */
1552	*bpva = va;
1553	return (ss_put_value(p, va, SSBREAKPOINT));
1554}
1555
1556int
1557process_sstep(struct proc *p, int sstep)
1558{
1559	struct reg *sstf = USER_REGS(p);
1560	vaddr_t pc, brpc;
1561	u_int32_t instr;
1562	int rc;
1563
1564	if (sstep == 0) {
1565		/* Restore previous breakpoints if any. */
1566		if (p->p_md.md_bp0va != 0) {
1567			ss_put_value(p, p->p_md.md_bp0va, p->p_md.md_bp0save);
1568			p->p_md.md_bp0va = 0;
1569		}
1570		if (p->p_md.md_bp1va != 0) {
1571			ss_put_value(p, p->p_md.md_bp1va, p->p_md.md_bp1save);
1572			p->p_md.md_bp1va = 0;
1573		}
1574
1575		return (0);
1576	}
1577
1578	/*
1579	 * User was stopped at pc, e.g. the instruction at pc was not executed.
1580	 * Fetch what's at the current location.
1581	 */
1582	pc = PC_REGS(sstf);
1583	if ((rc = ss_get_value(p, pc, &instr)) != 0)
1584		return (rc);
1585
1586	/*
1587	 * Find if this instruction may cause a branch, and set up a breakpoint
1588	 * at the branch location.
1589	 */
1590	if (ss_inst_branch_or_call(instr) || instr == SYSCALL_INSTR) {
1591		brpc = ss_branch_taken(instr, pc, sstf);
1592
1593		/* self-branches are hopeless */
1594		if (brpc != pc && brpc != 0) {
1595			if ((rc = ss_put_breakpoint(p, brpc,
1596			    &p->p_md.md_bp1va, &p->p_md.md_bp1save)) != 0)
1597				return (rc);
1598		}
1599	}
1600
1601	if ((rc = ss_put_breakpoint(p, pc + 4,
1602	    &p->p_md.md_bp0va, &p->p_md.md_bp0save)) != 0)
1603		return (rc);
1604
1605	return (0);
1606}
1607
1608#endif	/* PTRACE */
1609
1610#ifdef DIAGNOSTIC
void
splassert_check(int wantipl, const char *func)
{
	int curipl = getipl();

	/* Nothing to report if we are already at (or above) the wanted level. */
	if (curipl >= wantipl)
		return;

	splassert_fail(wantipl, curipl, func);
	/*
	 * Raise the spl anyway,
	 * in a feeble attempt to reduce further damage.
	 */
	(void)raiseipl(wantipl);
}
1627#endif
1628
1629/*
1630 * ld.d and st.d instructions referencing long aligned but not long long
1631 * aligned addresses will trigger a misaligned address exception.
1632 *
1633 * This routine attempts to recover these (valid) statements, by simulating
1634 * the split form of the instruction. If it fails, it returns the appropriate
1635 * signal number to deliver.
1636 *
1637 * Note that we do not attempt to do anything for .d.usr instructions - the
1638 * kernel never issues such instructions, and they cause a privileged
1639 * instruction exception from userland.
1640 */
int
double_reg_fixup(struct trapframe *frame)
{
	u_int32_t pc, instr, value;
	int regno, store;
	vaddr_t addr;

	/*
	 * Decode the faulting instruction.
	 */

	pc = PC_REGS(&frame->tf_regs);
	if (copyin((void *)pc, &instr, sizeof(u_int32_t)) != 0)
		return SIGSEGV;

	/*
	 * Recognize the triple-register forms first, then the
	 * register-plus-imm16 forms; anything else gets SIGBUS.
	 * NOTE(review): the scaled-index forms (ld.d rD, rS1[rS2]) do not
	 * match either pattern and will be rejected - confirm intended.
	 */
	switch (instr & 0xfc00ff00) {
	case 0xf4001000:	/* ld.d rD, rS1, rS2 */
		addr = frame->tf_r[(instr >> 16) & 0x1f]
		    + frame->tf_r[(instr & 0x1f)];
		store = 0;
		break;
	case 0xf4002000:	/* st.d rD, rS1, rS2 */
		addr = frame->tf_r[(instr >> 16) & 0x1f]
		    + frame->tf_r[(instr & 0x1f)];
		store = 1;
		break;
	default:
		switch (instr >> 26) {
		case 0x10000000 >> 26:	/* ld.d rD, rS, imm16 */
			addr = (instr & 0x0000ffff) +
			    frame->tf_r[(instr >> 16) & 0x1f];
			store = 0;
			break;
		case 0x20000000 >> 26:	/* st.d rD, rS, imm16 */
			addr = (instr & 0x0000ffff) +
			    frame->tf_r[(instr >> 16) & 0x1f];
			store = 1;
			break;
		default:
			return SIGBUS;
		}
		break;
	}

	/* We only handle long but not long long aligned access here */
	if ((addr & 0x07) != 4)
		return SIGBUS;

	/* the .d forms operate on the register pair (rD, rD + 1) */
	regno = (instr >> 21) & 0x1f;

	if (store) {
		/*
		 * Two word stores.
		 */
		/* r0 is wired to zero regardless of the trapframe contents */
		if (regno == 0)
			value = 0;
		else
			value = frame->tf_r[regno];
		if (copyout(&value, (void *)addr, sizeof(u_int32_t)) != 0)
			return SIGSEGV;
		/* the second half of the r31 pair wraps to r0, i.e. zero */
		if (regno == 31)
			value = 0;
		else
			value = frame->tf_r[regno + 1];
		if (copyout(&value, (void *)(addr + 4), sizeof(u_int32_t)) != 0)
			return SIGSEGV;
	} else {
		/*
		 * Two word loads. r0 should be left unaltered, but the
		 * value should still be fetched even if it is discarded.
		 */
		if (copyin((void *)addr, &value, sizeof(u_int32_t)) != 0)
			return SIGSEGV;
		if (regno != 0)
			frame->tf_r[regno] = value;
		if (copyin((void *)(addr + 4), &value, sizeof(u_int32_t)) != 0)
			return SIGSEGV;
		if (regno != 31)
			frame->tf_r[regno + 1] = value;
	}

	return 0;
}
1724
1725void
1726cache_flush(struct trapframe *tf)
1727{
1728	struct proc *p = curproc;
1729	struct pmap *pmap;
1730	paddr_t pa;
1731	vaddr_t va;
1732	vsize_t len, count;
1733
1734	p->p_md.md_tf = tf;
1735
1736	pmap = vm_map_pmap(&p->p_vmspace->vm_map);
1737	va = tf->tf_r[2];
1738	len = tf->tf_r[3];
1739
1740	if (/* va < VM_MIN_ADDRESS || */ va >= VM_MAXUSER_ADDRESS ||
1741	    va + len <= va || va + len >= VM_MAXUSER_ADDRESS)
1742		len = 0;
1743
1744	while (len != 0) {
1745		count = min(len, PAGE_SIZE - (va & PAGE_MASK));
1746		if (pmap_extract(pmap, va, &pa) != FALSE)
1747			dma_cachectl(pa, count, DMA_CACHE_SYNC);
1748		va += count;
1749		len -= count;
1750	}
1751
1752#ifdef M88100
1753	if (CPU_IS88100) {
1754		tf->tf_snip = tf->tf_snip & ~NIP_E;
1755		tf->tf_sfip = tf->tf_sfip & ~FIP_E;
1756	}
1757#endif
1758#ifdef M88110
1759	if (CPU_IS88110) {
1760		/* skip instruction */
1761		m88110_skip_insn(tf);
1762	}
1763#endif
1764
1765	userret(p);
1766}
1767