trap.c revision 1.92
1/*	$OpenBSD: trap.c,v 1.92 2014/05/10 05:33:00 guenther Exp $	*/
2/*
3 * Copyright (c) 2004, Miodrag Vallat.
4 * Copyright (c) 1998 Steve Murphree, Jr.
5 * Copyright (c) 1996 Nivas Madhur
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *      This product includes software developed by Nivas Madhur.
19 * 4. The name of the author may not be used to endorse or promote products
20 *    derived from this software without specific prior written permission
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34/*
35 * Mach Operating System
36 * Copyright (c) 1991 Carnegie Mellon University
37 * Copyright (c) 1991 OMRON Corporation
38 * All Rights Reserved.
39 *
40 * Permission to use, copy, modify and distribute this software and its
41 * documentation is hereby granted, provided that both the copyright
42 * notice and this permission notice appear in all copies of the
43 * software, derivative works or modified versions, and any portions
44 * thereof, and that both notices appear in supporting documentation.
45 *
46 */
47
48#include <sys/types.h>
49#include <sys/param.h>
50#include <sys/proc.h>
51#include <sys/signalvar.h>
52#include <sys/user.h>
53#include <sys/syscall.h>
54#include <sys/syscall_mi.h>
55#include <sys/systm.h>
56
57#include <uvm/uvm_extern.h>
58
59#include <machine/asm_macro.h>
60#include <machine/cmmu.h>
61#include <machine/cpu.h>
62#ifdef M88100
63#include <machine/m88100.h>
64#include <machine/m8820x.h>
65#endif
66#ifdef M88110
67#include <machine/m88110.h>
68#endif
69#include <machine/fpu.h>
70#include <machine/pcb.h>
71#include <machine/psl.h>
72#include <machine/trap.h>
73
74#include <machine/db_machdep.h>
75
76#define SSBREAKPOINT (0xF000D1F8U) /* Single Step Breakpoint */
77
78#define USERMODE(PSR)   (((PSR) & PSR_MODE) == 0)
79#define SYSTEMMODE(PSR) (((PSR) & PSR_MODE) != 0)
80
81void printtrap(int, struct trapframe *);
82__dead void panictrap(int, struct trapframe *);
83__dead void error_fatal(struct trapframe *);
84int double_reg_fixup(struct trapframe *, int);
85int ss_put_value(struct proc *, vaddr_t, u_int);
86
87extern void regdump(struct trapframe *f);
88
/*
 * Human-readable names for the processor exception vectors, indexed by
 * trap type.  Used by panictrap() to build the panic message.
 *
 * Note: entry 6 previously lacked a trailing comma, so "Privilege
 * Violation" and "Bounds Check Violation" were fused into one string,
 * shifting every subsequent name and shortening the table by one.
 */
const char *trap_type[] = {
	"Reset",
	"Interrupt Exception",
	"Instruction Access",
	"Data Access Exception",
	"Misaligned Access",
	"Unimplemented Opcode",
	"Privilege Violation",
	"Bounds Check Violation",
	"Illegal Integer Divide",
	"Integer Overflow",
	"Error Exception",
	"Non-Maskable Exception",
};

/* Number of named trap types; panictrap() prints a number past this. */
const int trap_types = sizeof trap_type / sizeof trap_type[0];
105
#ifdef M88100
/*
 * Names of the 88100 P bus (CMMU) fault codes, indexed by the value
 * extracted with CMMU_PFSR_FAULT() from a PFSR register.  Used only
 * for diagnostic printfs in printtrap() and the TRAPDEBUG output.
 */
const char *pbus_exception_type[] = {
	"Success (No Fault)",
	"unknown 1",
	"unknown 2",
	"Bus Error",
	"Segment Fault",
	"Page Fault",
	"Supervisor Violation",
	"Write Violation",
};
#endif
118
/*
 * Print a human-readable description of a trap, including the faulting
 * virtual address and the trap frame location, then (with DDB) dump the
 * register contents.  Called once from panictrap() before panicking.
 */
void
printtrap(int type, struct trapframe *frame)
{
#ifdef M88100
	if (CPU_IS88100) {
		switch (type) {
		case 2:		/* instruction access exception */
			printf("\nInstr access fault (%s) v = %lx, frame %p\n",
			    pbus_exception_type[
			      CMMU_PFSR_FAULT(frame->tf_ipfsr)],
			    frame->tf_sxip & XIP_ADDR, frame);
			break;
		case 3:		/* data access exception */
			printf("\nData access fault (%s) v = %lx, frame %p\n",
			    pbus_exception_type[
			      CMMU_PFSR_FAULT(frame->tf_dpfsr)],
			    frame->tf_sxip & XIP_ADDR, frame);
			break;
		default:
			printf("\nTrap type %d, v = %lx, frame %p\n",
			    type, frame->tf_sxip & XIP_ADDR, frame);
			break;
		}
	}
#endif
#ifdef M88110
	if (CPU_IS88110) {
		printf("\nTrap type %d, v = %x, frame %p\n",
		    type, frame->tf_exip, frame);
	}
#endif
#ifdef DDB
	regdump(frame);
#endif
}
151
152__dead void
153panictrap(int type, struct trapframe *frame)
154{
155	static int panicing = 0;
156
157	if (panicing++ == 0)
158		printtrap(type, frame);
159	if ((u_int)type < trap_types)
160		panic(trap_type[type]);
161	else
162		panic("trap %d", type);
163	/*NOTREACHED*/
164}
165
166/*
167 * Handle external interrupts.
168 */
169void
170interrupt(struct trapframe *frame)
171{
172	struct cpu_info *ci = curcpu();
173
174	ci->ci_intrdepth++;
175	md_interrupt_func(frame);
176	ci->ci_intrdepth--;
177}
178
#ifdef M88110
/*
 * Handle non-maskable interrupts (88110 only), by deferring to the
 * board-specific handler.  Returns that handler's value; presumably
 * nonzero when the NMI was claimed -- TODO confirm against the
 * md_nmi_func implementations.
 */
int
nmi(struct trapframe *frame)
{
	return md_nmi_func(frame);
}

/*
 * Reenable non-maskable interrupts after one has been serviced, via
 * the board-specific routine.
 */
void
nmi_wrapup(struct trapframe *frame)
{
	md_nmi_wrapup_func(frame);
}
#endif
198
199/*
200 * Handle asynchronous software traps.
201 */
202void
203ast(struct trapframe *frame)
204{
205	struct cpu_info *ci = curcpu();
206	struct proc *p = ci->ci_curproc;
207
208	p->p_md.md_astpending = 0;
209
210	mi_ast(p, ci->ci_want_resched);
211	userret(p);
212}
213
#ifdef M88100
/*
 * Trap handler for the 88100 processor.
 *
 * Kernel-mode faults are either resolved in place (data faults on user
 * addresses fall through to the user fault path; faults in copyin/copyout
 * are recovered through pcb_onfault) or end in panictrap().  User-mode
 * faults are resolved with uvm_fault() where possible; otherwise a signal
 * described by sig/fault_type/fault_code is posted before returning to
 * user mode through userret().
 *
 * Note the 88100-specific bookkeeping: once a data fault is resolved,
 * data_access_emulation() drains the data unit pipeline, and tf_dmt0 /
 * tf_dpfsr (or tf_ipfsr) must be cleared so the trap does not recur
 * on the return from exception.
 */
void
m88100_trap(u_int type, struct trapframe *frame)
{
	struct proc *p;
	struct vm_map *map;
	vaddr_t va, pcb_onfault;
	vm_prot_t ftype;
	int fault_type, pbus_type;
	u_long fault_code;
	vaddr_t fault_addr;
	struct vmspace *vm;
	union sigval sv;
	int result;
#ifdef DDB
	int s;
	u_int psr;
#endif
	int sig = 0;

	uvmexp.traps++;
	if ((p = curproc) == NULL)
		p = &proc0;

	if (USERMODE(frame->tf_epsr)) {
		type += T_USER;
		p->p_md.md_tf = frame;	/* for ptrace/signals */
		refreshcreds(p);
	}
	fault_type = SI_NOINFO;
	fault_code = 0;
	fault_addr = frame->tf_sxip & XIP_ADDR;

	switch (type) {
	default:
	case T_ILLFLT:
lose:
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

#if defined(DDB)
	case T_KDB_BREAK:
		/* enter the kernel debugger with interrupts disabled */
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_ENTRY:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
#endif /* DDB */
	case T_MISALGNFLT:
		/* kernel misaligned accesses are fatal */
		printf("kernel misaligned access exception @0x%08lx\n",
		    frame->tf_sxip);
		goto lose;
	case T_INSTFLT:
		/* kernel mode instruction access fault.
		 * Should never, never happen for a non-paged kernel.
		 */
#ifdef TRAPDEBUG
		pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
		printf("Kernel Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
		    pbus_type, pbus_exception_type[pbus_type],
		    fault_addr, frame, frame->tf_cpu);
#endif
		goto lose;
	case T_DATAFLT:
		/* kernel mode data fault */

		/* data fault on the user address? */
		if ((frame->tf_dmt0 & DMT_DAS) == 0) {
			KERNEL_LOCK();
			goto user_fault;
		}

		fault_addr = frame->tf_dma0;
		if (frame->tf_dmt0 & (DMT_WRITE|DMT_LOCKBAR)) {
			ftype = VM_PROT_READ|VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		} else {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		}

		va = trunc_page((vaddr_t)fault_addr);

		KERNEL_LOCK();
		vm = p->p_vmspace;
		map = kernel_map;

		pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
#ifdef TRAPDEBUG
		printf("Kernel Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
		    pbus_type, pbus_exception_type[pbus_type],
		    fault_addr, frame, frame->tf_cpu);
#endif

		pcb_onfault = p->p_addr->u_pcb.pcb_onfault;
		switch (pbus_type) {
		case CMMU_PFSR_SUCCESS:
			/*
			 * The fault was resolved. Call data_access_emulation
			 * to drain the data unit pipe line and reset dmt0
			 * so that trap won't get called again.
			 */
			p->p_addr->u_pcb.pcb_onfault = 0;
			data_access_emulation((u_int *)frame);
			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			frame->tf_dmt0 = 0;
			frame->tf_dpfsr = 0;
			KERNEL_UNLOCK();
			return;
		case CMMU_PFSR_SFAULT:
		case CMMU_PFSR_PFAULT:
			/* segment or page fault: let uvm resolve it */
			p->p_addr->u_pcb.pcb_onfault = 0;
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			if (result == 0) {
				/*
				 * We could resolve the fault. Call
				 * data_access_emulation to drain the data
				 * unit pipe line and reset dmt0 so that trap
				 * won't get called again.
				 */
				p->p_addr->u_pcb.pcb_onfault = 0;
				data_access_emulation((u_int *)frame);
				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
				frame->tf_dmt0 = 0;
				frame->tf_dpfsr = 0;
				KERNEL_UNLOCK();
				return;
			} else if (pcb_onfault != 0) {
				/*
				 * This could be a fault caused in copyout*()
				 * while accessing kernel space.
				 */
				frame->tf_snip = pcb_onfault | NIP_V;
				frame->tf_sfip = (pcb_onfault + 4) | FIP_V;
				frame->tf_sxip = 0;
				/*
				 * Continue as if the fault had been resolved,
				 * but do not try to complete the faulting
				 * access.
				 */
				frame->tf_dmt0 = 0;
				frame->tf_dpfsr = 0;
				KERNEL_UNLOCK();
				return;
			}
			break;
		}
#ifdef TRAPDEBUG
		printf("PBUS Fault %d (%s) va = 0x%x\n", pbus_type,
		    pbus_exception_type[pbus_type], va);
#endif
		KERNEL_UNLOCK();
		goto lose;
		/* NOTREACHED */
	case T_INSTFLT+T_USER:
		/* User mode instruction access fault */
		/* FALLTHROUGH */
	case T_DATAFLT+T_USER:
		KERNEL_LOCK();
user_fault:
		if (type == T_INSTFLT + T_USER) {
			pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
#ifdef TRAPDEBUG
			printf("User Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
			    pbus_type, pbus_exception_type[pbus_type],
			    fault_addr, frame, frame->tf_cpu);
#endif
		} else {
			fault_addr = frame->tf_dma0;
			pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
#ifdef TRAPDEBUG
			printf("User Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
			    pbus_type, pbus_exception_type[pbus_type],
			    fault_addr, frame, frame->tf_cpu);
#endif
		}

		if (frame->tf_dmt0 & (DMT_WRITE | DMT_LOCKBAR)) {
			ftype = VM_PROT_READ | VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		} else {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		}

		va = trunc_page((vaddr_t)fault_addr);

		vm = p->p_vmspace;
		map = &vm->vm_map;
		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
			p->p_addr->u_pcb.pcb_onfault = 0;

		/* Call uvm_fault() to resolve non-bus error faults */
		switch (pbus_type) {
		case CMMU_PFSR_SUCCESS:
			result = 0;
			break;
		case CMMU_PFSR_BERROR:
			result = EACCES;
			break;
		default:
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			if (result == EACCES)
				result = EFAULT;
			break;
		}

		p->p_addr->u_pcb.pcb_onfault = pcb_onfault;

		/* faults below the stack ceiling may grow the stack */
		if (result == 0 && (caddr_t)va >= vm->vm_maxsaddr)
			uvm_grow(p, va);

		if (result == 0) {
			if (type == T_INSTFLT + T_USER) {
				/*
				 * back up SXIP, SNIP,
				 * clearing the Error bit
				 */
				frame->tf_sfip = frame->tf_snip & ~FIP_E;
				frame->tf_snip = frame->tf_sxip & ~NIP_E;
				frame->tf_ipfsr = 0;
			} else {
				/*
				 * We could resolve the fault. Call
				 * data_access_emulation to drain the data unit
				 * pipe line and reset dmt0 so that trap won't
				 * get called again.
				 */
				p->p_addr->u_pcb.pcb_onfault = 0;
				data_access_emulation((u_int *)frame);
				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
				frame->tf_dmt0 = 0;
				frame->tf_dpfsr = 0;
			}
		} else {
			/*
			 * This could be a fault caused in copyin*()
			 * while accessing user space.
			 */
			if (pcb_onfault != 0) {
				frame->tf_snip = pcb_onfault | NIP_V;
				frame->tf_sfip = (pcb_onfault + 4) | FIP_V;
				frame->tf_sxip = 0;
				/*
				 * Continue as if the fault had been resolved,
				 * but do not try to complete the faulting
				 * access.
				 */
				frame->tf_dmt0 = 0;
				frame->tf_dpfsr = 0;
			} else {
				sig = result == EACCES ? SIGBUS : SIGSEGV;
				fault_type = result == EACCES ?
				    BUS_ADRERR : SEGV_MAPERR;
			}
		}
		KERNEL_UNLOCK();
		break;
	case T_MISALGNFLT+T_USER:
		/* Fix any misaligned ld.d or st.d instructions */
		sig = double_reg_fixup(frame, T_MISALGNFLT);
		fault_type = BUS_ADRALN;
		break;
	case T_PRIVINFLT+T_USER:
	case T_ILLFLT+T_USER:
#ifndef DDB
	case T_KDB_BREAK:
	case T_KDB_ENTRY:
#endif
	case T_KDB_BREAK+T_USER:
	case T_KDB_ENTRY+T_USER:
	case T_KDB_TRACE:
	case T_KDB_TRACE+T_USER:
		sig = SIGILL;
		break;
	case T_BNDFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_ZERODIV+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTDIV;
		break;
	case T_OVFFLT+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTOVF;
		break;
	case T_FPEPFLT+T_USER:
		m88100_fpu_precise_exception(frame);
		goto maysigfpe;
	case T_FPEIFLT+T_USER:
		m88100_fpu_imprecise_exception(frame);
maysigfpe:
		/* Check for a SIGFPE condition */
		if (frame->tf_fpsr & frame->tf_fpcr) {
			sig = SIGFPE;
			/* map the FPECR cause bits onto an si_code value */
			if (frame->tf_fpecr & FPECR_FIOV)
				fault_type = FPE_FLTSUB;
			else if (frame->tf_fpecr & FPECR_FROP)
				fault_type = FPE_FLTINV;
			else if (frame->tf_fpecr & FPECR_FDVZ)
				fault_type = FPE_INTDIV;
			else if (frame->tf_fpecr & FPECR_FUNF) {
				if (frame->tf_fpsr & FPSR_EFUNF)
					fault_type = FPE_FLTUND;
				else if (frame->tf_fpsr & FPSR_EFINX)
					fault_type = FPE_FLTRES;
			} else if (frame->tf_fpecr & FPECR_FOVF) {
				if (frame->tf_fpsr & FPSR_EFOVF)
					fault_type = FPE_FLTOVF;
				else if (frame->tf_fpsr & FPSR_EFINX)
					fault_type = FPE_FLTRES;
			} else if (frame->tf_fpecr & FPECR_FINX)
				fault_type = FPE_FLTRES;
		}
		break;
	case T_SIGSYS+T_USER:
		sig = SIGSYS;
		break;
	case T_STEPBPT+T_USER:
#ifdef PTRACE
		/*
		 * This trap is used by the kernel to support single-step
		 * debugging (although any user could generate this trap
		 * which should probably be handled differently). When a
		 * process is continued by a debugger with the PT_STEP
		 * function of ptrace (single step), the kernel inserts
		 * one or two breakpoints in the user process so that only
		 * one instruction (or two in the case of a delayed branch)
		 * is executed.  When this breakpoint is hit, we get the
		 * T_STEPBPT trap.
		 */
		{
			u_int instr;
			vaddr_t pc = PC_REGS(&frame->tf_regs);

			/* read break instruction */
			copyin((caddr_t)pc, &instr, sizeof(u_int));

			/* check and see if we got here by accident */
			if ((p->p_md.md_bp0va != pc &&
			     p->p_md.md_bp1va != pc) ||
			    instr != SSBREAKPOINT) {
				sig = SIGTRAP;
				fault_type = TRAP_TRACE;
				break;
			}

			/* restore original instruction and clear breakpoint */
			if (p->p_md.md_bp0va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp0save);
				p->p_md.md_bp0va = 0;
			}
			if (p->p_md.md_bp1va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp1save);
				p->p_md.md_bp1va = 0;
			}

#if 1
			/* resume execution at the breakpoint address */
			frame->tf_sfip = frame->tf_snip;
			frame->tf_snip = pc | NIP_V;
#endif
			sig = SIGTRAP;
			fault_type = TRAP_BRKPT;
		}
#else
		sig = SIGTRAP;
		fault_type = TRAP_TRACE;
#endif
		break;

	case T_USERBPT+T_USER:
		/*
		 * This trap is meant to be used by debuggers to implement
		 * breakpoint debugging.  When we get this trap, we just
		 * return a signal which gets caught by the debugger.
		 */
		frame->tf_sfip = frame->tf_snip;
		frame->tf_snip = frame->tf_sxip;
		sig = SIGTRAP;
		fault_type = TRAP_BRKPT;
		break;

	}

	/*
	 * If trap from supervisor mode, just return
	 */
	if (type < T_USER)
		return;

	if (sig) {
		sv.sival_ptr = (void *)fault_addr;
		KERNEL_LOCK();
		trapsignal(p, sig, fault_code, fault_type, sv);
		KERNEL_UNLOCK();
		/*
		 * don't want multiple faults - we are going to
		 * deliver signal.
		 */
		frame->tf_dmt0 = 0;
		frame->tf_ipfsr = frame->tf_dpfsr = 0;
	}

	userret(p);
}
#endif /* M88100 */
630
#ifdef M88110
/*
 * Trap handler for the 88110 processor.
 *
 * Dispatches on the exception type.  Kernel-mode faults are either
 * resolved via uvm_fault() (with copyin/copyout recovery through
 * pcb_onfault) or end in panictrap(); user-mode faults are resolved
 * where possible, otherwise a signal described by
 * sig/fault_type/fault_code is posted before returning to user mode
 * through userret().  Also works around two documented 88110 hardware
 * issues: errata #16 (mis-incremented enip on a delay-slot tablewalk
 * exception) and a spurious supervisor instruction fault apparently
 * related to branch prediction.
 */
void
m88110_trap(u_int type, struct trapframe *frame)
{
	struct proc *p;
	struct vm_map *map;
	vaddr_t va, pcb_onfault;
	vm_prot_t ftype;
	int fault_type;
	u_long fault_code;
	vaddr_t fault_addr;
	struct vmspace *vm;
	union sigval sv;
	int result;
#ifdef DDB
        int s;
	u_int psr;
#endif
	int sig = 0;

	uvmexp.traps++;
	if ((p = curproc) == NULL)
		p = &proc0;

	fault_type = SI_NOINFO;
	fault_code = 0;
	fault_addr = frame->tf_exip & XIP_ADDR;

	/*
	 * 88110 errata #16 (4.2) or #3 (5.1.1):
	 * ``bsr, br, bcnd, jsr and jmp instructions with the .n extension
	 *   can cause the enip value to be incremented by 4 incorrectly
	 *   if the instruction in the delay slot is the first word of a
	 *   page which misses in the mmu and results in a hardware
	 *   tablewalk which encounters an exception or an invalid
	 *   descriptor.  The exip value in this case will point to the
	 *   first word of the page, and the D bit will be set.
	 *
	 *   Note: if the instruction is a jsr.n r1, r1 will be overwritten
	 *   with erroneous data.  Therefore, no recovery is possible. Do
	 *   not allow this instruction to occupy the last word of a page.
	 *
	 *   Suggested fix: recover in general by backing up the exip by 4
	 *   and clearing the delay bit before an rte when the lower 3 hex
	 *   digits of the exip are 001.''
	 */
	if ((frame->tf_exip & PAGE_MASK) == 0x00000001 && type == T_INSTFLT) {
		u_int instr;

		/*
		 * Note that we have initialized fault_addr above, so that
		 * signals provide the correct address if necessary.
		 */
		frame->tf_exip = (frame->tf_exip & ~1) - 4;

		/*
		 * Check the instruction at the (backed up) exip.
		 * If it is a jsr.n, abort.
		 */
		if (!USERMODE(frame->tf_epsr)) {
			instr = *(u_int *)fault_addr;
			if (instr == 0xf400cc01)
				panic("mc88110 errata #16, exip %p enip %p",
				    (frame->tf_exip + 4) | 1, frame->tf_enip);
		} else {
			/* copyin here should not fail */
			if (copyin((const void *)frame->tf_exip, &instr,
			    sizeof instr) == 0 &&
			    instr == 0xf400cc01) {
				uprintf("mc88110 errata #16, exip %p enip %p",
				    (frame->tf_exip + 4) | 1, frame->tf_enip);
				sig = SIGILL;
			}
		}
	}

	if (USERMODE(frame->tf_epsr)) {
		type += T_USER;
		p->p_md.md_tf = frame;	/* for ptrace/signals */
		refreshcreds(p);
	}

	/* a signal may already be pending from the errata check above */
	if (sig != 0)
		goto deliver;

	switch (type) {
	default:
lose:
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

#ifdef DEBUG
	case T_110_DRM+T_USER:
	case T_110_DRM:
		printf("DMMU read miss: Hardware Table Searches should be enabled!\n");
		goto lose;
	case T_110_DWM+T_USER:
	case T_110_DWM:
		printf("DMMU write miss: Hardware Table Searches should be enabled!\n");
		goto lose;
	case T_110_IAM+T_USER:
	case T_110_IAM:
		printf("IMMU miss: Hardware Table Searches should be enabled!\n");
		goto lose;
#endif

#ifdef DDB
	case T_KDB_TRACE:
		/* enter the kernel debugger with interrupts disabled */
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_TRACE, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_BREAK:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_ENTRY:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
		set_psr(psr);
		/* skip trap instruction */
		m88110_skip_insn(frame);
		splx(s);
		return;
#endif /* DDB */
	case T_ILLFLT:
		/*
		 * The 88110 seems to trigger an instruction fault in
		 * supervisor mode when running the following sequence:
		 *
		 *	bcnd.n cond, reg, 1f
		 *	arithmetic insn
		 *	...
		 *  	the same exact arithmetic insn
		 *  1:	another arithmetic insn stalled by the previous one
		 *	...
		 *
		 * The exception is reported with exip pointing to the
		 * branch address. I don't know, at this point, if there
		 * is any better workaround than the aggressive one
		 * implemented below; I don't see how this could relate to
		 * any of the 88110 errata (although it might be related to
		 * branch prediction).
		 *
		 * For the record, the exact sequence triggering the
		 * spurious exception is:
		 *
		 *	bcnd.n	eq0, r2,  1f
		 *	 or	r25, r0,  r22
		 *	bsr	somewhere
		 *	or	r25, r0,  r22
		 *  1:	cmp	r13, r25, r20
		 *
		 * within the same cache line.
		 *
		 * Simply ignoring the exception and returning does not
		 * cause the exception to disappear. Clearing the
		 * instruction cache works, but on 88110+88410 systems,
		 * the 88410 needs to be invalidated as well. (note that
		 * the size passed to the flush routines does not matter
		 * since there is no way to flush a subset of the 88110
		 * I$ anyway)
		 */
	    {
		extern void *kernel_text, *etext;

		if (fault_addr >= (vaddr_t)&kernel_text &&
		    fault_addr < (vaddr_t)&etext) {
			cmmu_icache_inv(curcpu()->ci_cpuid,
			    trunc_page(fault_addr), PAGE_SIZE);
			cmmu_cache_wbinv(curcpu()->ci_cpuid,
			    trunc_page(fault_addr), PAGE_SIZE);
			return;
		}
	    }
		goto lose;
	case T_MISALGNFLT:
		/* kernel misaligned accesses are fatal */
		printf("kernel misaligned access exception @%p\n",
		    frame->tf_exip);
		goto lose;
	case T_INSTFLT:
		/* kernel mode instruction access fault.
		 * Should never, never happen for a non-paged kernel.
		 */
#ifdef TRAPDEBUG
		printf("Kernel Instruction fault exip %x isr %x ilar %x\n",
		    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
#endif
		goto lose;

	case T_DATAFLT:
		/* kernel mode data fault */

		/* data fault on the user address? */
		if ((frame->tf_dsr & CMMU_DSR_SU) == 0) {
			KERNEL_LOCK();
			goto m88110_user_fault;
		}

#ifdef TRAPDEBUG
		printf("Kernel Data access fault exip %x dsr %x dlar %x\n",
		    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
#endif

		fault_addr = frame->tf_dlar;
		if (frame->tf_dsr & CMMU_DSR_RW) {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		} else {
			ftype = VM_PROT_READ|VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		}

		va = trunc_page((vaddr_t)fault_addr);

		KERNEL_LOCK();
		vm = p->p_vmspace;
		map = kernel_map;

		if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
			/*
			 * On a segment or a page fault, call uvm_fault() to
			 * resolve the fault.
			 */
			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
				p->p_addr->u_pcb.pcb_onfault = 0;
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			/*
			 * This could be a fault caused in copyout*()
			 * while accessing kernel space.
			 */
			if (result != 0 && pcb_onfault != 0) {
				frame->tf_exip = pcb_onfault;
				/*
				 * Continue as if the fault had been resolved.
				 */
				result = 0;
			}
			if (result == 0) {
				KERNEL_UNLOCK();
				return;
			}
		}
		KERNEL_UNLOCK();
		goto lose;
	case T_INSTFLT+T_USER:
		/* User mode instruction access fault */
		/* FALLTHROUGH */
	case T_DATAFLT+T_USER:
		KERNEL_LOCK();
m88110_user_fault:
		if (type == T_INSTFLT+T_USER) {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
#ifdef TRAPDEBUG
			printf("User Instruction fault exip %x isr %x ilar %x\n",
			    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
#endif
		} else {
			fault_addr = frame->tf_dlar;
			if (frame->tf_dsr & CMMU_DSR_RW) {
				ftype = VM_PROT_READ;
				fault_code = VM_PROT_READ;
			} else {
				ftype = VM_PROT_READ|VM_PROT_WRITE;
				fault_code = VM_PROT_WRITE;
			}
#ifdef TRAPDEBUG
			printf("User Data access fault exip %x dsr %x dlar %x\n",
			    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
#endif
		}

		va = trunc_page((vaddr_t)fault_addr);

		vm = p->p_vmspace;
		map = &vm->vm_map;
		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
			p->p_addr->u_pcb.pcb_onfault = 0;

		/*
		 * Call uvm_fault() to resolve non-bus error faults
		 * whenever possible.
		 */
		if (type == T_INSTFLT+T_USER) {
			/* instruction faults */
			if (frame->tf_isr &
			    (CMMU_ISR_BE | CMMU_ISR_SP | CMMU_ISR_TBE)) {
				/* bus error, supervisor protection */
				result = EACCES;
			} else
			if (frame->tf_isr & (CMMU_ISR_SI | CMMU_ISR_PI)) {
				/* segment or page fault */
				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
				if (result == EACCES)
					result = EFAULT;
			} else {
#ifdef TRAPDEBUG
				printf("Unexpected Instruction fault isr %x\n",
				    frame->tf_isr);
#endif
				KERNEL_UNLOCK();
				goto lose;
			}
		} else {
			/* data faults */
			if (frame->tf_dsr & CMMU_DSR_BE) {
				/* bus error */
				result = EACCES;
			} else
			if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
				/* segment or page fault */
				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
				if (result == EACCES)
					result = EFAULT;
			} else
			if (frame->tf_dsr & (CMMU_DSR_CP | CMMU_DSR_WA)) {
				/* copyback or write allocate error */
				result = EACCES;
			} else
			if (frame->tf_dsr & CMMU_DSR_WE) {
				/* write fault  */
				/* This could be a write protection fault or an
				 * exception to set the used and modified bits
				 * in the pte. Basically, if we got a write
				 * error, then we already have a pte entry that
				 * faulted in from a previous seg fault or page
				 * fault.
				 * Get the pte and check the status of the
				 * modified and valid bits to determine if this
				 * indeed a real write fault.  XXX smurph
				 */
				if (pmap_set_modify(map->pmap, va)) {
#ifdef TRAPDEBUG
					printf("Corrected userland write fault, pmap %p va %p\n",
					    map->pmap, va);
#endif
					result = 0;
				} else {
					/* must be a real wp fault */
#ifdef TRAPDEBUG
					printf("Uncorrected userland write fault, pmap %p va %p\n",
					    map->pmap, va);
#endif
					result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
					if (result == EACCES)
						result = EFAULT;
				}
			} else {
#ifdef TRAPDEBUG
				printf("Unexpected Data access fault dsr %x\n",
				    frame->tf_dsr);
#endif
				KERNEL_UNLOCK();
				goto lose;
			}
		}
		p->p_addr->u_pcb.pcb_onfault = pcb_onfault;

		/* faults below the stack ceiling may grow the stack */
		if (result == 0 && (caddr_t)va >= vm->vm_maxsaddr)
			uvm_grow(p, va);
		KERNEL_UNLOCK();

		/*
		 * This could be a fault caused in copyin*()
		 * while accessing user space.
		 */
		if (result != 0 && pcb_onfault != 0) {
			frame->tf_exip = pcb_onfault;
			/*
			 * Continue as if the fault had been resolved.
			 */
			result = 0;
		}

		if (result != 0) {
			sig = result == EACCES ? SIGBUS : SIGSEGV;
			fault_type = result == EACCES ?
			    BUS_ADRERR : SEGV_MAPERR;
		}
		break;
	case T_MISALGNFLT+T_USER:
		/* Fix any misaligned ld.d or st.d instructions */
		sig = double_reg_fixup(frame, T_MISALGNFLT);
		fault_type = BUS_ADRALN;
		if (sig == 0) {
			/* skip recovered instruction */
			m88110_skip_insn(frame);
			goto userexit;
		}
		break;
	case T_ILLFLT+T_USER:
		/* Fix any ld.d or st.d instruction with an odd register */
		sig = double_reg_fixup(frame, T_ILLFLT);
		fault_type = ILL_PRVREG;
		if (sig == 0) {
			/* skip recovered instruction */
			m88110_skip_insn(frame);
			goto userexit;
		}
		break;
	case T_PRIVINFLT+T_USER:
		fault_type = ILL_PRVREG;
		/* FALLTHROUGH */
#ifndef DDB
	case T_KDB_BREAK:
	case T_KDB_ENTRY:
	case T_KDB_TRACE:
#endif
	case T_KDB_BREAK+T_USER:
	case T_KDB_ENTRY+T_USER:
	case T_KDB_TRACE+T_USER:
		sig = SIGILL;
		break;
	case T_BNDFLT+T_USER:
		sig = SIGFPE;
		/* skip trap instruction */
		m88110_skip_insn(frame);
		break;
	case T_ZERODIV+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTDIV;
		/* skip trap instruction */
		m88110_skip_insn(frame);
		break;
	case T_OVFFLT+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTOVF;
		/* skip trap instruction */
		m88110_skip_insn(frame);
		break;
	case T_FPEPFLT+T_USER:
		m88110_fpu_exception(frame);
		goto userexit;
	case T_SIGSYS+T_USER:
		sig = SIGSYS;
		break;
	case T_STEPBPT+T_USER:
#ifdef PTRACE
		/*
		 * This trap is used by the kernel to support single-step
		 * debugging (although any user could generate this trap
		 * which should probably be handled differently). When a
		 * process is continued by a debugger with the PT_STEP
		 * function of ptrace (single step), the kernel inserts
		 * one or two breakpoints in the user process so that only
		 * one instruction (or two in the case of a delayed branch)
		 * is executed.  When this breakpoint is hit, we get the
		 * T_STEPBPT trap.
		 */
		{
			u_int instr;
			vaddr_t pc = PC_REGS(&frame->tf_regs);

			/* read break instruction */
			copyin((caddr_t)pc, &instr, sizeof(u_int));

			/* check and see if we got here by accident */
			if ((p->p_md.md_bp0va != pc &&
			     p->p_md.md_bp1va != pc) ||
			    instr != SSBREAKPOINT) {
				sig = SIGTRAP;
				fault_type = TRAP_TRACE;
				break;
			}

			/* restore original instruction and clear breakpoint */
			if (p->p_md.md_bp0va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp0save);
				p->p_md.md_bp0va = 0;
			}
			if (p->p_md.md_bp1va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp1save);
				p->p_md.md_bp1va = 0;
			}

			sig = SIGTRAP;
			fault_type = TRAP_BRKPT;
		}
#else
		sig = SIGTRAP;
		fault_type = TRAP_TRACE;
#endif
		break;
	case T_USERBPT+T_USER:
		/*
		 * This trap is meant to be used by debuggers to implement
		 * breakpoint debugging.  When we get this trap, we just
		 * return a signal which gets caught by the debugger.
		 */
		sig = SIGTRAP;
		fault_type = TRAP_BRKPT;
		break;
	}

	/*
	 * If trap from supervisor mode, just return
	 */
	if (type < T_USER)
		return;

	if (sig) {
deliver:
		sv.sival_ptr = (void *)fault_addr;
		KERNEL_LOCK();
		trapsignal(p, sig, fault_code, fault_type, sv);
		KERNEL_UNLOCK();
	}

userexit:
	userret(p);
}
#endif /* M88110 */
1152
1153__dead void
1154error_fatal(struct trapframe *frame)
1155{
1156	if (frame->tf_vector == 0)
1157		printf("\nCPU %d Reset Exception\n", cpu_number());
1158	else
1159		printf("\nCPU %d Error Exception\n", cpu_number());
1160
1161#ifdef DDB
1162	regdump((struct trapframe*)frame);
1163#endif
1164	panic("unrecoverable exception %ld", frame->tf_vector);
1165}
1166
1167#ifdef M88100
/*
 * m88100_syscall(code, tf)
 *
 * System call dispatcher for the MC88100.  Decodes the system call
 * number and arguments out of the trapframe, runs the call through
 * mi_syscall(), and then adjusts the saved pipeline registers
 * (sxip/snip/sfip) so that the return to userland resumes at the
 * correct instruction for the call's outcome.
 */
void
m88100_syscall(register_t code, struct trapframe *tf)
{
	int i, nsys, nap;
	struct sysent *callp;
	struct proc *p = curproc;
	int error;
	register_t args[8], rval[2], *ap;

	uvmexp.syscalls++;

	/* pick up the syscall table of the emulation this process runs under */
	callp = p->p_p->ps_emul->e_sysent;
	nsys  = p->p_p->ps_emul->e_nsysent;

	p->p_md.md_tf = tf;

	/*
	 * For 88k, all the arguments are passed in the registers (r2-r9),
	 * and further arguments (if any) on stack.
	 * For syscall (and __syscall), r2 (and r3) has the actual code.
	 * __syscall  takes a quad syscall number, so that other
	 * arguments are at their natural alignments.
	 */
	ap = &tf->tf_r[2];
	nap = 8; /* r2-r9 */

	switch (code) {
	case SYS_syscall:
		/* indirect syscall: real code in r2, arguments shift down */
		code = *ap++;
		nap--;
		break;
	case SYS___syscall:
		/* quad code form is only honored for the native syscall table */
		if (callp != sysent)
			break;
		code = ap[_QUAD_LOWWORD];
		ap += 2;
		nap -= 2;
		break;
	}

	/* out-of-range codes are routed to the emulation's nosys entry */
	if (code < 0 || code >= nsys)
		callp += p->p_p->ps_emul->e_nosys;
	else
		callp += code;

	/*
	 * Gather the register arguments; anything beyond the 8 register
	 * slots is fetched from the user stack (r31).
	 */
	i = callp->sy_argsize / sizeof(register_t);
	if (i > sizeof(args) / sizeof(register_t))
		panic("syscall nargs");
	if (i > nap) {
		bcopy((caddr_t)ap, (caddr_t)args, nap * sizeof(register_t));
		if ((error = copyin((caddr_t)tf->tf_r[31], args + nap,
		    (i - nap) * sizeof(register_t))))
			goto bad;
	} else
		bcopy((caddr_t)ap, (caddr_t)args, i * sizeof(register_t));

	/* default return values: rval[1] keeps its incoming r3 */
	rval[0] = 0;
	rval[1] = tf->tf_r[3];

	error = mi_syscall(p, code, callp, args, rval);

	/*
	 * system call will look like:
	 *	 or r13, r0, <code>
	 *       tb0 0, r0, <128> <- sxip
	 *	 br err 	  <- snip
	 *       jmp r1 	  <- sfip
	 *  err: or.u r3, r0, hi16(errno)
	 *	 st r2, r3, lo16(errno)
	 *	 subu r2, r0, 1
	 *	 jmp r1
	 *
	 * So, when we take syscall trap, sxip/snip/sfip will be as
	 * shown above.
	 * Given this,
	 * 1. If the system call returned 0, need to skip nip.
	 *	nip = fip, fip += 4
	 *    (doesn't matter what fip + 4 will be but we will never
	 *    execute this since jmp r1 at nip will change the execution flow.)
	 * 2. If the system call returned an errno > 0, plug the value
	 *    in r2, and leave nip and fip unchanged. This will have us
	 *    executing "br err" on return to user space.
	 * 3. If the system call code returned ERESTART,
	 *    we need to rexecute the trap instruction. Back up the pipe
	 *    line.
	 *     fip = nip, nip = xip
	 * 4. If the system call returned EJUSTRETURN, don't need to adjust
	 *    any pointers.
	 */

	switch (error) {
	case 0:
		/* success: return values in r2/r3, carry clear, skip "br err" */
		tf->tf_r[2] = rval[0];
		tf->tf_r[3] = rval[1];
		tf->tf_epsr &= ~PSR_C;
		tf->tf_snip = tf->tf_sfip & ~NIP_E;
		tf->tf_sfip = tf->tf_snip + 4;
		break;
	case ERESTART:
		/* back the pipeline up so the tb0 is re-executed */
		tf->tf_epsr &= ~PSR_C;
		tf->tf_sfip = tf->tf_snip & ~FIP_E;
		tf->tf_snip = tf->tf_sxip & ~NIP_E;
		break;
	case EJUSTRETURN:
		/* registers already set up by the callee; just clear carry */
		tf->tf_epsr &= ~PSR_C;
		break;
	default:
	bad:
		/* errno in r2, carry set: userland will take the "br err" path */
		tf->tf_r[2] = error;
		tf->tf_epsr |= PSR_C;   /* fail */
		tf->tf_snip = tf->tf_snip & ~NIP_E;
		tf->tf_sfip = tf->tf_sfip & ~FIP_E;
		break;
	}

	mi_syscall_return(p, code, error, rval);
}
1285#endif /* M88100 */
1286
1287#ifdef M88110
1288/* Instruction pointers operate differently on mc88110 */
/*
 * m88110_syscall(code, tf)
 *
 * System call dispatcher for the MC88110.  Identical in structure to
 * m88100_syscall(), but the 88110 exposes exact instruction pointers
 * (exip/enip) instead of the 88100 sxip/snip/sfip pipeline, so the
 * return-path adjustment is done by skipping whole instructions.
 */
void
m88110_syscall(register_t code, struct trapframe *tf)
{
	int i, nsys, nap;
	struct sysent *callp;
	struct proc *p = curproc;
	int error;
	register_t args[8], rval[2], *ap;

	uvmexp.syscalls++;

	/* pick up the syscall table of the emulation this process runs under */
	callp = p->p_p->ps_emul->e_sysent;
	nsys  = p->p_p->ps_emul->e_nsysent;

	p->p_md.md_tf = tf;

	/*
	 * For 88k, all the arguments are passed in the registers (r2-r9),
	 * and further arguments (if any) on stack.
	 * For syscall (and __syscall), r2 (and r3) has the actual code.
	 * __syscall  takes a quad syscall number, so that other
	 * arguments are at their natural alignments.
	 */
	ap = &tf->tf_r[2];
	nap = 8;	/* r2-r9 */

	switch (code) {
	case SYS_syscall:
		/* indirect syscall: real code in r2, arguments shift down */
		code = *ap++;
		nap--;
		break;
	case SYS___syscall:
		/* quad code form is only honored for the native syscall table */
		if (callp != sysent)
			break;
		code = ap[_QUAD_LOWWORD];
		ap += 2;
		nap -= 2;
		break;
	}

	/* out-of-range codes are routed to the emulation's nosys entry */
	if (code < 0 || code >= nsys)
		callp += p->p_p->ps_emul->e_nosys;
	else
		callp += code;

	/*
	 * Gather the register arguments; anything beyond the 8 register
	 * slots is fetched from the user stack (r31).
	 */
	i = callp->sy_argsize / sizeof(register_t);
	if (i > sizeof(args) / sizeof(register_t))
		panic("syscall nargs");
	if (i > nap) {
		bcopy((caddr_t)ap, (caddr_t)args, nap * sizeof(register_t));
		if ((error = copyin((caddr_t)tf->tf_r[31], args + nap,
		    (i - nap) * sizeof(register_t))))
			goto bad;
	} else
		bcopy((caddr_t)ap, (caddr_t)args, i * sizeof(register_t));

	/* default return values: rval[1] keeps its incoming r3 */
	rval[0] = 0;
	rval[1] = tf->tf_r[3];

	error = mi_syscall(p, code, callp, args, rval);

	/*
	 * system call will look like:
	 *	 or r13, r0, <code>
	 *       tb0 0, r0, <128> <- exip
	 *	 br err 	  <- enip
	 *       jmp r1
	 *  err: or.u r3, r0, hi16(errno)
	 *	 st r2, r3, lo16(errno)
	 *	 subu r2, r0, 1
	 *	 jmp r1
	 *
	 * So, when we take syscall trap, exip/enip will be as
	 * shown above.
	 * Given this,
	 * 1. If the system call returned 0, need to jmp r1.
	 *    exip += 8
	 * 2. If the system call returned an errno > 0, increment
	 *    exip += 4 and plug the value in r2. This will have us
	 *    executing "br err" on return to user space.
	 * 3. If the system call code returned ERESTART,
	 *    we need to rexecute the trap instruction. leave exip as is.
	 * 4. If the system call returned EJUSTRETURN, just return.
	 *    exip += 4
	 */

	switch (error) {
	case 0:
		/* success: return values in r2/r3, carry clear, resume at jmp r1 */
		tf->tf_r[2] = rval[0];
		tf->tf_r[3] = rval[1];
		tf->tf_epsr &= ~PSR_C;
		/* skip two instructions */
		m88110_skip_insn(tf);
		m88110_skip_insn(tf);
		break;
	case ERESTART:
		/*
		 * Reexecute the trap.
		 * exip is already at the trap instruction, so
		 * there is nothing to do.
		 */
		tf->tf_epsr &= ~PSR_C;
		break;
	case EJUSTRETURN:
		/* registers already set up by the callee; resume at "br err" slot */
		tf->tf_epsr &= ~PSR_C;
		/* skip one instruction */
		m88110_skip_insn(tf);
		break;
	default:
	bad:
		/* errno in r2, carry set: userland will take the "br err" path */
		tf->tf_r[2] = error;
		tf->tf_epsr |= PSR_C;   /* fail */
		/* skip one instruction */
		m88110_skip_insn(tf);
		break;
	}

	mi_syscall_return(p, code, error, rval);
}
1408#endif	/* M88110 */
1409
1410/*
1411 * Set up return-value registers as fork() libc stub expects,
1412 * and do normal return-to-user-mode stuff.
1413 */
1414void
1415child_return(arg)
1416	void *arg;
1417{
1418	struct proc *p = arg;
1419	struct trapframe *tf;
1420
1421	tf = (struct trapframe *)USER_REGS(p);
1422	tf->tf_r[2] = 0;
1423	tf->tf_r[3] = 0;
1424	tf->tf_epsr &= ~PSR_C;
1425	/* reset r26 (used by the threads library) if __tfork */
1426	if (p->p_flag & P_THREAD)
1427		tf->tf_r[26] = 0;
1428	/* skip br instruction as in syscall() */
1429#ifdef M88100
1430	if (CPU_IS88100) {
1431		tf->tf_snip = (tf->tf_sfip & XIP_ADDR) | XIP_V;
1432		tf->tf_sfip = tf->tf_snip + 4;
1433	}
1434#endif
1435#ifdef M88110
1436	if (CPU_IS88110) {
1437		/* skip two instructions */
1438		m88110_skip_insn(tf);
1439		m88110_skip_insn(tf);
1440	}
1441#endif
1442
1443	KERNEL_UNLOCK();
1444
1445	mi_child_return(p);
1446}
1447
1448#ifdef PTRACE
1449
1450/*
1451 * User Single Step Debugging Support
1452 */
1453
1454#include <sys/ptrace.h>
1455
1456vaddr_t	ss_branch_taken(u_int, vaddr_t, struct reg *);
1457int	ss_get_value(struct proc *, vaddr_t, u_int *);
1458int	ss_inst_branch_or_call(u_int);
1459int	ss_put_breakpoint(struct proc *, vaddr_t, vaddr_t *, u_int *);
1460
1461#define	SYSCALL_INSTR	0xf000d080	/* tb0 0,r0,128 */
1462
1463int
1464ss_get_value(struct proc *p, vaddr_t addr, u_int *value)
1465{
1466	struct uio uio;
1467	struct iovec iov;
1468
1469	iov.iov_base = (caddr_t)value;
1470	iov.iov_len = sizeof(u_int);
1471	uio.uio_iov = &iov;
1472	uio.uio_iovcnt = 1;
1473	uio.uio_offset = (off_t)addr;
1474	uio.uio_resid = sizeof(u_int);
1475	uio.uio_segflg = UIO_SYSSPACE;
1476	uio.uio_rw = UIO_READ;
1477	uio.uio_procp = curproc;
1478	return (process_domem(curproc, p, &uio, PT_READ_I));
1479}
1480
1481int
1482ss_put_value(struct proc *p, vaddr_t addr, u_int value)
1483{
1484	struct uio uio;
1485	struct iovec iov;
1486
1487	iov.iov_base = (caddr_t)&value;
1488	iov.iov_len = sizeof(u_int);
1489	uio.uio_iov = &iov;
1490	uio.uio_iovcnt = 1;
1491	uio.uio_offset = (off_t)addr;
1492	uio.uio_resid = sizeof(u_int);
1493	uio.uio_segflg = UIO_SYSSPACE;
1494	uio.uio_rw = UIO_WRITE;
1495	uio.uio_procp = curproc;
1496	return (process_domem(curproc, p, &uio, PT_WRITE_I));
1497}
1498
1499/*
1500 * ss_branch_taken(instruction, pc, regs)
1501 *
1502 * instruction will be a control flow instruction location at address pc.
1503 * Branch taken is supposed to return the address to which the instruction
1504 * would jump if the branch is taken.
1505 *
1506 * This is different from branch_taken() in ddb, as we also need to process
1507 * system calls.
1508 */
1509vaddr_t
1510ss_branch_taken(u_int inst, vaddr_t pc, struct reg *regs)
1511{
1512	u_int regno;
1513
1514	/*
1515	 * Quick check of the instruction. Note that we know we are only
1516	 * invoked if ss_inst_branch_or_call() returns TRUE, so we do not
1517	 * need to repeat the jpm, jsr and syscall stricter checks here.
1518	 */
1519	switch (inst >> (32 - 5)) {
1520	case 0x18:	/* br */
1521	case 0x19:	/* bsr */
1522		/* signed 26 bit pc relative displacement, shift left 2 bits */
1523		inst = (inst & 0x03ffffff) << 2;
1524		/* check if sign extension is needed */
1525		if (inst & 0x08000000)
1526			inst |= 0xf0000000;
1527		return (pc + inst);
1528
1529	case 0x1a:	/* bb0 */
1530	case 0x1b:	/* bb1 */
1531	case 0x1d:	/* bcnd */
1532		/* signed 16 bit pc relative displacement, shift left 2 bits */
1533		inst = (inst & 0x0000ffff) << 2;
1534		/* check if sign extension is needed */
1535		if (inst & 0x00020000)
1536			inst |= 0xfffc0000;
1537		return (pc + inst);
1538
1539	case 0x1e:	/* jmp or jsr */
1540		regno = inst & 0x1f;	/* get the register value */
1541		return (regno == 0 ? 0 : regs->r[regno]);
1542
1543	default:	/* system call */
1544		/*
1545		 * The regular (pc + 4) breakpoint will match the error
1546		 * return. Successful system calls return at (pc + 8),
1547		 * so we'll set up a branch breakpoint there.
1548		 */
1549		return (pc + 8);
1550	}
1551}
1552
1553int
1554ss_inst_branch_or_call(u_int ins)
1555{
1556	/* check high five bits */
1557	switch (ins >> (32 - 5)) {
1558	case 0x18: /* br */
1559	case 0x19: /* bsr */
1560	case 0x1a: /* bb0 */
1561	case 0x1b: /* bb1 */
1562	case 0x1d: /* bcnd */
1563		return (TRUE);
1564	case 0x1e: /* could be jmp or jsr */
1565		if ((ins & 0xfffff3e0) == 0xf400c000)
1566			return (TRUE);
1567	}
1568
1569	return (FALSE);
1570}
1571
1572int
1573ss_put_breakpoint(struct proc *p, vaddr_t va, vaddr_t *bpva, u_int *bpsave)
1574{
1575	int rc;
1576
1577	/* Restore previous breakpoint if we did not trigger it. */
1578	if (*bpva != 0) {
1579		ss_put_value(p, *bpva, *bpsave);
1580		*bpva = 0;
1581	}
1582
1583	/* Save instruction. */
1584	if ((rc = ss_get_value(p, va, bpsave)) != 0)
1585		return (rc);
1586
1587	/* Store breakpoint instruction at the location now. */
1588	*bpva = va;
1589	return (ss_put_value(p, va, SSBREAKPOINT));
1590}
1591
1592int
1593process_sstep(struct proc *p, int sstep)
1594{
1595	struct reg *sstf = USER_REGS(p);
1596	vaddr_t pc, brpc;
1597	u_int32_t instr;
1598	int rc;
1599
1600	if (sstep == 0) {
1601		/* Restore previous breakpoints if any. */
1602		if (p->p_md.md_bp0va != 0) {
1603			ss_put_value(p, p->p_md.md_bp0va, p->p_md.md_bp0save);
1604			p->p_md.md_bp0va = 0;
1605		}
1606		if (p->p_md.md_bp1va != 0) {
1607			ss_put_value(p, p->p_md.md_bp1va, p->p_md.md_bp1save);
1608			p->p_md.md_bp1va = 0;
1609		}
1610
1611		return (0);
1612	}
1613
1614	/*
1615	 * User was stopped at pc, e.g. the instruction at pc was not executed.
1616	 * Fetch what's at the current location.
1617	 */
1618	pc = PC_REGS(sstf);
1619	if ((rc = ss_get_value(p, pc, &instr)) != 0)
1620		return (rc);
1621
1622	/*
1623	 * Find if this instruction may cause a branch, and set up a breakpoint
1624	 * at the branch location.
1625	 */
1626	if (ss_inst_branch_or_call(instr) || instr == SYSCALL_INSTR) {
1627		brpc = ss_branch_taken(instr, pc, sstf);
1628
1629		/* self-branches are hopeless */
1630		if (brpc != pc && brpc != 0) {
1631			if ((rc = ss_put_breakpoint(p, brpc,
1632			    &p->p_md.md_bp1va, &p->p_md.md_bp1save)) != 0)
1633				return (rc);
1634		}
1635	}
1636
1637	if ((rc = ss_put_breakpoint(p, pc + 4,
1638	    &p->p_md.md_bp0va, &p->p_md.md_bp0save)) != 0)
1639		return (rc);
1640
1641	return (0);
1642}
1643
1644#endif	/* PTRACE */
1645
1646#ifdef DIAGNOSTIC
void
splassert_check(int wantipl, const char *func)
{
	int curipl = getipl();

	/* nothing to complain about if we are at (or above) the wanted level */
	if (curipl >= wantipl)
		return;

	splassert_fail(wantipl, curipl, func);
	/*
	 * Raise the spl to the level the caller expected,
	 * in a feeble attempt to reduce further damage.
	 */
	(void)raiseipl(wantipl);
}
1663#endif
1664
1665/*
1666 * ld.d and st.d instructions referencing long aligned but not long long
1667 * aligned addresses will trigger a misaligned address exception.
1668 *
1669 * This routine attempts to recover these (valid) statements, by simulating
1670 * the split form of the instruction. If it fails, it returns the appropriate
1671 * signal number to deliver.
1672 *
1673 * Note that we do not attempt to do anything for .d.usr instructions - the
1674 * kernel never issues such instructions, and they cause a privileged
1675 * instruction exception from userland.
1676 */
int
double_reg_fixup(struct trapframe *frame, int fault)
{
	u_int32_t pc, instr, value;
	int regno, store;
	vaddr_t addr;

	/*
	 * Decode the faulting instruction.
	 */

	pc = PC_REGS(&frame->tf_regs);
	if (copyin((void *)pc, &instr, sizeof(u_int32_t)) != 0)
		return SIGSEGV;

	/* triadic (register + register) forms first, then the imm16 forms */
	switch (instr & 0xfc00ff00) {
	case 0xf4001000:	/* ld.d rD, rS1, rS2 */
		/* effective address is rS1 + rS2 */
		addr = frame->tf_r[(instr >> 16) & 0x1f]
		    + frame->tf_r[(instr & 0x1f)];
		store = 0;
		break;
	case 0xf4002000:	/* st.d rD, rS1, rS2 */
		addr = frame->tf_r[(instr >> 16) & 0x1f]
		    + frame->tf_r[(instr & 0x1f)];
		store = 1;
		break;
	default:
		/* dispatch on the major opcode for the immediate forms */
		switch (instr >> 26) {
		case 0x10000000 >> 26:	/* ld.d rD, rS, imm16 */
			/* effective address is rS + unsigned imm16 */
			addr = (instr & 0x0000ffff) +
			    frame->tf_r[(instr >> 16) & 0x1f];
			store = 0;
			break;
		case 0x20000000 >> 26:	/* st.d rD, rS, imm16 */
			addr = (instr & 0x0000ffff) +
			    frame->tf_r[(instr >> 16) & 0x1f];
			store = 1;
			break;
		default:
			/* not an ld.d/st.d after all: genuine bus error */
			return SIGBUS;
		}
		break;
	}

	/* rD field: first register of the pair being transferred */
	regno = (instr >> 21) & 0x1f;

	switch (fault) {
	case T_MISALGNFLT:
		/* We only handle long but not long long aligned access here */
		if ((addr & 0x07) != 4)
			return SIGBUS;
		break;
	case T_ILLFLT:
		/* We only handle odd register pair number here */
		if ((regno & 0x01) == 0)
			return SIGILL;
		/* We only handle long aligned access here */
		if ((addr & 0x03) != 0)
			return SIGBUS;
		break;
	}

	if (store) {
		/*
		 * Two word stores.
		 */
		/* r0 always reads as zero */
		if (regno == 0)
			value = 0;
		else
			value = frame->tf_r[regno];
		if (copyout(&value, (void *)addr, sizeof(u_int32_t)) != 0)
			return SIGSEGV;
		/* the pair wraps: r31's partner would be r0 (zero) */
		if (regno == 31)
			value = 0;
		else
			value = frame->tf_r[regno + 1];
		if (copyout(&value, (void *)(addr + 4), sizeof(u_int32_t)) != 0)
			return SIGSEGV;
	} else {
		/*
		 * Two word loads. r0 should be left unaltered, but the
		 * value should still be fetched even if it is discarded.
		 */
		if (copyin((void *)addr, &value, sizeof(u_int32_t)) != 0)
			return SIGSEGV;
		if (regno != 0)
			frame->tf_r[regno] = value;
		if (copyin((void *)(addr + 4), &value, sizeof(u_int32_t)) != 0)
			return SIGSEGV;
		if (regno != 31)
			frame->tf_r[regno + 1] = value;
	}

	return 0;
}
1772
/*
 * cache_flush(tf)
 *
 * Handle the userland cache-flush trap: synchronize the data cache for
 * the user-supplied range (r2 = va, r3 = len), one page at a time, then
 * advance the saved instruction pointers past the trap instruction and
 * return to userland.
 */
void
cache_flush(struct trapframe *tf)
{
	struct proc *p = curproc;
	struct pmap *pmap;
	paddr_t pa;
	vaddr_t va;
	vsize_t len, count;

	p->p_md.md_tf = tf;

	pmap = vm_map_pmap(&p->p_vmspace->vm_map);
	va = tf->tf_r[2];
	len = tf->tf_r[3];

	/*
	 * Validate the range: reject addresses outside the user address
	 * space, and wrap-around (va + len <= va also catches len == 0,
	 * which turns the loop below into a no-op).
	 * NOTE(review): the `>=' on the upper bound rejects ranges ending
	 * exactly at VM_MAXUSER_ADDRESS; presumably deliberate caution —
	 * confirm before changing.
	 */
	if (/* va < VM_MIN_ADDRESS || */ va >= VM_MAXUSER_ADDRESS ||
	    va + len <= va || va + len >= VM_MAXUSER_ADDRESS)
		len = 0;

	/* flush page by page; unmapped pages are silently skipped */
	while (len != 0) {
		count = min(len, PAGE_SIZE - (va & PAGE_MASK));
		if (pmap_extract(pmap, va, &pa) != FALSE)
			dma_cachectl(pa, count, DMA_CACHE_SYNC);
		va += count;
		len -= count;
	}

	/* step past the trap instruction before returning to userland */
#ifdef M88100
	if (CPU_IS88100) {
		tf->tf_snip = tf->tf_snip & ~NIP_E;
		tf->tf_sfip = tf->tf_sfip & ~FIP_E;
	}
#endif
#ifdef M88110
	if (CPU_IS88110) {
		/* skip instruction */
		m88110_skip_insn(tf);
	}
#endif

	userret(p);
}
1815