trap.c revision 1.94
1/*	$OpenBSD: trap.c,v 1.94 2014/05/26 17:31:17 miod Exp $	*/
2/*
3 * Copyright (c) 2004, Miodrag Vallat.
4 * Copyright (c) 1998 Steve Murphree, Jr.
5 * Copyright (c) 1996 Nivas Madhur
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *      This product includes software developed by Nivas Madhur.
19 * 4. The name of the author may not be used to endorse or promote products
20 *    derived from this software without specific prior written permission
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34/*
35 * Mach Operating System
36 * Copyright (c) 1991 Carnegie Mellon University
37 * Copyright (c) 1991 OMRON Corporation
38 * All Rights Reserved.
39 *
40 * Permission to use, copy, modify and distribute this software and its
41 * documentation is hereby granted, provided that both the copyright
42 * notice and this permission notice appear in all copies of the
43 * software, derivative works or modified versions, and any portions
44 * thereof, and that both notices appear in supporting documentation.
45 *
46 */
47
48#include <sys/types.h>
49#include <sys/param.h>
50#include <sys/proc.h>
51#include <sys/signalvar.h>
52#include <sys/user.h>
53#include <sys/syscall.h>
54#include <sys/syscall_mi.h>
55#include <sys/systm.h>
56
57#include <uvm/uvm_extern.h>
58
59#include <machine/asm_macro.h>
60#include <machine/cmmu.h>
61#include <machine/cpu.h>
62#ifdef M88100
63#include <machine/m88100.h>
64#include <machine/m8820x.h>
65#endif
66#ifdef M88110
67#include <machine/m88110.h>
68#endif
69#include <machine/fpu.h>
70#include <machine/pcb.h>
71#include <machine/psl.h>
72#include <machine/trap.h>
73
74#include <machine/db_machdep.h>
75
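/*
 * By the same tb0 encoding used for SYSCALL_INSTR further below
 * (0xf000d080 == tb0 0, r0, 128), this constant appears to decode as
 * ``tb0 0, r0, 0x1f8'', the trap reserved for single-step breakpoints.
 */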
76#define SSBREAKPOINT (0xF000D1F8U) /* Single Step Breakpoint */
77
78#define USERMODE(PSR)   (((PSR) & PSR_MODE) == 0)
79#define SYSTEMMODE(PSR) (((PSR) & PSR_MODE) != 0)
80
81void printtrap(int, struct trapframe *);
82__dead void panictrap(int, struct trapframe *);
83__dead void error_fatal(struct trapframe *);
84int double_reg_fixup(struct trapframe *, int);
85int ss_put_value(struct proc *, vaddr_t, u_int);
86
87extern void regdump(struct trapframe *f);
88
89const char *trap_type[] = {
90	"Reset",
91	"Interrupt Exception",
92	"Instruction Access",
93	"Data Access Exception",
94	"Misaligned Access",
95	"Unimplemented Opcode",
96	"Privilege Violation",
97	"Bounds Check Violation",
98	"Illegal Integer Divide",
99	"Integer Overflow",
100	"Error Exception",
101	"Non-Maskable Exception",
102};
103
104const int trap_types = sizeof trap_type / sizeof trap_type[0];
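/*
 * panictrap() indexes trap_type[] with the raw exception vector number,
 * so the order above presumably mirrors the hardware vector assignments
 * (Reset = 0, Interrupt = 1, ...).
 */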
105
106#ifdef M88100
107const char *pbus_exception_type[] = {
108	"Success (No Fault)",
109	"unknown 1",
110	"unknown 2",
111	"Bus Error",
112	"Segment Fault",
113	"Page Fault",
114	"Supervisor Violation",
115	"Write Violation",
116};
117#endif
118
119void
120printtrap(int type, struct trapframe *frame)
121{
122#ifdef M88100
123	if (CPU_IS88100) {
124		if (type == 2) {
125			/* instruction exception */
126			printf("\nInstr access fault (%s) v = %lx, frame %p\n",
127			    pbus_exception_type[
128			      CMMU_PFSR_FAULT(frame->tf_ipfsr)],
129			    frame->tf_sxip & XIP_ADDR, frame);
130		} else if (type == 3) {
131			/* data access exception */
132			printf("\nData access fault (%s) v = %lx, frame %p\n",
133			    pbus_exception_type[
134			      CMMU_PFSR_FAULT(frame->tf_dpfsr)],
135			    frame->tf_sxip & XIP_ADDR, frame);
136		} else
137			printf("\nTrap type %d, v = %lx, frame %p\n",
138			    type, frame->tf_sxip & XIP_ADDR, frame);
139	}
140#endif
141#ifdef M88110
142	if (CPU_IS88110) {
143		printf("\nTrap type %d, v = %x, frame %p\n",
144		    type, frame->tf_exip, frame);
145	}
146#endif
147#ifdef DDB
148	regdump(frame);
149#endif
150}
151
152__dead void
153panictrap(int type, struct trapframe *frame)
154{
155	static int panicing = 0;
156
157	if (panicing++ == 0)
158		printtrap(type, frame);
159	if ((u_int)type < trap_types)
160		panic(trap_type[type]);
161	else
162		panic("trap %d", type);
163	/*NOTREACHED*/
164}
165
166/*
167 * Handle external interrupts.
168 */
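/*
 * The actual dispatching is delegated to the board-specific
 * md_interrupt_func hook; ci_intrdepth is raised around the call,
 * presumably so that other code can detect interrupt context.
 */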
169void
170interrupt(struct trapframe *frame)
171{
172	struct cpu_info *ci = curcpu();
173
174	ci->ci_intrdepth++;
175	md_interrupt_func(frame);
176	ci->ci_intrdepth--;
177}
178
179#ifdef M88110
180/*
181 * Handle non-maskable interrupts.
182 */
183int
184nmi(struct trapframe *frame)
185{
186	return md_nmi_func(frame);
187}
188
189/*
190 * Reenable non-maskable interrupts.
191 */
192void
193nmi_wrapup(struct trapframe *frame)
194{
195	md_nmi_wrapup_func(frame);
196}
197#endif
198
199/*
200 * Handle asynchronous software traps.
201 */
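/*
 * md_astpending is expected to have been set because a reschedule or
 * signal delivery is wanted; it is cleared here before mi_ast() performs
 * the machine-independent work and userret() handles any pending signals
 * on the way back to user mode.
 */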
202void
203ast(struct trapframe *frame)
204{
205	struct cpu_info *ci = curcpu();
206	struct proc *p = ci->ci_curproc;
207
208	p->p_md.md_astpending = 0;
209
210	uvmexp.softs++;
211	mi_ast(p, ci->ci_want_resched);
212	userret(p);
213}
214
215#ifdef M88100
216void
217m88100_trap(u_int type, struct trapframe *frame)
218{
219	struct proc *p;
220	struct vm_map *map;
221	vaddr_t va, pcb_onfault;
222	vm_prot_t ftype;
223	int fault_type, pbus_type;
224	u_long fault_code;
225	vaddr_t fault_addr;
226	struct vmspace *vm;
227	union sigval sv;
228	int result;
229#ifdef DDB
230	int s;
231	u_int psr;
232#endif
233	int sig = 0;
234
235	uvmexp.traps++;
236	if ((p = curproc) == NULL)
237		p = &proc0;
238
239	if (USERMODE(frame->tf_epsr)) {
240		type += T_USER;
241		p->p_md.md_tf = frame;	/* for ptrace/signals */
242		refreshcreds(p);
243	}
244	fault_type = SI_NOINFO;
245	fault_code = 0;
246	fault_addr = frame->tf_sxip & XIP_ADDR;
247
248	switch (type) {
249	default:
250	case T_ILLFLT:
251lose:
252		panictrap(frame->tf_vector, frame);
253		break;
254		/*NOTREACHED*/
255
256#if defined(DDB)
257	case T_KDB_BREAK:
258		s = splhigh();
259		set_psr((psr = get_psr()) & ~PSR_IND);
260		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
261		set_psr(psr);
262		splx(s);
263		return;
264	case T_KDB_ENTRY:
265		s = splhigh();
266		set_psr((psr = get_psr()) & ~PSR_IND);
267		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
268		set_psr(psr);
269		splx(s);
270		return;
271#endif /* DDB */
272	case T_MISALGNFLT:
273		printf("kernel misaligned access exception @0x%08lx\n",
274		    frame->tf_sxip);
275		goto lose;
276	case T_INSTFLT:
277		/* kernel mode instruction access fault.
278		 * Should never, never happen for a non-paged kernel.
279		 */
280#ifdef TRAPDEBUG
281		pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
282		printf("Kernel Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
283		    pbus_type, pbus_exception_type[pbus_type],
284		    fault_addr, frame, frame->tf_cpu);
285#endif
286		goto lose;
287	case T_DATAFLT:
288		/* kernel mode data fault */
289
290		/* data fault on the user address? */
291		if ((frame->tf_dmt0 & DMT_DAS) == 0) {
292			KERNEL_LOCK();
293			goto user_fault;
294		}
295
296		fault_addr = frame->tf_dma0;
297		if (frame->tf_dmt0 & (DMT_WRITE|DMT_LOCKBAR)) {
298			ftype = VM_PROT_READ|VM_PROT_WRITE;
299			fault_code = VM_PROT_WRITE;
300		} else {
301			ftype = VM_PROT_READ;
302			fault_code = VM_PROT_READ;
303		}
304
305		va = trunc_page((vaddr_t)fault_addr);
306
307		KERNEL_LOCK();
308		vm = p->p_vmspace;
309		map = kernel_map;
310
311		pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
312#ifdef TRAPDEBUG
313		printf("Kernel Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
314		    pbus_type, pbus_exception_type[pbus_type],
315		    fault_addr, frame, frame->tf_cpu);
316#endif
317
318		pcb_onfault = p->p_addr->u_pcb.pcb_onfault;
319		switch (pbus_type) {
320		case CMMU_PFSR_SUCCESS:
321			/*
322			 * The fault was resolved. Call data_access_emulation
323			 * to drain the data unit pipe line and reset dmt0
324			 * so that trap won't get called again.
325			 */
326			p->p_addr->u_pcb.pcb_onfault = 0;
327			data_access_emulation((u_int *)frame);
328			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
329			frame->tf_dmt0 = 0;
330			frame->tf_dpfsr = 0;
331			KERNEL_UNLOCK();
332			return;
333		case CMMU_PFSR_SFAULT:
334		case CMMU_PFSR_PFAULT:
335			p->p_addr->u_pcb.pcb_onfault = 0;
336			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
337			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
338			if (result == 0) {
339				/*
340				 * We could resolve the fault. Call
341				 * data_access_emulation to drain the data
342				 * unit pipe line and reset dmt0 so that trap
343				 * won't get called again.
344				 */
345				p->p_addr->u_pcb.pcb_onfault = 0;
346				data_access_emulation((u_int *)frame);
347				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
348				frame->tf_dmt0 = 0;
349				frame->tf_dpfsr = 0;
350				KERNEL_UNLOCK();
351				return;
352			} else if (pcb_onfault != 0) {
353				/*
354				 * This could be a fault caused in copyout*()
355				 * while accessing kernel space.
356				 */
357				frame->tf_snip = pcb_onfault | NIP_V;
358				frame->tf_sfip = (pcb_onfault + 4) | FIP_V;
359				frame->tf_sxip = 0;
360				/*
361				 * Continue as if the fault had been resolved,
362				 * but do not try to complete the faulting
363				 * access.
364				 */
365				frame->tf_dmt0 = 0;
366				frame->tf_dpfsr = 0;
367				KERNEL_UNLOCK();
368				return;
369			}
370			break;
371		}
372#ifdef TRAPDEBUG
373		printf("PBUS Fault %d (%s) va = 0x%x\n", pbus_type,
374		    pbus_exception_type[pbus_type], va);
375#endif
376		KERNEL_UNLOCK();
377		goto lose;
378		/* NOTREACHED */
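	/*
	 * User-mode faults are handled below.  Kernel-mode data faults on
	 * user addresses (DMT_DAS clear) also end up here through the
	 * user_fault label, with the kernel lock already held, so that both
	 * paths share the uvm_fault() logic.
	 */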
379	case T_INSTFLT+T_USER:
380		/* User mode instruction access fault */
381		/* FALLTHROUGH */
382	case T_DATAFLT+T_USER:
383		KERNEL_LOCK();
384user_fault:
385		if (type == T_INSTFLT + T_USER) {
386			pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
387#ifdef TRAPDEBUG
388			printf("User Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
389			    pbus_type, pbus_exception_type[pbus_type],
390			    fault_addr, frame, frame->tf_cpu);
391#endif
392		} else {
393			fault_addr = frame->tf_dma0;
394			pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
395#ifdef TRAPDEBUG
396			printf("User Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
397			    pbus_type, pbus_exception_type[pbus_type],
398			    fault_addr, frame, frame->tf_cpu);
399#endif
400		}
401
402		if (frame->tf_dmt0 & (DMT_WRITE | DMT_LOCKBAR)) {
403			ftype = VM_PROT_READ | VM_PROT_WRITE;
404			fault_code = VM_PROT_WRITE;
405		} else {
406			ftype = VM_PROT_READ;
407			fault_code = VM_PROT_READ;
408		}
409
410		va = trunc_page((vaddr_t)fault_addr);
411
412		vm = p->p_vmspace;
413		map = &vm->vm_map;
414		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
415			p->p_addr->u_pcb.pcb_onfault = 0;
416
417		/* Call uvm_fault() to resolve non-bus error faults */
418		switch (pbus_type) {
419		case CMMU_PFSR_SUCCESS:
420			result = 0;
421			break;
422		case CMMU_PFSR_BERROR:
423			result = EACCES;
424			break;
425		default:
426			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
427			if (result == EACCES)
428				result = EFAULT;
429			break;
430		}
431
432		p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
433
434		if (result == 0 && (caddr_t)va >= vm->vm_maxsaddr)
435			uvm_grow(p, va);
436
437		if (result == 0) {
438			if (type == T_INSTFLT + T_USER) {
439				/*
440				 * back up SXIP, SNIP,
441				 * clearing the Error bit
442				 */
443				frame->tf_sfip = frame->tf_snip & ~FIP_E;
444				frame->tf_snip = frame->tf_sxip & ~NIP_E;
445				frame->tf_ipfsr = 0;
446			} else {
447				/*
448			 	 * We could resolve the fault. Call
449			 	 * data_access_emulation to drain the data unit
450			 	 * pipe line and reset dmt0 so that trap won't
451			 	 * get called again.
452			 	 */
453				p->p_addr->u_pcb.pcb_onfault = 0;
454				data_access_emulation((u_int *)frame);
455				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
456				frame->tf_dmt0 = 0;
457				frame->tf_dpfsr = 0;
458			}
459		} else {
460			/*
461			 * This could be a fault caused in copyin*()
462			 * while accessing user space.
463			 */
464			if (pcb_onfault != 0) {
465				frame->tf_snip = pcb_onfault | NIP_V;
466				frame->tf_sfip = (pcb_onfault + 4) | FIP_V;
467				frame->tf_sxip = 0;
468				/*
469				 * Continue as if the fault had been resolved,
470				 * but do not try to complete the faulting
471				 * access.
472				 */
473				frame->tf_dmt0 = 0;
474				frame->tf_dpfsr = 0;
475			} else {
476				sig = result == EACCES ? SIGBUS : SIGSEGV;
477				fault_type = result == EACCES ?
478				    BUS_ADRERR : SEGV_MAPERR;
479			}
480		}
481		KERNEL_UNLOCK();
482		break;
483	case T_MISALGNFLT+T_USER:
484		/* Fix any misaligned ld.d or st.d instructions */
485		sig = double_reg_fixup(frame, T_MISALGNFLT);
486		fault_type = BUS_ADRALN;
487		break;
488	case T_PRIVINFLT+T_USER:
489	case T_ILLFLT+T_USER:
490#ifndef DDB
491	case T_KDB_BREAK:
492	case T_KDB_ENTRY:
493#endif
494	case T_KDB_BREAK+T_USER:
495	case T_KDB_ENTRY+T_USER:
496	case T_KDB_TRACE:
497	case T_KDB_TRACE+T_USER:
498		sig = SIGILL;
499		break;
500	case T_BNDFLT+T_USER:
501		sig = SIGFPE;
502		break;
503	case T_ZERODIV+T_USER:
504		sig = SIGFPE;
505		fault_type = FPE_INTDIV;
506		break;
507	case T_OVFFLT+T_USER:
508		sig = SIGFPE;
509		fault_type = FPE_INTOVF;
510		break;
511	case T_FPEPFLT+T_USER:
512		m88100_fpu_precise_exception(frame);
513		goto maysigfpe;
514	case T_FPEIFLT+T_USER:
515		m88100_fpu_imprecise_exception(frame);
516maysigfpe:
517		/* Check for a SIGFPE condition */
518		if (frame->tf_fpsr & frame->tf_fpcr) {
519			sig = SIGFPE;
520			if (frame->tf_fpecr & FPECR_FIOV)
521				fault_type = FPE_FLTSUB;
522			else if (frame->tf_fpecr & FPECR_FROP)
523				fault_type = FPE_FLTINV;
524			else if (frame->tf_fpecr & FPECR_FDVZ)
525				fault_type = FPE_INTDIV;
526			else if (frame->tf_fpecr & FPECR_FUNF) {
527				if (frame->tf_fpsr & FPSR_EFUNF)
528					fault_type = FPE_FLTUND;
529				else if (frame->tf_fpsr & FPSR_EFINX)
530					fault_type = FPE_FLTRES;
531			} else if (frame->tf_fpecr & FPECR_FOVF) {
532				if (frame->tf_fpsr & FPSR_EFOVF)
533					fault_type = FPE_FLTOVF;
534				else if (frame->tf_fpsr & FPSR_EFINX)
535					fault_type = FPE_FLTRES;
536			} else if (frame->tf_fpecr & FPECR_FINX)
537				fault_type = FPE_FLTRES;
538		}
539		break;
540	case T_SIGSYS+T_USER:
541		sig = SIGSYS;
542		break;
543	case T_STEPBPT+T_USER:
544#ifdef PTRACE
545		/*
546		 * This trap is used by the kernel to support single-step
547		 * debugging (although any user could generate this trap
548		 * which should probably be handled differently). When a
549		 * process is continued by a debugger with the PT_STEP
550		 * function of ptrace (single step), the kernel inserts
551		 * one or two breakpoints in the user process so that only
552		 * one instruction (or two in the case of a delayed branch)
553		 * is executed.  When this breakpoint is hit, we get the
554		 * T_STEPBPT trap.
555		 */
556		{
557			u_int instr;
558			vaddr_t pc = PC_REGS(&frame->tf_regs);
559
560			/* read break instruction */
561			copyin((caddr_t)pc, &instr, sizeof(u_int));
562
563			/* check and see if we got here by accident */
564			if ((p->p_md.md_bp0va != pc &&
565			     p->p_md.md_bp1va != pc) ||
566			    instr != SSBREAKPOINT) {
567				sig = SIGTRAP;
568				fault_type = TRAP_TRACE;
569				break;
570			}
571
572			/* restore original instruction and clear breakpoint */
573			if (p->p_md.md_bp0va == pc) {
574				ss_put_value(p, pc, p->p_md.md_bp0save);
575				p->p_md.md_bp0va = 0;
576			}
577			if (p->p_md.md_bp1va == pc) {
578				ss_put_value(p, pc, p->p_md.md_bp1save);
579				p->p_md.md_bp1va = 0;
580			}
581
582#if 1
583			frame->tf_sfip = frame->tf_snip;
584			frame->tf_snip = pc | NIP_V;
585#endif
586			sig = SIGTRAP;
587			fault_type = TRAP_BRKPT;
588		}
589#else
590		sig = SIGTRAP;
591		fault_type = TRAP_TRACE;
592#endif
593		break;
594
595	case T_USERBPT+T_USER:
596		/*
597		 * This trap is meant to be used by debuggers to implement
598		 * breakpoint debugging.  When we get this trap, we just
599		 * return a signal which gets caught by the debugger.
600		 */
601		frame->tf_sfip = frame->tf_snip;
602		frame->tf_snip = frame->tf_sxip;
603		sig = SIGTRAP;
604		fault_type = TRAP_BRKPT;
605		break;
606
607	}
608
609	/*
610	 * If trap from supervisor mode, just return
611	 */
612	if (type < T_USER)
613		return;
614
615	if (sig) {
616		sv.sival_ptr = (void *)fault_addr;
617		KERNEL_LOCK();
618		trapsignal(p, sig, fault_code, fault_type, sv);
619		KERNEL_UNLOCK();
620		/*
621		 * don't want multiple faults - we are going to
622		 * deliver signal.
623		 */
624		frame->tf_dmt0 = 0;
625		frame->tf_ipfsr = frame->tf_dpfsr = 0;
626	}
627
628	userret(p);
629}
630#endif /* M88100 */
631
632#ifdef M88110
633void
634m88110_trap(u_int type, struct trapframe *frame)
635{
636	struct proc *p;
637	struct vm_map *map;
638	vaddr_t va, pcb_onfault;
639	vm_prot_t ftype;
640	int fault_type;
641	u_long fault_code;
642	vaddr_t fault_addr;
643	struct vmspace *vm;
644	union sigval sv;
645	int result;
646#ifdef DDB
647	int s;
648	u_int psr;
649#endif
650	int sig = 0;
651
652	uvmexp.traps++;
653	if ((p = curproc) == NULL)
654		p = &proc0;
655
656	fault_type = SI_NOINFO;
657	fault_code = 0;
658	fault_addr = frame->tf_exip & XIP_ADDR;
659
660	/*
661	 * 88110 errata #16 (4.2) or #3 (5.1.1):
662	 * ``bsr, br, bcnd, jsr and jmp instructions with the .n extension
663	 *   can cause the enip value to be incremented by 4 incorrectly
664	 *   if the instruction in the delay slot is the first word of a
665	 *   page which misses in the mmu and results in a hardware
666	 *   tablewalk which encounters an exception or an invalid
667	 *   descriptor.  The exip value in this case will point to the
668	 *   first word of the page, and the D bit will be set.
669	 *
670	 *   Note: if the instruction is a jsr.n r1, r1 will be overwritten
671	 *   with erroneous data.  Therefore, no recovery is possible. Do
672	 *   not allow this instruction to occupy the last word of a page.
673	 *
674	 *   Suggested fix: recover in general by backing up the exip by 4
675	 *   and clearing the delay bit before an rte when the lower 3 hex
676	 *   digits of the exip are 001.''
677	 */
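	/*
	 * PAGE_MASK covers the low 12 bits, so the test below implements
	 * the ``lower 3 hex digits of the exip are 001'' check quoted
	 * above, assuming 4KB pages.
	 */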
678	if ((frame->tf_exip & PAGE_MASK) == 0x00000001 && type == T_INSTFLT) {
679		u_int instr;
680
681		/*
682		 * Note that we have initialized fault_addr above, so that
683		 * signals provide the correct address if necessary.
684		 */
685		frame->tf_exip = (frame->tf_exip & ~1) - 4;
686
687		/*
688		 * Check the instruction at the (backed up) exip.
689		 * If it is a jsr.n, abort.
690		 */
691		if (!USERMODE(frame->tf_epsr)) {
692			instr = *(u_int *)fault_addr;
693			if (instr == 0xf400cc01)
694				panic("mc88110 errata #16, exip %p enip %p",
695				    (frame->tf_exip + 4) | 1, frame->tf_enip);
696		} else {
697			/* copyin here should not fail */
698			if (copyin((const void *)frame->tf_exip, &instr,
699			    sizeof instr) == 0 &&
700			    instr == 0xf400cc01) {
701				uprintf("mc88110 errata #16, exip %p enip %p",
702				    (frame->tf_exip + 4) | 1, frame->tf_enip);
703				sig = SIGILL;
704			}
705		}
706	}
707
708	if (USERMODE(frame->tf_epsr)) {
709		type += T_USER;
710		p->p_md.md_tf = frame;	/* for ptrace/signals */
711		refreshcreds(p);
712	}
713
714	if (sig != 0)
715		goto deliver;
716
717	switch (type) {
718	default:
719lose:
720		panictrap(frame->tf_vector, frame);
721		break;
722		/*NOTREACHED*/
723
724#ifdef DEBUG
725	case T_110_DRM+T_USER:
726	case T_110_DRM:
727		printf("DMMU read miss: Hardware Table Searches should be enabled!\n");
728		goto lose;
729	case T_110_DWM+T_USER:
730	case T_110_DWM:
731		printf("DMMU write miss: Hardware Table Searches should be enabled!\n");
732		goto lose;
733	case T_110_IAM+T_USER:
734	case T_110_IAM:
735		printf("IMMU miss: Hardware Table Searches should be enabled!\n");
736		goto lose;
737#endif
738
739#ifdef DDB
740	case T_KDB_TRACE:
741		s = splhigh();
742		set_psr((psr = get_psr()) & ~PSR_IND);
743		ddb_break_trap(T_KDB_TRACE, (db_regs_t*)frame);
744		set_psr(psr);
745		splx(s);
746		return;
747	case T_KDB_BREAK:
748		s = splhigh();
749		set_psr((psr = get_psr()) & ~PSR_IND);
750		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
751		set_psr(psr);
752		splx(s);
753		return;
754	case T_KDB_ENTRY:
755		s = splhigh();
756		set_psr((psr = get_psr()) & ~PSR_IND);
757		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
758		set_psr(psr);
759		/* skip trap instruction */
760		m88110_skip_insn(frame);
761		splx(s);
762		return;
763#endif /* DDB */
764	case T_ILLFLT:
765		/*
766		 * The 88110 seems to trigger an instruction fault in
767		 * supervisor mode when running the following sequence:
768		 *
769		 *	bcnd.n cond, reg, 1f
770		 *	arithmetic insn
771		 *	...
772		 *  	the same exact arithmetic insn
773		 *  1:	another arithmetic insn stalled by the previous one
774		 *	...
775		 *
776		 * The exception is reported with exip pointing to the
777		 * branch address. I don't know, at this point, if there
778		 * is any better workaround than the aggressive one
779		 * implemented below; I don't see how this could relate to
780		 * any of the 88110 errata (although it might be related to
781		 * branch prediction).
782		 *
783		 * For the record, the exact sequence triggering the
784		 * spurious exception is:
785		 *
786		 *	bcnd.n	eq0, r2,  1f
787		 *	 or	r25, r0,  r22
788		 *	bsr	somewhere
789		 *	or	r25, r0,  r22
790		 *  1:	cmp	r13, r25, r20
791		 *
792		 * within the same cache line.
793		 *
794		 * Simply ignoring the exception and returning does not
795		 * cause the exception to disappear. Clearing the
796		 * instruction cache works, but on 88110+88410 systems,
797		 * the 88410 needs to be invalidated as well. (note that
798		 * the size passed to the flush routines does not matter
799		 * since there is no way to flush a subset of the 88110
800		 * I$ anyway)
801		 */
802	    {
803		extern void *kernel_text, *etext;
804
805		if (fault_addr >= (vaddr_t)&kernel_text &&
806		    fault_addr < (vaddr_t)&etext) {
807			cmmu_icache_inv(curcpu()->ci_cpuid,
808			    trunc_page(fault_addr), PAGE_SIZE);
809			cmmu_cache_wbinv(curcpu()->ci_cpuid,
810			    trunc_page(fault_addr), PAGE_SIZE);
811			return;
812		}
813	    }
814		goto lose;
815	case T_MISALGNFLT:
816		printf("kernel misaligned access exception @%p\n",
817		    frame->tf_exip);
818		goto lose;
819	case T_INSTFLT:
820		/* kernel mode instruction access fault.
821		 * Should never, never happen for a non-paged kernel.
822		 */
823#ifdef TRAPDEBUG
824		printf("Kernel Instruction fault exip %x isr %x ilar %x\n",
825		    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
826#endif
827		goto lose;
828
829	case T_DATAFLT:
830		/* kernel mode data fault */
831
832		/* data fault on the user address? */
833		if ((frame->tf_dsr & CMMU_DSR_SU) == 0) {
834			KERNEL_LOCK();
835			goto m88110_user_fault;
836		}
837
838#ifdef TRAPDEBUG
839		printf("Kernel Data access fault exip %x dsr %x dlar %x\n",
840		    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
841#endif
842
843		fault_addr = frame->tf_dlar;
844		if (frame->tf_dsr & CMMU_DSR_RW) {
845			ftype = VM_PROT_READ;
846			fault_code = VM_PROT_READ;
847		} else {
848			ftype = VM_PROT_READ|VM_PROT_WRITE;
849			fault_code = VM_PROT_WRITE;
850		}
851
852		va = trunc_page((vaddr_t)fault_addr);
853
854		KERNEL_LOCK();
855		vm = p->p_vmspace;
856		map = kernel_map;
857
858		if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
859			/*
860			 * On a segment or a page fault, call uvm_fault() to
861			 * resolve the fault.
862			 */
863			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
864				p->p_addr->u_pcb.pcb_onfault = 0;
865			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
866			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
867			/*
868			 * This could be a fault caused in copyout*()
869			 * while accessing kernel space.
870			 */
871			if (result != 0 && pcb_onfault != 0) {
872				frame->tf_exip = pcb_onfault;
873				/*
874				 * Continue as if the fault had been resolved.
875				 */
876				result = 0;
877			}
878			if (result == 0) {
879				KERNEL_UNLOCK();
880				return;
881			}
882		}
883		KERNEL_UNLOCK();
884		goto lose;
885	case T_INSTFLT+T_USER:
886		/* User mode instruction access fault */
887		/* FALLTHROUGH */
888	case T_DATAFLT+T_USER:
889		KERNEL_LOCK();
890m88110_user_fault:
891		if (type == T_INSTFLT+T_USER) {
892			ftype = VM_PROT_READ;
893			fault_code = VM_PROT_READ;
894#ifdef TRAPDEBUG
895			printf("User Instruction fault exip %x isr %x ilar %x\n",
896			    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
897#endif
898		} else {
899			fault_addr = frame->tf_dlar;
900			if (frame->tf_dsr & CMMU_DSR_RW) {
901				ftype = VM_PROT_READ;
902				fault_code = VM_PROT_READ;
903			} else {
904				ftype = VM_PROT_READ|VM_PROT_WRITE;
905				fault_code = VM_PROT_WRITE;
906			}
907#ifdef TRAPDEBUG
908			printf("User Data access fault exip %x dsr %x dlar %x\n",
909			    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
910#endif
911		}
912
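		/*
		 * On the 88110 the fault is described by the ISR/DSR status
		 * bits rather than by the 88100 data pipeline registers.
		 * Bus errors are reported as EACCES below, and write faults
		 * are first run through pmap_set_modify() to tell
		 * referenced/modified emulation apart from real protection
		 * faults.
		 */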
913		va = trunc_page((vaddr_t)fault_addr);
914
915		vm = p->p_vmspace;
916		map = &vm->vm_map;
917		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
918			p->p_addr->u_pcb.pcb_onfault = 0;
919
920		/*
921		 * Call uvm_fault() to resolve non-bus error faults
922		 * whenever possible.
923		 */
924		if (type == T_INSTFLT+T_USER) {
925			/* instruction faults */
926			if (frame->tf_isr &
927			    (CMMU_ISR_BE | CMMU_ISR_SP | CMMU_ISR_TBE)) {
928				/* bus error, supervisor protection */
929				result = EACCES;
930			} else
931			if (frame->tf_isr & (CMMU_ISR_SI | CMMU_ISR_PI)) {
932				/* segment or page fault */
933				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
934				if (result == EACCES)
935					result = EFAULT;
936			} else {
937#ifdef TRAPDEBUG
938				printf("Unexpected Instruction fault isr %x\n",
939				    frame->tf_isr);
940#endif
941				KERNEL_UNLOCK();
942				goto lose;
943			}
944		} else {
945			/* data faults */
946			if (frame->tf_dsr & CMMU_DSR_BE) {
947				/* bus error */
948				result = EACCES;
949			} else
950			if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
951				/* segment or page fault */
952				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
953				if (result == EACCES)
954					result = EFAULT;
955			} else
956			if (frame->tf_dsr & (CMMU_DSR_CP | CMMU_DSR_WA)) {
957				/* copyback or write allocate error */
958				result = EACCES;
959			} else
960			if (frame->tf_dsr & CMMU_DSR_WE) {
961				/* write fault  */
962				/* This could be a write protection fault or an
963				 * exception to set the used and modified bits
964				 * in the pte. Basically, if we got a write
965				 * error, then we already have a pte entry that
966				 * faulted in from a previous seg fault or page
967				 * fault.
968				 * Get the pte and check the status of the
969				 * modified and valid bits to determine if this
970				 * is indeed a real write fault.  XXX smurph
971				 */
972				if (pmap_set_modify(map->pmap, va)) {
973#ifdef TRAPDEBUG
974					printf("Corrected userland write fault, pmap %p va %p\n",
975					    map->pmap, va);
976#endif
977					result = 0;
978				} else {
979					/* must be a real wp fault */
980#ifdef TRAPDEBUG
981					printf("Uncorrected userland write fault, pmap %p va %p\n",
982					    map->pmap, va);
983#endif
984					result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
985					if (result == EACCES)
986						result = EFAULT;
987				}
988			} else {
989#ifdef TRAPDEBUG
990				printf("Unexpected Data access fault dsr %x\n",
991				    frame->tf_dsr);
992#endif
993				KERNEL_UNLOCK();
994				goto lose;
995			}
996		}
997		p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
998
999		if (result == 0 && (caddr_t)va >= vm->vm_maxsaddr)
1000			uvm_grow(p, va);
1001		KERNEL_UNLOCK();
1002
1003		/*
1004		 * This could be a fault caused in copyin*()
1005		 * while accessing user space.
1006		 */
1007		if (result != 0 && pcb_onfault != 0) {
1008			frame->tf_exip = pcb_onfault;
1009			/*
1010			 * Continue as if the fault had been resolved.
1011			 */
1012			result = 0;
1013		}
1014
1015		if (result != 0) {
1016			sig = result == EACCES ? SIGBUS : SIGSEGV;
1017			fault_type = result == EACCES ?
1018			    BUS_ADRERR : SEGV_MAPERR;
1019		}
1020		break;
1021	case T_MISALGNFLT+T_USER:
1022		/* Fix any misaligned ld.d or st.d instructions */
1023		sig = double_reg_fixup(frame, T_MISALGNFLT);
1024		fault_type = BUS_ADRALN;
1025		if (sig == 0) {
1026			/* skip recovered instruction */
1027			m88110_skip_insn(frame);
1028			goto userexit;
1029		}
1030		break;
1031	case T_ILLFLT+T_USER:
1032		/* Fix any ld.d or st.d instruction with an odd register */
1033		sig = double_reg_fixup(frame, T_ILLFLT);
1034		fault_type = ILL_PRVREG;
1035		if (sig == 0) {
1036			/* skip recovered instruction */
1037			m88110_skip_insn(frame);
1038			goto userexit;
1039		}
1040		break;
1041	case T_PRIVINFLT+T_USER:
1042		fault_type = ILL_PRVREG;
1043		/* FALLTHROUGH */
1044#ifndef DDB
1045	case T_KDB_BREAK:
1046	case T_KDB_ENTRY:
1047	case T_KDB_TRACE:
1048#endif
1049	case T_KDB_BREAK+T_USER:
1050	case T_KDB_ENTRY+T_USER:
1051	case T_KDB_TRACE+T_USER:
1052		sig = SIGILL;
1053		break;
1054	case T_BNDFLT+T_USER:
1055		sig = SIGFPE;
1056		/* skip trap instruction */
1057		m88110_skip_insn(frame);
1058		break;
1059	case T_ZERODIV+T_USER:
1060		sig = SIGFPE;
1061		fault_type = FPE_INTDIV;
1062		/* skip trap instruction */
1063		m88110_skip_insn(frame);
1064		break;
1065	case T_OVFFLT+T_USER:
1066		sig = SIGFPE;
1067		fault_type = FPE_INTOVF;
1068		/* skip trap instruction */
1069		m88110_skip_insn(frame);
1070		break;
1071	case T_FPEPFLT+T_USER:
1072		m88110_fpu_exception(frame);
1073		goto userexit;
1074	case T_SIGSYS+T_USER:
1075		sig = SIGSYS;
1076		break;
1077	case T_STEPBPT+T_USER:
1078#ifdef PTRACE
1079		/*
1080		 * This trap is used by the kernel to support single-step
1081		 * debugging (although any user could generate this trap
1082		 * which should probably be handled differently). When a
1083		 * process is continued by a debugger with the PT_STEP
1084		 * function of ptrace (single step), the kernel inserts
1085		 * one or two breakpoints in the user process so that only
1086		 * one instruction (or two in the case of a delayed branch)
1087		 * is executed.  When this breakpoint is hit, we get the
1088		 * T_STEPBPT trap.
1089		 */
1090		{
1091			u_int instr;
1092			vaddr_t pc = PC_REGS(&frame->tf_regs);
1093
1094			/* read break instruction */
1095			copyin((caddr_t)pc, &instr, sizeof(u_int));
1096
1097			/* check and see if we got here by accident */
1098			if ((p->p_md.md_bp0va != pc &&
1099			     p->p_md.md_bp1va != pc) ||
1100			    instr != SSBREAKPOINT) {
1101				sig = SIGTRAP;
1102				fault_type = TRAP_TRACE;
1103				break;
1104			}
1105
1106			/* restore original instruction and clear breakpoint */
1107			if (p->p_md.md_bp0va == pc) {
1108				ss_put_value(p, pc, p->p_md.md_bp0save);
1109				p->p_md.md_bp0va = 0;
1110			}
1111			if (p->p_md.md_bp1va == pc) {
1112				ss_put_value(p, pc, p->p_md.md_bp1save);
1113				p->p_md.md_bp1va = 0;
1114			}
1115
1116			sig = SIGTRAP;
1117			fault_type = TRAP_BRKPT;
1118		}
1119#else
1120		sig = SIGTRAP;
1121		fault_type = TRAP_TRACE;
1122#endif
1123		break;
1124	case T_USERBPT+T_USER:
1125		/*
1126		 * This trap is meant to be used by debuggers to implement
1127		 * breakpoint debugging.  When we get this trap, we just
1128		 * return a signal which gets caught by the debugger.
1129		 */
1130		sig = SIGTRAP;
1131		fault_type = TRAP_BRKPT;
1132		break;
1133	}
1134
1135	/*
1136	 * If trap from supervisor mode, just return
1137	 */
1138	if (type < T_USER)
1139		return;
1140
1141	if (sig) {
1142deliver:
1143		sv.sival_ptr = (void *)fault_addr;
1144		KERNEL_LOCK();
1145		trapsignal(p, sig, fault_code, fault_type, sv);
1146		KERNEL_UNLOCK();
1147	}
1148
1149userexit:
1150	userret(p);
1151}
1152#endif /* M88110 */
1153
1154__dead void
1155error_fatal(struct trapframe *frame)
1156{
1157	if (frame->tf_vector == 0)
1158		printf("\nCPU %d Reset Exception\n", cpu_number());
1159	else
1160		printf("\nCPU %d Error Exception\n", cpu_number());
1161
1162#ifdef DDB
1163	regdump((struct trapframe*)frame);
1164#endif
1165	panic("unrecoverable exception %ld", frame->tf_vector);
1166}
1167
1168#ifdef M88100
1169void
1170m88100_syscall(register_t code, struct trapframe *tf)
1171{
1172	int i, nsys, nap;
1173	struct sysent *callp;
1174	struct proc *p = curproc;
1175	int error;
1176	register_t args[8], rval[2], *ap;
1177
1178	uvmexp.syscalls++;
1179
1180	callp = p->p_p->ps_emul->e_sysent;
1181	nsys  = p->p_p->ps_emul->e_nsysent;
1182
1183	p->p_md.md_tf = tf;
1184
1185	/*
1186	 * For 88k, all the arguments are passed in the registers (r2-r9),
1187	 * and further arguments (if any) on stack.
1188	 * For syscall (and __syscall), r2 (and r3) has the actual code.
1189	 * __syscall  takes a quad syscall number, so that other
1190	 * arguments are at their natural alignments.
1191	 */
1192	ap = &tf->tf_r[2];
1193	nap = 8; /* r2-r9 */
1194
1195	switch (code) {
1196	case SYS_syscall:
1197		code = *ap++;
1198		nap--;
1199		break;
1200	case SYS___syscall:
1201		if (callp != sysent)
1202			break;
1203		code = ap[_QUAD_LOWWORD];
1204		ap += 2;
1205		nap -= 2;
1206		break;
1207	}
1208
1209	if (code < 0 || code >= nsys)
1210		callp += p->p_p->ps_emul->e_nosys;
1211	else
1212		callp += code;
1213
1214	i = callp->sy_argsize / sizeof(register_t);
1215	if (i > sizeof(args) / sizeof(register_t))
1216		panic("syscall nargs");
1217	if (i > nap) {
1218		bcopy((caddr_t)ap, (caddr_t)args, nap * sizeof(register_t));
1219		if ((error = copyin((caddr_t)tf->tf_r[31], args + nap,
1220		    (i - nap) * sizeof(register_t))))
1221			goto bad;
1222	} else
1223		bcopy((caddr_t)ap, (caddr_t)args, i * sizeof(register_t));
1224
1225	rval[0] = 0;
1226	rval[1] = tf->tf_r[3];
1227
1228	error = mi_syscall(p, code, callp, args, rval);
1229
1230	/*
1231	 * system call will look like:
1232	 *	 or r13, r0, <code>
1233	 *       tb0 0, r0, <128> <- sxip
1234	 *	 br err 	  <- snip
1235	 *       jmp r1 	  <- sfip
1236	 *  err: or.u r3, r0, hi16(errno)
1237	 *	 st r2, r3, lo16(errno)
1238	 *	 subu r2, r0, 1
1239	 *	 jmp r1
1240	 *
1241	 * So, when we take syscall trap, sxip/snip/sfip will be as
1242	 * shown above.
1243	 * Given this,
1244	 * 1. If the system call returned 0, need to skip nip.
1245	 *	nip = fip, fip += 4
1246	 *    (doesn't matter what fip + 4 will be but we will never
1247	 *    execute this since jmp r1 at nip will change the execution flow.)
1248	 * 2. If the system call returned an errno > 0, plug the value
1249	 *    in r2, and leave nip and fip unchanged. This will have us
1250	 *    executing "br err" on return to user space.
1251	 * 3. If the system call code returned ERESTART,
1252	 *    we need to re-execute the trap instruction. Back up the pipe
1253	 *    line.
1254	 *     fip = nip, nip = xip
1255	 * 4. If the system call returned EJUSTRETURN, don't need to adjust
1256	 *    any pointers.
1257	 */
1258
1259	switch (error) {
1260	case 0:
1261		tf->tf_r[2] = rval[0];
1262		tf->tf_r[3] = rval[1];
1263		tf->tf_epsr &= ~PSR_C;
1264		tf->tf_snip = tf->tf_sfip & ~NIP_E;
1265		tf->tf_sfip = tf->tf_snip + 4;
1266		break;
1267	case ERESTART:
1268		tf->tf_sfip = tf->tf_snip & ~FIP_E;
1269		tf->tf_snip = tf->tf_sxip & ~NIP_E;
1270		break;
1271	case EJUSTRETURN:
1272		break;
1273	default:
1274	bad:
1275		tf->tf_r[2] = error;
1276		tf->tf_epsr |= PSR_C;   /* fail */
1277		tf->tf_snip = tf->tf_snip & ~NIP_E;
1278		tf->tf_sfip = tf->tf_sfip & ~FIP_E;
1279		break;
1280	}
1281
1282	mi_syscall_return(p, code, error, rval);
1283}
1284#endif /* M88100 */
1285
1286#ifdef M88110
1287/* Instruction pointers operate differently on mc88110 */
1288void
1289m88110_syscall(register_t code, struct trapframe *tf)
1290{
1291	int i, nsys, nap;
1292	struct sysent *callp;
1293	struct proc *p = curproc;
1294	int error;
1295	register_t args[8], rval[2], *ap;
1296
1297	uvmexp.syscalls++;
1298
1299	callp = p->p_p->ps_emul->e_sysent;
1300	nsys  = p->p_p->ps_emul->e_nsysent;
1301
1302	p->p_md.md_tf = tf;
1303
1304	/*
1305	 * For 88k, all the arguments are passed in the registers (r2-r9),
1306	 * and further arguments (if any) on stack.
1307	 * For syscall (and __syscall), r2 (and r3) has the actual code.
1308	 * __syscall  takes a quad syscall number, so that other
1309	 * arguments are at their natural alignments.
1310	 */
1311	ap = &tf->tf_r[2];
1312	nap = 8;	/* r2-r9 */
1313
1314	switch (code) {
1315	case SYS_syscall:
1316		code = *ap++;
1317		nap--;
1318		break;
1319	case SYS___syscall:
1320		if (callp != sysent)
1321			break;
1322		code = ap[_QUAD_LOWWORD];
1323		ap += 2;
1324		nap -= 2;
1325		break;
1326	}
1327
1328	if (code < 0 || code >= nsys)
1329		callp += p->p_p->ps_emul->e_nosys;
1330	else
1331		callp += code;
1332
1333	i = callp->sy_argsize / sizeof(register_t);
1334	if (i > sizeof(args) / sizeof(register_t))
1335		panic("syscall nargs");
1336	if (i > nap) {
1337		bcopy((caddr_t)ap, (caddr_t)args, nap * sizeof(register_t));
1338		if ((error = copyin((caddr_t)tf->tf_r[31], args + nap,
1339		    (i - nap) * sizeof(register_t))))
1340			goto bad;
1341	} else
1342		bcopy((caddr_t)ap, (caddr_t)args, i * sizeof(register_t));
1343
1344	rval[0] = 0;
1345	rval[1] = tf->tf_r[3];
1346
1347	error = mi_syscall(p, code, callp, args, rval);
1348
1349	/*
1350	 * system call will look like:
1351	 *	 or r13, r0, <code>
1352	 *       tb0 0, r0, <128> <- exip
1353	 *	 br err 	  <- enip
1354	 *       jmp r1
1355	 *  err: or.u r3, r0, hi16(errno)
1356	 *	 st r2, r3, lo16(errno)
1357	 *	 subu r2, r0, 1
1358	 *	 jmp r1
1359	 *
1360	 * So, when we take syscall trap, exip/enip will be as
1361	 * shown above.
1362	 * Given this,
1363	 * 1. If the system call returned 0, need to jmp r1.
1364	 *    exip += 8
1365	 * 2. If the system call returned an errno > 0, increment
1366	 *    exip += 4 and plug the value in r2. This will have us
1367	 *    executing "br err" on return to user space.
1368	 * 3. If the system call code returned ERESTART,
1369	 *    we need to re-execute the trap instruction. Leave exip as is.
1370	 * 4. If the system call returned EJUSTRETURN, just return.
1371	 *    exip += 4
1372	 */
1373
1374	switch (error) {
1375	case 0:
1376		tf->tf_r[2] = rval[0];
1377		tf->tf_r[3] = rval[1];
1378		tf->tf_epsr &= ~PSR_C;
1379		/* skip two instructions */
1380		m88110_skip_insn(tf);
1381		m88110_skip_insn(tf);
1382		break;
1383	case ERESTART:
1384		/*
1385		 * Reexecute the trap.
1386		 * exip is already at the trap instruction, so
1387		 * there is nothing to do.
1388		 */
1389		break;
1390	case EJUSTRETURN:
1391		/* skip one instruction */
1392		m88110_skip_insn(tf);
1393		break;
1394	default:
1395	bad:
1396		tf->tf_r[2] = error;
1397		tf->tf_epsr |= PSR_C;   /* fail */
1398		/* skip one instruction */
1399		m88110_skip_insn(tf);
1400		break;
1401	}
1402
1403	mi_syscall_return(p, code, error, rval);
1404}
1405#endif	/* M88110 */
1406
1407/*
1408 * Set up return-value registers as fork() libc stub expects,
1409 * and do normal return-to-user-mode stuff.
1410 */
1411void
1412child_return(arg)
1413	void *arg;
1414{
1415	struct proc *p = arg;
1416	struct trapframe *tf;
1417
1418	tf = (struct trapframe *)USER_REGS(p);
1419	tf->tf_r[2] = 0;
1420	tf->tf_r[3] = 0;
1421	tf->tf_epsr &= ~PSR_C;
1422	/* reset r26 (used by the threads library) if __tfork */
1423	if (p->p_flag & P_THREAD)
1424		tf->tf_r[26] = 0;
1425	/* skip br instruction as in syscall() */
1426#ifdef M88100
1427	if (CPU_IS88100) {
1428		tf->tf_snip = (tf->tf_sfip & XIP_ADDR) | XIP_V;
1429		tf->tf_sfip = tf->tf_snip + 4;
1430	}
1431#endif
1432#ifdef M88110
1433	if (CPU_IS88110) {
1434		/* skip two instructions */
1435		m88110_skip_insn(tf);
1436		m88110_skip_insn(tf);
1437	}
1438#endif
1439
1440	KERNEL_UNLOCK();
1441
1442	mi_child_return(p);
1443}
1444
1445#ifdef PTRACE
1446
1447/*
1448 * User Single Step Debugging Support
1449 */
1450
1451#include <sys/ptrace.h>
1452
1453vaddr_t	ss_branch_taken(u_int, vaddr_t, struct reg *);
1454int	ss_get_value(struct proc *, vaddr_t, u_int *);
1455int	ss_inst_branch_or_call(u_int);
1456int	ss_put_breakpoint(struct proc *, vaddr_t, vaddr_t *, u_int *);
1457
1458#define	SYSCALL_INSTR	0xf000d080	/* tb0 0,r0,128 */
1459
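/*
 * ss_get_value() and ss_put_value() read and write one word of the traced
 * process' text through process_domem(), the same path ptrace(PT_READ_I)
 * and ptrace(PT_WRITE_I) use, so they work on a process other than curproc.
 */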
1460int
1461ss_get_value(struct proc *p, vaddr_t addr, u_int *value)
1462{
1463	struct uio uio;
1464	struct iovec iov;
1465
1466	iov.iov_base = (caddr_t)value;
1467	iov.iov_len = sizeof(u_int);
1468	uio.uio_iov = &iov;
1469	uio.uio_iovcnt = 1;
1470	uio.uio_offset = (off_t)addr;
1471	uio.uio_resid = sizeof(u_int);
1472	uio.uio_segflg = UIO_SYSSPACE;
1473	uio.uio_rw = UIO_READ;
1474	uio.uio_procp = curproc;
1475	return (process_domem(curproc, p, &uio, PT_READ_I));
1476}
1477
1478int
1479ss_put_value(struct proc *p, vaddr_t addr, u_int value)
1480{
1481	struct uio uio;
1482	struct iovec iov;
1483
1484	iov.iov_base = (caddr_t)&value;
1485	iov.iov_len = sizeof(u_int);
1486	uio.uio_iov = &iov;
1487	uio.uio_iovcnt = 1;
1488	uio.uio_offset = (off_t)addr;
1489	uio.uio_resid = sizeof(u_int);
1490	uio.uio_segflg = UIO_SYSSPACE;
1491	uio.uio_rw = UIO_WRITE;
1492	uio.uio_procp = curproc;
1493	return (process_domem(curproc, p, &uio, PT_WRITE_I));
1494}
1495
1496/*
1497 * ss_branch_taken(instruction, pc, regs)
1498 *
1499 * instruction will be a control flow instruction location at address pc.
1500 * Branch taken is supposed to return the address to which the instruction
1501 * would jump if the branch is taken.
1502 *
1503 * This is different from branch_taken() in ddb, as we also need to process
1504 * system calls.
1505 */
1506vaddr_t
1507ss_branch_taken(u_int inst, vaddr_t pc, struct reg *regs)
1508{
1509	u_int regno;
1510
1511	/*
1512	 * Quick check of the instruction. Note that we know we are only
1513	 * invoked if ss_inst_branch_or_call() returns TRUE, so we do not
1514	 * need to repeat the jpm, jsr and syscall stricter checks here.
1515	 * need to repeat the stricter jmp, jsr and syscall checks here.
1516	switch (inst >> (32 - 5)) {
1517	case 0x18:	/* br */
1518	case 0x19:	/* bsr */
1519		/* signed 26 bit pc relative displacement, shift left 2 bits */
1520		inst = (inst & 0x03ffffff) << 2;
1521		/* check if sign extension is needed */
1522		if (inst & 0x08000000)
1523			inst |= 0xf0000000;
1524		return (pc + inst);
1525
1526	case 0x1a:	/* bb0 */
1527	case 0x1b:	/* bb1 */
1528	case 0x1d:	/* bcnd */
1529		/* signed 16 bit pc relative displacement, shift left 2 bits */
1530		inst = (inst & 0x0000ffff) << 2;
1531		/* check if sign extension is needed */
1532		if (inst & 0x00020000)
1533			inst |= 0xfffc0000;
1534		return (pc + inst);
1535
1536	case 0x1e:	/* jmp or jsr */
1537		regno = inst & 0x1f;	/* get the register value */
1538		return (regno == 0 ? 0 : regs->r[regno]);
1539
1540	default:	/* system call */
1541		/*
1542		 * The regular (pc + 4) breakpoint will match the error
1543		 * return. Successful system calls return at (pc + 8),
1544		 * so we'll set up a branch breakpoint there.
1545		 */
1546		return (pc + 8);
1547	}
1548}
1549
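/*
 * The mask/match pair for the 0x1e major opcode below accepts 0xf400c000
 * with bits 10-11 and the low five bits left free, i.e. jmp, jmp.n, jsr
 * and jsr.n with any source register (compare the jsr.n r1 encoding
 * 0xf400cc01 checked in m88110_trap() above).
 */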
1550int
1551ss_inst_branch_or_call(u_int ins)
1552{
1553	/* check high five bits */
1554	switch (ins >> (32 - 5)) {
1555	case 0x18: /* br */
1556	case 0x19: /* bsr */
1557	case 0x1a: /* bb0 */
1558	case 0x1b: /* bb1 */
1559	case 0x1d: /* bcnd */
1560		return (TRUE);
1561	case 0x1e: /* could be jmp or jsr */
1562		if ((ins & 0xfffff3e0) == 0xf400c000)
1563			return (TRUE);
1564	}
1565
1566	return (FALSE);
1567}
1568
1569int
1570ss_put_breakpoint(struct proc *p, vaddr_t va, vaddr_t *bpva, u_int *bpsave)
1571{
1572	int rc;
1573
1574	/* Restore previous breakpoint if we did not trigger it. */
1575	if (*bpva != 0) {
1576		ss_put_value(p, *bpva, *bpsave);
1577		*bpva = 0;
1578	}
1579
1580	/* Save instruction. */
1581	if ((rc = ss_get_value(p, va, bpsave)) != 0)
1582		return (rc);
1583
1584	/* Store breakpoint instruction at the location now. */
1585	*bpva = va;
1586	return (ss_put_value(p, va, SSBREAKPOINT));
1587}
1588
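/*
 * Single-step emulation: a breakpoint is always planted at pc + 4
 * (md_bp0va) and, when the instruction about to be executed may change
 * the flow of control, a second one is planted at the branch target or
 * syscall return address (md_bp1va).  Calling this with sstep == 0
 * simply removes any breakpoints left behind.
 */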
1589int
1590process_sstep(struct proc *p, int sstep)
1591{
1592	struct reg *sstf = USER_REGS(p);
1593	vaddr_t pc, brpc;
1594	u_int32_t instr;
1595	int rc;
1596
1597	if (sstep == 0) {
1598		/* Restore previous breakpoints if any. */
1599		if (p->p_md.md_bp0va != 0) {
1600			ss_put_value(p, p->p_md.md_bp0va, p->p_md.md_bp0save);
1601			p->p_md.md_bp0va = 0;
1602		}
1603		if (p->p_md.md_bp1va != 0) {
1604			ss_put_value(p, p->p_md.md_bp1va, p->p_md.md_bp1save);
1605			p->p_md.md_bp1va = 0;
1606		}
1607
1608		return (0);
1609	}
1610
1611	/*
1612	 * User was stopped at pc, i.e. the instruction at pc was not executed.
1613	 * Fetch what's at the current location.
1614	 */
1615	pc = PC_REGS(sstf);
1616	if ((rc = ss_get_value(p, pc, &instr)) != 0)
1617		return (rc);
1618
1619	/*
1620	 * Find if this instruction may cause a branch, and set up a breakpoint
1621	 * at the branch location.
1622	 */
1623	if (ss_inst_branch_or_call(instr) || instr == SYSCALL_INSTR) {
1624		brpc = ss_branch_taken(instr, pc, sstf);
1625
1626		/* self-branches are hopeless */
1627		if (brpc != pc && brpc != 0) {
1628			if ((rc = ss_put_breakpoint(p, brpc,
1629			    &p->p_md.md_bp1va, &p->p_md.md_bp1save)) != 0)
1630				return (rc);
1631		}
1632	}
1633
1634	if ((rc = ss_put_breakpoint(p, pc + 4,
1635	    &p->p_md.md_bp0va, &p->p_md.md_bp0save)) != 0)
1636		return (rc);
1637
1638	return (0);
1639}
1640
1641#endif	/* PTRACE */
1642
1643#ifdef DIAGNOSTIC
1644void
1645splassert_check(int wantipl, const char *func)
1646{
1647	int oldipl;
1648
1649	oldipl = getipl();
1650
1651	if (oldipl < wantipl) {
1652		splassert_fail(wantipl, oldipl, func);
1653		/*
1654		 * This will raise the spl,
1655		 * in a feeble attempt to reduce further damage.
1656		 */
1657		(void)raiseipl(wantipl);
1658	}
1659}
1660#endif
1661
1662/*
1663 * ld.d and st.d instructions referencing long aligned but not long long
1664 * aligned addresses will trigger a misaligned address exception.
1665 *
1666 * This routine attempts to recover these (valid) statements, by simulating
1667 * the split form of the instruction. If it fails, it returns the appropriate
1668 * signal number to deliver.
1669 *
1670 * Note that we do not attempt to do anything for .d.usr instructions - the
1671 * kernel never issues such instructions, and they cause a privileged
1672 * instruction exception from userland.
1673 */
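/*
 * The recovery itself is performed as two 32-bit user-space accesses:
 * rD is paired with the word at addr and rD+1 with the word at addr + 4,
 * with the r0 and r31 boundary cases handled explicitly below.
 */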
1674int
1675double_reg_fixup(struct trapframe *frame, int fault)
1676{
1677	u_int32_t pc, instr, value;
1678	int regno, store;
1679	vaddr_t addr;
1680
1681	/*
1682	 * Decode the faulting instruction.
1683	 */
1684
1685	pc = PC_REGS(&frame->tf_regs);
1686	if (copyin((void *)pc, &instr, sizeof(u_int32_t)) != 0)
1687		return SIGSEGV;
1688
1689	switch (instr & 0xfc00ff00) {
1690	case 0xf4001000:	/* ld.d rD, rS1, rS2 */
1691		addr = frame->tf_r[(instr >> 16) & 0x1f]
1692		    + frame->tf_r[(instr & 0x1f)];
1693		store = 0;
1694		break;
1695	case 0xf4002000:	/* st.d rD, rS1, rS2 */
1696		addr = frame->tf_r[(instr >> 16) & 0x1f]
1697		    + frame->tf_r[(instr & 0x1f)];
1698		store = 1;
1699		break;
1700	default:
1701		switch (instr >> 26) {
1702		case 0x10000000 >> 26:	/* ld.d rD, rS, imm16 */
1703			addr = (instr & 0x0000ffff) +
1704			    frame->tf_r[(instr >> 16) & 0x1f];
1705			store = 0;
1706			break;
1707		case 0x20000000 >> 26:	/* st.d rD, rS, imm16 */
1708			addr = (instr & 0x0000ffff) +
1709			    frame->tf_r[(instr >> 16) & 0x1f];
1710			store = 1;
1711			break;
1712		default:
1713			return SIGBUS;
1714		}
1715		break;
1716	}
1717
1718	regno = (instr >> 21) & 0x1f;
1719
1720	switch (fault) {
1721	case T_MISALGNFLT:
1722		/* We only handle long but not long long aligned access here */
1723		if ((addr & 0x07) != 4)
1724			return SIGBUS;
1725		break;
1726	case T_ILLFLT:
1727		/* We only handle odd register pair number here */
1728		if ((regno & 0x01) == 0)
1729			return SIGILL;
1730		/* We only handle long aligned access here */
1731		if ((addr & 0x03) != 0)
1732			return SIGBUS;
1733		break;
1734	}
1735
1736	if (store) {
1737		/*
1738		 * Two word stores.
1739		 */
1740		if (regno == 0)
1741			value = 0;
1742		else
1743			value = frame->tf_r[regno];
1744		if (copyout(&value, (void *)addr, sizeof(u_int32_t)) != 0)
1745			return SIGSEGV;
1746		if (regno == 31)
1747			value = 0;
1748		else
1749			value = frame->tf_r[regno + 1];
1750		if (copyout(&value, (void *)(addr + 4), sizeof(u_int32_t)) != 0)
1751			return SIGSEGV;
1752	} else {
1753		/*
1754		 * Two word loads. r0 should be left unaltered, but the
1755		 * value should still be fetched even if it is discarded.
1756		 */
1757		if (copyin((void *)addr, &value, sizeof(u_int32_t)) != 0)
1758			return SIGSEGV;
1759		if (regno != 0)
1760			frame->tf_r[regno] = value;
1761		if (copyin((void *)(addr + 4), &value, sizeof(u_int32_t)) != 0)
1762			return SIGSEGV;
1763		if (regno != 31)
1764			frame->tf_r[regno + 1] = value;
1765	}
1766
1767	return 0;
1768}
1769
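/*
 * cache_flush() is entered from a trap with the user's r2 holding the
 * start address and r3 the length of the range to synchronize.  Every
 * page of the range that is actually mapped is synchronized with
 * dma_cachectl(..., DMA_CACHE_SYNC), and the saved instruction pointers
 * are then adjusted (error bits cleared on 88100, instruction skipped on
 * 88110) so that execution resumes after the trap instruction.
 */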
1770void
1771cache_flush(struct trapframe *tf)
1772{
1773	struct proc *p = curproc;
1774	struct pmap *pmap;
1775	paddr_t pa;
1776	vaddr_t va;
1777	vsize_t len, count;
1778
1779	p->p_md.md_tf = tf;
1780
1781	pmap = vm_map_pmap(&p->p_vmspace->vm_map);
1782	va = tf->tf_r[2];
1783	len = tf->tf_r[3];
1784
1785	if (/* va < VM_MIN_ADDRESS || */ va >= VM_MAXUSER_ADDRESS ||
1786	    va + len <= va || va + len >= VM_MAXUSER_ADDRESS)
1787		len = 0;
1788
1789	while (len != 0) {
1790		count = min(len, PAGE_SIZE - (va & PAGE_MASK));
1791		if (pmap_extract(pmap, va, &pa) != FALSE)
1792			dma_cachectl(pa, count, DMA_CACHE_SYNC);
1793		va += count;
1794		len -= count;
1795	}
1796
1797#ifdef M88100
1798	if (CPU_IS88100) {
1799		tf->tf_snip = tf->tf_snip & ~NIP_E;
1800		tf->tf_sfip = tf->tf_sfip & ~FIP_E;
1801	}
1802#endif
1803#ifdef M88110
1804	if (CPU_IS88110) {
1805		/* skip instruction */
1806		m88110_skip_insn(tf);
1807	}
1808#endif
1809
1810	userret(p);
1811}
1812