/* trap.c revision 1.74 */
1/*	$OpenBSD: trap.c,v 1.74 2011/04/03 14:56:28 guenther Exp $	*/
2/*
3 * Copyright (c) 2004, Miodrag Vallat.
4 * Copyright (c) 1998 Steve Murphree, Jr.
5 * Copyright (c) 1996 Nivas Madhur
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 *    must display the following acknowledgement:
18 *      This product includes software developed by Nivas Madhur.
19 * 4. The name of the author may not be used to endorse or promote products
20 *    derived from this software without specific prior written permission
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 */
34/*
35 * Mach Operating System
36 * Copyright (c) 1991 Carnegie Mellon University
37 * Copyright (c) 1991 OMRON Corporation
38 * All Rights Reserved.
39 *
40 * Permission to use, copy, modify and distribute this software and its
41 * documentation is hereby granted, provided that both the copyright
42 * notice and this permission notice appear in all copies of the
43 * software, derivative works or modified versions, and any portions
44 * thereof, and that both notices appear in supporting documentation.
45 *
46 */
47
48#include <sys/types.h>
49#include <sys/param.h>
50#include <sys/proc.h>
51#include <sys/signalvar.h>
52#include <sys/user.h>
53#include <sys/syscall.h>
54#include <sys/systm.h>
55#include <sys/ktrace.h>
56
57#include "systrace.h"
58#include <dev/systrace.h>
59
60#include <uvm/uvm_extern.h>
61
62#include <machine/asm_macro.h>
63#include <machine/cmmu.h>
64#include <machine/cpu.h>
65#ifdef M88100
66#include <machine/m88100.h>
67#include <machine/m8820x.h>
68#endif
69#ifdef M88110
70#include <machine/m88110.h>
71#endif
72#include <machine/fpu.h>
73#include <machine/pcb.h>
74#include <machine/psl.h>
75#include <machine/trap.h>
76
77#include <machine/db_machdep.h>
78
/* Breakpoint instruction inserted by the kernel for PT_STEP single-stepping. */
#define SSBREAKPOINT (0xF000D1F8U) /* Single Step Breakpoint */

/* Did the trap come from user mode (PSR supervisor bit clear) or kernel mode? */
#define USERMODE(PSR)   (((PSR) & PSR_MODE) == 0)
#define SYSTEMMODE(PSR) (((PSR) & PSR_MODE) != 0)

/* Local prototypes. */
void printtrap(int, struct trapframe *);
__dead void panictrap(int, struct trapframe *);
__dead void error_fatal(struct trapframe *);
int double_reg_fixup(struct trapframe *);
int ss_put_value(struct proc *, vaddr_t, u_int);

/* Register dump helper, defined in the DDB support code. */
extern void regdump(struct trapframe *f);
91
/*
 * Human-readable names for the hardware trap vectors, indexed by vector
 * number; used by panictrap() to build panic messages.
 *
 * Note: the original table was missing the comma after
 * "Privilege Violation", so adjacent string literals were concatenated,
 * silently dropping one entry and shifting every later name by one.
 */
const char *trap_type[] = {
	"Reset",
	"Interrupt Exception",
	"Instruction Access",
	"Data Access Exception",
	"Misaligned Access",
	"Unimplemented Opcode",
	"Privilege Violation",
	"Bounds Check Violation",
	"Illegal Integer Divide",
	"Integer Overflow",
	"Error Exception",
	"Non-Maskable Exception",
};

/* Number of entries in trap_type[], for the bounds check in panictrap(). */
const int trap_types = sizeof trap_type / sizeof trap_type[0];
108
#ifdef M88100
/*
 * Human-readable names for 88100 P bus fault status codes, indexed by
 * the value extracted with CMMU_PFSR_FAULT() (see printtrap() and
 * m88100_trap()).
 */
const char *pbus_exception_type[] = {
	"Success (No Fault)",
	"unknown 1",
	"unknown 2",
	"Bus Error",
	"Segment Fault",
	"Page Fault",
	"Supervisor Violation",
	"Write Violation",
};
#endif
121
122static inline void
123userret(struct proc *p)
124{
125	int sig;
126
127	/* take pending signals */
128	while ((sig = CURSIG(p)) != 0)
129		postsig(sig);
130
131	curcpu()->ci_schedstate.spc_curpriority = p->p_priority = p->p_usrpri;
132}
133
/*
 * Print a human-readable description of a trap frame to the console,
 * and dump the registers when DDB is configured.  Called once from
 * panictrap() before panicking.
 *
 * On 88100, types 2 and 3 get special decoding of the CMMU P bus fault
 * status (presumably these literals match the instruction and data
 * access vectors, i.e. T_INSTFLT/T_DATAFLT -- verify against trap.h).
 */
void
printtrap(int type, struct trapframe *frame)
{
#ifdef M88100
	if (CPU_IS88100) {
		if (type == 2) {
			/* instruction exception */
			printf("\nInstr access fault (%s) v = %x, frame %p\n",
			    pbus_exception_type[
			      CMMU_PFSR_FAULT(frame->tf_ipfsr)],
			    frame->tf_sxip & XIP_ADDR, frame);
		} else if (type == 3) {
			/* data access exception */
			printf("\nData access fault (%s) v = %x, frame %p\n",
			    pbus_exception_type[
			      CMMU_PFSR_FAULT(frame->tf_dpfsr)],
			    frame->tf_sxip & XIP_ADDR, frame);
		} else
			printf("\nTrap type %d, v = %x, frame %p\n",
			    type, frame->tf_sxip & XIP_ADDR, frame);
	}
#endif
#ifdef M88110
	if (CPU_IS88110) {
		printf("\nTrap type %d, v = %x, frame %p\n",
		    type, frame->tf_exip, frame);
	}
#endif
#ifdef DDB
	regdump(frame);
#endif
}
166
167__dead void
168panictrap(int type, struct trapframe *frame)
169{
170	static int panicing = 0;
171
172	if (panicing++ == 0)
173		printtrap(type, frame);
174	if ((u_int)type < trap_types)
175		panic(trap_type[type]);
176	else
177		panic("trap %d", type);
178	/*NOTREACHED*/
179}
180
181/*
182 * Handle external interrupts.
183 */
184void
185interrupt(struct trapframe *frame)
186{
187	struct cpu_info *ci = curcpu();
188
189	ci->ci_intrdepth++;
190	md_interrupt_func(frame);
191	ci->ci_intrdepth--;
192}
193
194#ifdef M88110
195/*
196 * Handle non-maskable interrupts.
197 */
198int
199nmi(struct trapframe *frame)
200{
201	return md_nmi_func(frame);
202}
203
/*
 * Reenable non-maskable interrupts after nmi() processing, via the
 * machine-dependent hook.
 */
void
nmi_wrapup(struct trapframe *frame)
{
	md_nmi_wrapup_func(frame);
}
#endif
213
214/*
215 * Handle asynchronous software traps.
216 */
217void
218ast(struct trapframe *frame)
219{
220	struct cpu_info *ci = curcpu();
221	struct proc *p = ci->ci_curproc;
222
223	uvmexp.softs++;
224	p->p_md.md_astpending = 0;
225	if (p->p_flag & P_OWEUPC) {
226		KERNEL_PROC_LOCK(p);
227		ADDUPROF(p);
228		KERNEL_PROC_UNLOCK(p);
229	}
230	if (ci->ci_want_resched)
231		preempt(NULL);
232
233	userret(p);
234}
235
236#ifdef M88100
/*
 * 88100 trap handler.
 *
 * Dispatches on the trap type: kernel-mode faults are either resolved
 * via uvm_fault() / data_access_emulation() or end in panictrap();
 * user-mode faults and traps that cannot be resolved are converted
 * into a signal (sig/fault_type/fault_code) delivered below before
 * returning through userret().
 *
 * Locking: the kernel T_DATAFLT path that jumps to user_fault takes
 * KERNEL_LOCK(), while the user-mode entries take KERNEL_PROC_LOCK();
 * the unlock at the end of the shared code distinguishes the two by
 * checking `type == T_DATAFLT'.
 */
void
m88100_trap(u_int type, struct trapframe *frame)
{
	struct proc *p;
	struct vm_map *map;
	vaddr_t va, pcb_onfault;
	vm_prot_t ftype;
	int fault_type, pbus_type;
	u_long fault_code;
	vaddr_t fault_addr;
	struct vmspace *vm;
	union sigval sv;
	int result;
#ifdef DDB
	int s;
	u_int psr;
#endif
	int sig = 0;

	uvmexp.traps++;
	if ((p = curproc) == NULL)
		p = &proc0;

	if (USERMODE(frame->tf_epsr)) {
		type += T_USER;
		p->p_md.md_tf = frame;	/* for ptrace/signals */
	}
	fault_type = SI_NOINFO;
	fault_code = 0;
	fault_addr = frame->tf_sxip & XIP_ADDR;

	switch (type) {
	default:
	case T_ILLFLT:
lose:
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

#if defined(DDB)
	case T_KDB_BREAK:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_ENTRY:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
#endif /* DDB */
	case T_MISALGNFLT:
		printf("kernel misaligned access exception @%p\n",
		    frame->tf_sxip);
		goto lose;
	case T_INSTFLT:
		/* kernel mode instruction access fault.
		 * Should never, never happen for a non-paged kernel.
		 */
#ifdef TRAPDEBUG
		pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
		printf("Kernel Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
		    pbus_type, pbus_exception_type[pbus_type],
		    fault_addr, frame, frame->tf_cpu);
#endif
		goto lose;
	case T_DATAFLT:
		/* kernel mode data fault */

		/* data fault on the user address? */
		if ((frame->tf_dmt0 & DMT_DAS) == 0) {
			KERNEL_LOCK();
			goto user_fault;
		}

		fault_addr = frame->tf_dma0;
		if (frame->tf_dmt0 & (DMT_WRITE|DMT_LOCKBAR)) {
			ftype = VM_PROT_READ|VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		} else {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		}

		va = trunc_page((vaddr_t)fault_addr);

		KERNEL_LOCK();
		vm = p->p_vmspace;
		map = kernel_map;

		pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
#ifdef TRAPDEBUG
		printf("Kernel Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
		    pbus_type, pbus_exception_type[pbus_type],
		    fault_addr, frame, frame->tf_cpu);
#endif

		switch (pbus_type) {
		case CMMU_PFSR_SUCCESS:
			/*
			 * The fault was resolved. Call data_access_emulation
			 * to drain the data unit pipe line and reset dmt0
			 * so that trap won't get called again.
			 */
			data_access_emulation((u_int *)frame);
			frame->tf_dpfsr = 0;
			frame->tf_dmt0 = 0;
			KERNEL_UNLOCK();
			return;
		case CMMU_PFSR_SFAULT:
		case CMMU_PFSR_PFAULT:
			/* clear pcb_onfault around uvm_fault() so a
			 * recursive fault panics instead of looping */
			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
				p->p_addr->u_pcb.pcb_onfault = 0;
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			/*
			 * This could be a fault caused in copyout*()
			 * while accessing kernel space.
			 */
			if (result != 0 && pcb_onfault != 0) {
				frame->tf_snip = pcb_onfault | NIP_V;
				frame->tf_sfip = (pcb_onfault + 4) | FIP_V;
				frame->tf_sxip = 0;
				/*
				 * Continue as if the fault had been resolved,
				 * but do not try to complete the faulting
				 * access.
				 */
				frame->tf_dmt0 |= DMT_SKIP;
				result = 0;
			}
			if (result == 0) {
				/*
				 * We could resolve the fault. Call
				 * data_access_emulation to drain the data
				 * unit pipe line and reset dmt0 so that trap
				 * won't get called again.
				 */
				data_access_emulation((u_int *)frame);
				frame->tf_dpfsr = 0;
				frame->tf_dmt0 = 0;
				KERNEL_UNLOCK();
				return;
			}
			break;
		}
#ifdef TRAPDEBUG
		printf("PBUS Fault %d (%s) va = 0x%x\n", pbus_type,
		    pbus_exception_type[pbus_type], va);
#endif
		KERNEL_UNLOCK();
		goto lose;
		/* NOTREACHED */
	case T_INSTFLT+T_USER:
		/* User mode instruction access fault */
		/* FALLTHROUGH */
	case T_DATAFLT+T_USER:
		KERNEL_PROC_LOCK(p);
user_fault:
		if (type == T_INSTFLT + T_USER) {
			pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
#ifdef TRAPDEBUG
			printf("User Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
			    pbus_type, pbus_exception_type[pbus_type],
			    fault_addr, frame, frame->tf_cpu);
#endif
		} else {
			fault_addr = frame->tf_dma0;
			pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
#ifdef TRAPDEBUG
			printf("User Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
			    pbus_type, pbus_exception_type[pbus_type],
			    fault_addr, frame, frame->tf_cpu);
#endif
		}

		if (frame->tf_dmt0 & (DMT_WRITE | DMT_LOCKBAR)) {
			ftype = VM_PROT_READ | VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		} else {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		}

		va = trunc_page((vaddr_t)fault_addr);

		vm = p->p_vmspace;
		map = &vm->vm_map;
		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
			p->p_addr->u_pcb.pcb_onfault = 0;

		/* Call uvm_fault() to resolve non-bus error faults */
		switch (pbus_type) {
		case CMMU_PFSR_SUCCESS:
			result = 0;
			break;
		case CMMU_PFSR_BERROR:
			result = EACCES;
			break;
		default:
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			break;
		}

		p->p_addr->u_pcb.pcb_onfault = pcb_onfault;

		/* grow the stack on faults above the stack bottom */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (result == 0)
				uvm_grow(p, va);
			else if (result == EACCES)
				result = EFAULT;
		}

		/*
		 * This could be a fault caused in copyin*()
		 * while accessing user space.
		 */
		if (result != 0 && pcb_onfault != 0) {
			frame->tf_snip = pcb_onfault | NIP_V;
			frame->tf_sfip = (pcb_onfault + 4) | FIP_V;
			frame->tf_sxip = 0;
			/*
			 * Continue as if the fault had been resolved, but
			 * do not try to complete the faulting access.
			 */
			frame->tf_dmt0 |= DMT_SKIP;
			result = 0;
		}

		if (result == 0) {
			if (type == T_INSTFLT + T_USER) {
				/*
				 * back up SXIP, SNIP,
				 * clearing the Error bit
				 */
				frame->tf_sfip = frame->tf_snip & ~FIP_E;
				frame->tf_snip = frame->tf_sxip & ~NIP_E;
				frame->tf_ipfsr = 0;
			} else {
				/*
			 	 * We could resolve the fault. Call
			 	 * data_access_emulation to drain the data unit
			 	 * pipe line and reset dmt0 so that trap won't
			 	 * get called again.
			 	 */
				data_access_emulation((u_int *)frame);
				frame->tf_dpfsr = 0;
				frame->tf_dmt0 = 0;
			}
		} else {
			sig = result == EACCES ? SIGBUS : SIGSEGV;
			fault_type = result == EACCES ?
			    BUS_ADRERR : SEGV_MAPERR;
		}
		/* kernel-mode entry via user_fault took KERNEL_LOCK() above */
		if (type == T_DATAFLT)
			KERNEL_UNLOCK();
		else
			KERNEL_PROC_UNLOCK(p);
		break;
	case T_MISALGNFLT+T_USER:
		/* Fix any misaligned ld.d or st.d instructions */
		sig = double_reg_fixup(frame);
		fault_type = BUS_ADRALN;
		break;
	case T_PRIVINFLT+T_USER:
	case T_ILLFLT+T_USER:
#ifndef DDB
	case T_KDB_BREAK:
	case T_KDB_ENTRY:
#endif
	case T_KDB_BREAK+T_USER:
	case T_KDB_ENTRY+T_USER:
	case T_KDB_TRACE:
	case T_KDB_TRACE+T_USER:
		sig = SIGILL;
		break;
	case T_BNDFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_ZERODIV+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTDIV;
		break;
	case T_OVFFLT+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTOVF;
		break;
	case T_FPEPFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_SIGSYS+T_USER:
		sig = SIGSYS;
		break;
	case T_STEPBPT+T_USER:
#ifdef PTRACE
		/*
		 * This trap is used by the kernel to support single-step
		 * debugging (although any user could generate this trap
		 * which should probably be handled differently). When a
		 * process is continued by a debugger with the PT_STEP
		 * function of ptrace (single step), the kernel inserts
		 * one or two breakpoints in the user process so that only
		 * one instruction (or two in the case of a delayed branch)
		 * is executed.  When this breakpoint is hit, we get the
		 * T_STEPBPT trap.
		 */
		{
			u_int instr;
			vaddr_t pc = PC_REGS(&frame->tf_regs);

			/* read break instruction */
			copyin((caddr_t)pc, &instr, sizeof(u_int));

			/* check and see if we got here by accident */
			if ((p->p_md.md_bp0va != pc &&
			     p->p_md.md_bp1va != pc) ||
			    instr != SSBREAKPOINT) {
				sig = SIGTRAP;
				fault_type = TRAP_TRACE;
				break;
			}

			/* restore original instruction and clear breakpoint */
			if (p->p_md.md_bp0va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp0save);
				p->p_md.md_bp0va = 0;
			}
			if (p->p_md.md_bp1va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp1save);
				p->p_md.md_bp1va = 0;
			}

#if 1
			frame->tf_sfip = frame->tf_snip;
			frame->tf_snip = pc | NIP_V;
#endif
			sig = SIGTRAP;
			fault_type = TRAP_BRKPT;
		}
#else
		sig = SIGTRAP;
		fault_type = TRAP_TRACE;
#endif
		break;

	case T_USERBPT+T_USER:
		/*
		 * This trap is meant to be used by debuggers to implement
		 * breakpoint debugging.  When we get this trap, we just
		 * return a signal which gets caught by the debugger.
		 */
		frame->tf_sfip = frame->tf_snip;
		frame->tf_snip = frame->tf_sxip;
		sig = SIGTRAP;
		fault_type = TRAP_BRKPT;
		break;

	}

	/*
	 * If trap from supervisor mode, just return
	 */
	if (type < T_USER)
		return;

	if (sig) {
		sv.sival_ptr = (void *)fault_addr;
		KERNEL_PROC_LOCK(p);
		trapsignal(p, sig, fault_code, fault_type, sv);
		KERNEL_PROC_UNLOCK(p);
		/*
		 * don't want multiple faults - we are going to
		 * deliver signal.
		 */
		frame->tf_dmt0 = 0;
		frame->tf_ipfsr = frame->tf_dpfsr = 0;
	}

	userret(p);
}
621#endif /* M88100 */
622
623#ifdef M88110
/*
 * 88110 trap handler.
 *
 * Like m88100_trap(), but for the 88110 exception model (exip/enip,
 * dsr/isr fault status).  Also contains software workarounds for
 * documented 88110 errata (see the comments below).  Unresolvable
 * user-mode traps are converted into signals delivered via the
 * `deliver' label before returning through userret().
 */
void
m88110_trap(u_int type, struct trapframe *frame)
{
	struct proc *p;
	struct vm_map *map;
	vaddr_t va, pcb_onfault;
	vm_prot_t ftype;
	int fault_type;
	u_long fault_code;
	vaddr_t fault_addr;
	struct vmspace *vm;
	union sigval sv;
	int result;
#ifdef DDB
        int s;
	u_int psr;
#endif
	int sig = 0;

	uvmexp.traps++;
	if ((p = curproc) == NULL)
		p = &proc0;

	fault_type = SI_NOINFO;
	fault_code = 0;
	fault_addr = frame->tf_exip & XIP_ADDR;

	/*
	 * 88110 errata #16 (4.2) or #3 (5.1.1):
	 * ``bsr, br, bcnd, jsr and jmp instructions with the .n extension
	 *   can cause the enip value to be incremented by 4 incorrectly
	 *   if the instruction in the delay slot is the first word of a
	 *   page which misses in the mmu and results in a hardware
	 *   tablewalk which encounters an exception or an invalid
	 *   descriptor.  The exip value in this case will point to the
	 *   first word of the page, and the D bit will be set.
	 *
	 *   Note: if the instruction is a jsr.n r1, r1 will be overwritten
	 *   with erroneous data.  Therefore, no recovery is possible. Do
	 *   not allow this instruction to occupy the last word of a page.
	 *
	 *   Suggested fix: recover in general by backing up the exip by 4
	 *   and clearing the delay bit before an rte when the lower 3 hex
	 *   digits of the exip are 001.''
	 */
	if ((frame->tf_exip & PAGE_MASK) == 0x00000001 && type == T_INSTFLT) {
		u_int instr;

		/*
		 * Note that we have initialized fault_addr above, so that
		 * signals provide the correct address if necessary.
		 */
		frame->tf_exip = (frame->tf_exip & ~1) - 4;

		/*
		 * Check the instruction at the (backed up) exip.
		 * If it is a jsr.n, abort.
		 */
		if (!USERMODE(frame->tf_epsr)) {
			instr = *(u_int *)fault_addr;
			if (instr == 0xf400cc01)
				panic("mc88110 errata #16, exip %p enip %p",
				    (frame->tf_exip + 4) | 1, frame->tf_enip);
		} else {
			/* copyin here should not fail */
			if (copyin((const void *)frame->tf_exip, &instr,
			    sizeof instr) == 0 &&
			    instr == 0xf400cc01) {
				uprintf("mc88110 errata #16, exip %p enip %p",
				    (frame->tf_exip + 4) | 1, frame->tf_enip);
				sig = SIGILL;
			}
		}
	}

	if (USERMODE(frame->tf_epsr)) {
		type += T_USER;
		p->p_md.md_tf = frame;	/* for ptrace/signals */
	}

	if (sig != 0)
		goto deliver;

	switch (type) {
	default:
lose:
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

#ifdef DEBUG
	case T_110_DRM+T_USER:
	case T_110_DRM:
		printf("DMMU read miss: Hardware Table Searches should be enabled!\n");
		goto lose;
	case T_110_DWM+T_USER:
	case T_110_DWM:
		printf("DMMU write miss: Hardware Table Searches should be enabled!\n");
		goto lose;
	case T_110_IAM+T_USER:
	case T_110_IAM:
		printf("IMMU miss: Hardware Table Searches should be enabled!\n");
		goto lose;
#endif

#ifdef DDB
	case T_KDB_TRACE:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_TRACE, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_BREAK:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_ENTRY:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
		set_psr(psr);
		/* skip trap instruction */
		m88110_skip_insn(frame);
		splx(s);
		return;
#endif /* DDB */
	case T_ILLFLT:
		/*
		 * The 88110 seems to trigger an instruction fault in
		 * supervisor mode when running the following sequence:
		 *
		 *	bcnd.n cond, reg, 1f
		 *	arithmetic insn
		 *	...
		 *  	the same exact arithmetic insn
		 *  1:	another arithmetic insn stalled by the previous one
		 *	...
		 *
		 * The exception is reported with exip pointing to the
		 * branch address. I don't know, at this point, if there
		 * is any better workaround than the aggressive one
		 * implemented below; I don't see how this could relate to
		 * any of the 88110 errata (although it might be related to
		 * branch prediction).
		 *
		 * For the record, the exact sequence triggering the
		 * spurious exception is:
		 *
		 *	bcnd.n	eq0, r2,  1f
		 *	 or	r25, r0,  r22
		 *	bsr	somewhere
		 *	or	r25, r0,  r22
		 *  1:	cmp	r13, r25, r20
		 *
		 * within the same cache line.
		 *
		 * Simply ignoring the exception and returning does not
		 * cause the exception to disappear. Clearing the
		 * instruction cache works, but on 88110+88410 systems,
		 * the 88410 needs to be invalidated as well. (note that
		 * the size passed to the flush routines does not matter
		 * since there is no way to flush a subset of the 88110
		 * I$ anyway)
		 */
	    {
		extern void *kernel_text, *etext;

		if (fault_addr >= (vaddr_t)&kernel_text &&
		    fault_addr < (vaddr_t)&etext) {
			cmmu_icache_inv(curcpu()->ci_cpuid,
			    trunc_page(fault_addr), PAGE_SIZE);
			cmmu_cache_wbinv(curcpu()->ci_cpuid,
			    trunc_page(fault_addr), PAGE_SIZE);
			return;
		}
	    }
		goto lose;
	case T_MISALGNFLT:
		printf("kernel misaligned access exception @%p\n",
		    frame->tf_exip);
		goto lose;
	case T_INSTFLT:
		/* kernel mode instruction access fault.
		 * Should never, never happen for a non-paged kernel.
		 */
#ifdef TRAPDEBUG
		printf("Kernel Instruction fault exip %x isr %x ilar %x\n",
		    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
#endif
		goto lose;

	case T_DATAFLT:
		/* kernel mode data fault */

		/* data fault on the user address? */
		if ((frame->tf_dsr & CMMU_DSR_SU) == 0) {
			KERNEL_LOCK();
			goto m88110_user_fault;
		}

#ifdef TRAPDEBUG
		printf("Kernel Data access fault exip %x dsr %x dlar %x\n",
		    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
#endif

		fault_addr = frame->tf_dlar;
		if (frame->tf_dsr & CMMU_DSR_RW) {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		} else {
			ftype = VM_PROT_READ|VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		}

		va = trunc_page((vaddr_t)fault_addr);

		KERNEL_LOCK();
		vm = p->p_vmspace;
		map = kernel_map;

		if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
			/*
			 * On a segment or a page fault, call uvm_fault() to
			 * resolve the fault.
			 */
			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
				p->p_addr->u_pcb.pcb_onfault = 0;
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			/*
			 * This could be a fault caused in copyout*()
			 * while accessing kernel space.
			 */
			if (result != 0 && pcb_onfault != 0) {
				frame->tf_exip = pcb_onfault;
				/*
				 * Continue as if the fault had been resolved.
				 */
				result = 0;
			}
			if (result == 0) {
				KERNEL_UNLOCK();
				return;
			}
		}
		KERNEL_UNLOCK();
		goto lose;
	case T_INSTFLT+T_USER:
		/* User mode instruction access fault */
		/* FALLTHROUGH */
	case T_DATAFLT+T_USER:
		KERNEL_PROC_LOCK(p);
m88110_user_fault:
		if (type == T_INSTFLT+T_USER) {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
#ifdef TRAPDEBUG
			printf("User Instruction fault exip %x isr %x ilar %x\n",
			    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
#endif
		} else {
			fault_addr = frame->tf_dlar;
			if (frame->tf_dsr & CMMU_DSR_RW) {
				ftype = VM_PROT_READ;
				fault_code = VM_PROT_READ;
			} else {
				ftype = VM_PROT_READ|VM_PROT_WRITE;
				fault_code = VM_PROT_WRITE;
			}
#ifdef TRAPDEBUG
			printf("User Data access fault exip %x dsr %x dlar %x\n",
			    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
#endif
		}

		va = trunc_page((vaddr_t)fault_addr);

		vm = p->p_vmspace;
		map = &vm->vm_map;
		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
			p->p_addr->u_pcb.pcb_onfault = 0;

		/*
		 * Call uvm_fault() to resolve non-bus error faults
		 * whenever possible.
		 */
		if (type == T_INSTFLT+T_USER) {
			/* instruction faults */
			if (frame->tf_isr &
			    (CMMU_ISR_BE | CMMU_ISR_SP | CMMU_ISR_TBE)) {
				/* bus error, supervisor protection */
				result = EACCES;
			} else
			if (frame->tf_isr & (CMMU_ISR_SI | CMMU_ISR_PI)) {
				/* segment or page fault */
				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			} else {
#ifdef TRAPDEBUG
				printf("Unexpected Instruction fault isr %x\n",
				    frame->tf_isr);
#endif
				if (type == T_DATAFLT)
					KERNEL_UNLOCK();
				else
					KERNEL_PROC_UNLOCK(p);
				goto lose;
			}
		} else {
			/* data faults */
			if (frame->tf_dsr & CMMU_DSR_BE) {
				/* bus error */
				result = EACCES;
			} else
			if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
				/* segment or page fault */
				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			} else
			if (frame->tf_dsr & (CMMU_DSR_CP | CMMU_DSR_WA)) {
				/* copyback or write allocate error */
				result = EACCES;
			} else
			if (frame->tf_dsr & CMMU_DSR_WE) {
				/* write fault  */
				/* This could be a write protection fault or an
				 * exception to set the used and modified bits
				 * in the pte. Basically, if we got a write
				 * error, then we already have a pte entry that
				 * faulted in from a previous seg fault or page
				 * fault.
				 * Get the pte and check the status of the
				 * modified and valid bits to determine if this
				 * indeed a real write fault.  XXX smurph
				 */
				if (pmap_set_modify(map->pmap, va)) {
#ifdef TRAPDEBUG
					printf("Corrected userland write fault, pmap %p va %p\n",
					    map->pmap, va);
#endif
					result = 0;
				} else {
					/* must be a real wp fault */
#ifdef TRAPDEBUG
					printf("Uncorrected userland write fault, pmap %p va %p\n",
					    map->pmap, va);
#endif
					result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
				}
			} else {
#ifdef TRAPDEBUG
				printf("Unexpected Data access fault dsr %x\n",
				    frame->tf_dsr);
#endif
				if (type == T_DATAFLT)
					KERNEL_UNLOCK();
				else
					KERNEL_PROC_UNLOCK(p);
				goto lose;
			}
		}
		p->p_addr->u_pcb.pcb_onfault = pcb_onfault;

		/* grow the stack on faults above the stack bottom */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (result == 0)
				uvm_grow(p, va);
			else if (result == EACCES)
				result = EFAULT;
		}
		/* kernel-mode entry via m88110_user_fault took KERNEL_LOCK() */
		if (type == T_DATAFLT)
			KERNEL_UNLOCK();
		else
			KERNEL_PROC_UNLOCK(p);

		/*
		 * This could be a fault caused in copyin*()
		 * while accessing user space.
		 */
		if (result != 0 && pcb_onfault != 0) {
			frame->tf_exip = pcb_onfault;
			/*
			 * Continue as if the fault had been resolved.
			 */
			result = 0;
		}

		if (result != 0) {
			sig = result == EACCES ? SIGBUS : SIGSEGV;
			fault_type = result == EACCES ?
			    BUS_ADRERR : SEGV_MAPERR;
		}
		break;
	case T_MISALGNFLT+T_USER:
		/* Fix any misaligned ld.d or st.d instructions */
		sig = double_reg_fixup(frame);
		fault_type = BUS_ADRALN;
		if (sig == 0) {
			/* skip recovered instruction */
			m88110_skip_insn(frame);
			goto userexit;
		}
		break;
	case T_PRIVINFLT+T_USER:
		fault_type = ILL_PRVREG;
		/* FALLTHROUGH */
	case T_ILLFLT+T_USER:
#ifndef DDB
	case T_KDB_BREAK:
	case T_KDB_ENTRY:
	case T_KDB_TRACE:
#endif
	case T_KDB_BREAK+T_USER:
	case T_KDB_ENTRY+T_USER:
	case T_KDB_TRACE+T_USER:
		sig = SIGILL;
		break;
	case T_BNDFLT+T_USER:
		sig = SIGFPE;
		/* skip trap instruction */
		m88110_skip_insn(frame);
		break;
	case T_ZERODIV+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTDIV;
		/* skip trap instruction */
		m88110_skip_insn(frame);
		break;
	case T_OVFFLT+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTOVF;
		/* skip trap instruction */
		m88110_skip_insn(frame);
		break;
	case T_FPEPFLT+T_USER:
		m88110_fpu_exception(frame);
		goto userexit;
	case T_SIGSYS+T_USER:
		sig = SIGSYS;
		break;
	case T_STEPBPT+T_USER:
#ifdef PTRACE
		/*
		 * This trap is used by the kernel to support single-step
		 * debugging (although any user could generate this trap
		 * which should probably be handled differently). When a
		 * process is continued by a debugger with the PT_STEP
		 * function of ptrace (single step), the kernel inserts
		 * one or two breakpoints in the user process so that only
		 * one instruction (or two in the case of a delayed branch)
		 * is executed.  When this breakpoint is hit, we get the
		 * T_STEPBPT trap.
		 */
		{
			u_int instr;
			vaddr_t pc = PC_REGS(&frame->tf_regs);

			/* read break instruction */
			copyin((caddr_t)pc, &instr, sizeof(u_int));

			/* check and see if we got here by accident */
			if ((p->p_md.md_bp0va != pc &&
			     p->p_md.md_bp1va != pc) ||
			    instr != SSBREAKPOINT) {
				sig = SIGTRAP;
				fault_type = TRAP_TRACE;
				break;
			}

			/* restore original instruction and clear breakpoint */
			if (p->p_md.md_bp0va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp0save);
				p->p_md.md_bp0va = 0;
			}
			if (p->p_md.md_bp1va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp1save);
				p->p_md.md_bp1va = 0;
			}

			sig = SIGTRAP;
			fault_type = TRAP_BRKPT;
		}
#else
		sig = SIGTRAP;
		fault_type = TRAP_TRACE;
#endif
		break;
	case T_USERBPT+T_USER:
		/*
		 * This trap is meant to be used by debuggers to implement
		 * breakpoint debugging.  When we get this trap, we just
		 * return a signal which gets caught by the debugger.
		 */
		sig = SIGTRAP;
		fault_type = TRAP_BRKPT;
		break;
	}

	/*
	 * If trap from supervisor mode, just return
	 */
	if (type < T_USER)
		return;

	if (sig) {
deliver:
		sv.sival_ptr = (void *)fault_addr;
		KERNEL_PROC_LOCK(p);
		trapsignal(p, sig, fault_code, fault_type, sv);
		KERNEL_PROC_UNLOCK(p);
	}

userexit:
	userret(p);
}
1140#endif /* M88110 */
1141
1142__dead void
1143error_fatal(struct trapframe *frame)
1144{
1145	if (frame->tf_vector == 0)
1146		printf("\nCPU %d Reset Exception\n", cpu_number());
1147	else
1148		printf("\nCPU %d Error Exception\n", cpu_number());
1149
1150#ifdef DDB
1151	regdump((struct trapframe*)frame);
1152#endif
1153	panic("unrecoverable exception %d", frame->tf_vector);
1154}
1155
1156#ifdef M88100
/*
 * m88100_syscall(code, tf)
 *
 *	System call handler for 88100 processors.  Gathers the system
 *	call arguments from registers r2-r9 (and the user stack, if there
 *	are more than eight), dispatches the call, and reflects its
 *	result into the trap frame, including the sxip/snip/sfip pipeline
 *	register adjustments described in the comment below.
 */
void
m88100_syscall(register_t code, struct trapframe *tf)
{
	int i, nsys, nap;
	struct sysent *callp;
	struct proc *p = curproc;
	int error;
	register_t args[8], rval[2], *ap;
	int nolock;

	uvmexp.syscalls++;

	callp = p->p_emul->e_sysent;
	nsys  = p->p_emul->e_nsysent;

	p->p_md.md_tf = tf;

	/*
	 * For 88k, all the arguments are passed in the registers (r2-r9),
	 * and further arguments (if any) on stack.
	 * For syscall (and __syscall), r2 (and r3) has the actual code.
	 * __syscall  takes a quad syscall number, so that other
	 * arguments are at their natural alignments.
	 */
	ap = &tf->tf_r[2];
	nap = 8; /* r2-r9 */

	switch (code) {
	case SYS_syscall:
		code = *ap++;
		nap--;
		break;
	case SYS___syscall:
		if (callp != sysent)
			break;
		code = ap[_QUAD_LOWWORD];
		ap += 2;
		nap -= 2;
		break;
	}

	/* out of range system call numbers are redirected to sys_nosys() */
	if (code < 0 || code >= nsys)
		callp += p->p_emul->e_nosys;
	else
		callp += code;

	i = callp->sy_argsize / sizeof(register_t);
	if (i > sizeof(args) / sizeof(register_t))
		panic("syscall nargs");
	if (i > nap) {
		/* arguments beyond r9 are fetched from the user stack */
		bcopy((caddr_t)ap, (caddr_t)args, nap * sizeof(register_t));
		error = copyin((caddr_t)tf->tf_r[31], (caddr_t)(args + nap),
		    (i - nap) * sizeof(register_t));
	} else {
		bcopy((caddr_t)ap, (caddr_t)args, i * sizeof(register_t));
		error = 0;
	}

	if (error != 0)
		goto bad;

#ifdef SYSCALL_DEBUG
	KERNEL_PROC_LOCK(p);
	scdebug_call(p, code, args);
	KERNEL_PROC_UNLOCK(p);
#endif
#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSCALL)) {
		KERNEL_PROC_LOCK(p);
		ktrsyscall(p, code, callp->sy_argsize, args);
		KERNEL_PROC_UNLOCK(p);
	}
#endif
	rval[0] = 0;
	rval[1] = tf->tf_r[3];	/* default: r3 unchanged on success */
#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE)) {
		KERNEL_PROC_LOCK(p);
		error = systrace_redirect(code, p, args, rval);
		KERNEL_PROC_UNLOCK(p);
	} else
#endif
	{
		/* SY_NOLOCK system calls run without the big kernel lock */
		nolock = (callp->sy_flags & SY_NOLOCK);
		if (!nolock)
			KERNEL_PROC_LOCK(p);
		error = (*callp->sy_call)(p, args, rval);
		if (!nolock)
			KERNEL_PROC_UNLOCK(p);
	}

	/*
	 * system call will look like:
	 *	 or r13, r0, <code>
	 *       tb0 0, r0, <128> <- sxip
	 *	 br err 	  <- snip
	 *       jmp r1 	  <- sfip
	 *  err: or.u r3, r0, hi16(errno)
	 *	 st r2, r3, lo16(errno)
	 *	 subu r2, r0, 1
	 *	 jmp r1
	 *
	 * So, when we take syscall trap, sxip/snip/sfip will be as
	 * shown above.
	 * Given this,
	 * 1. If the system call returned 0, need to skip nip.
	 *	nip = fip, fip += 4
	 *    (doesn't matter what fip + 4 will be but we will never
	 *    execute this since jmp r1 at nip will change the execution flow.)
	 * 2. If the system call returned an errno > 0, plug the value
	 *    in r2, and leave nip and fip unchanged. This will have us
	 *    executing "br err" on return to user space.
	 * 3. If the system call code returned ERESTART,
	 *    we need to rexecute the trap instruction. Back up the pipe
	 *    line.
	 *     fip = nip, nip = xip
	 * 4. If the system call returned EJUSTRETURN, don't need to adjust
	 *    any pointers.
	 */

	switch (error) {
	case 0:
		tf->tf_r[2] = rval[0];
		tf->tf_r[3] = rval[1];
		tf->tf_epsr &= ~PSR_C;
		tf->tf_snip = tf->tf_sfip & ~NIP_E;
		tf->tf_sfip = tf->tf_snip + 4;
		break;
	case ERESTART:
		tf->tf_epsr &= ~PSR_C;
		tf->tf_sfip = tf->tf_snip & ~FIP_E;
		tf->tf_snip = tf->tf_sxip & ~NIP_E;
		break;
	case EJUSTRETURN:
		tf->tf_epsr &= ~PSR_C;
		break;
	default:
bad:
		/* translate the errno for non-native emulations */
		if (p->p_emul->e_errno)
			error = p->p_emul->e_errno[error];
		tf->tf_r[2] = error;
		tf->tf_epsr |= PSR_C;   /* fail */
		tf->tf_snip = tf->tf_snip & ~NIP_E;
		tf->tf_sfip = tf->tf_sfip & ~FIP_E;
		break;
	}
#ifdef SYSCALL_DEBUG
	KERNEL_PROC_LOCK(p);
	scdebug_ret(p, code, error, rval);
	KERNEL_PROC_UNLOCK(p);
#endif
	userret(p);
#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSRET)) {
		KERNEL_PROC_LOCK(p);
		ktrsysret(p, code, error, rval[0]);
		KERNEL_PROC_UNLOCK(p);
	}
#endif
}
1317#endif /* M88100 */
1318
1319#ifdef M88110
1320/* Instruction pointers operate differently on mc88110 */
1321void
1322m88110_syscall(register_t code, struct trapframe *tf)
1323{
1324	int i, nsys, nap;
1325	struct sysent *callp;
1326	struct proc *p = curproc;
1327	int error;
1328	register_t args[8], rval[2], *ap;
1329	int nolock;
1330
1331	uvmexp.syscalls++;
1332
1333	callp = p->p_emul->e_sysent;
1334	nsys  = p->p_emul->e_nsysent;
1335
1336	p->p_md.md_tf = tf;
1337
1338	/*
1339	 * For 88k, all the arguments are passed in the registers (r2-r9),
1340	 * and further arguments (if any) on stack.
1341	 * For syscall (and __syscall), r2 (and r3) has the actual code.
1342	 * __syscall  takes a quad syscall number, so that other
1343	 * arguments are at their natural alignments.
1344	 */
1345	ap = &tf->tf_r[2];
1346	nap = 8;	/* r2-r9 */
1347
1348	switch (code) {
1349	case SYS_syscall:
1350		code = *ap++;
1351		nap--;
1352		break;
1353	case SYS___syscall:
1354		if (callp != sysent)
1355			break;
1356		code = ap[_QUAD_LOWWORD];
1357		ap += 2;
1358		nap -= 2;
1359		break;
1360	}
1361
1362	if (code < 0 || code >= nsys)
1363		callp += p->p_emul->e_nosys;
1364	else
1365		callp += code;
1366
1367	i = callp->sy_argsize / sizeof(register_t);
1368	if (i > sizeof(args) > sizeof(register_t))
1369		panic("syscall nargs");
1370	if (i > nap) {
1371		bcopy((caddr_t)ap, (caddr_t)args, nap * sizeof(register_t));
1372		error = copyin((caddr_t)tf->tf_r[31], (caddr_t)(args + nap),
1373		    (i - nap) * sizeof(register_t));
1374	} else {
1375		bcopy((caddr_t)ap, (caddr_t)args, i * sizeof(register_t));
1376		error = 0;
1377	}
1378
1379	if (error != 0)
1380		goto bad;
1381
1382#ifdef SYSCALL_DEBUG
1383	KERNEL_PROC_LOCK(p);
1384	scdebug_call(p, code, args);
1385	KERNEL_PROC_UNLOCK(p);
1386#endif
1387#ifdef KTRACE
1388	if (KTRPOINT(p, KTR_SYSCALL)) {
1389		KERNEL_PROC_LOCK(p);
1390		ktrsyscall(p, code, callp->sy_argsize, args);
1391		KERNEL_PROC_UNLOCK(p);
1392	}
1393#endif
1394	rval[0] = 0;
1395	rval[1] = tf->tf_r[3];
1396#if NSYSTRACE > 0
1397	if (ISSET(p->p_flag, P_SYSTRACE)) {
1398		KERNEL_PROC_LOCK(p);
1399		error = systrace_redirect(code, p, args, rval);
1400		KERNEL_PROC_UNLOCK(p);
1401	} else
1402#endif
1403	{
1404		nolock = (callp->sy_flags & SY_NOLOCK);
1405		if (!nolock)
1406			KERNEL_PROC_LOCK(p);
1407		error = (*callp->sy_call)(p, args, rval);
1408		if (!nolock)
1409			KERNEL_PROC_UNLOCK(p);
1410	}
1411
1412	/*
1413	 * system call will look like:
1414	 *	 or r13, r0, <code>
1415	 *       tb0 0, r0, <128> <- exip
1416	 *	 br err 	  <- enip
1417	 *       jmp r1
1418	 *  err: or.u r3, r0, hi16(errno)
1419	 *	 st r2, r3, lo16(errno)
1420	 *	 subu r2, r0, 1
1421	 *	 jmp r1
1422	 *
1423	 * So, when we take syscall trap, exip/enip will be as
1424	 * shown above.
1425	 * Given this,
1426	 * 1. If the system call returned 0, need to jmp r1.
1427	 *    exip += 8
1428	 * 2. If the system call returned an errno > 0, increment
1429	 *    exip += 4 and plug the value in r2. This will have us
1430	 *    executing "br err" on return to user space.
1431	 * 3. If the system call code returned ERESTART,
1432	 *    we need to rexecute the trap instruction. leave exip as is.
1433	 * 4. If the system call returned EJUSTRETURN, just return.
1434	 *    exip += 4
1435	 */
1436
1437	switch (error) {
1438	case 0:
1439		tf->tf_r[2] = rval[0];
1440		tf->tf_r[3] = rval[1];
1441		tf->tf_epsr &= ~PSR_C;
1442		/* skip two instructions */
1443		m88110_skip_insn(tf);
1444		m88110_skip_insn(tf);
1445		break;
1446	case ERESTART:
1447		/*
1448		 * Reexecute the trap.
1449		 * exip is already at the trap instruction, so
1450		 * there is nothing to do.
1451		 */
1452		tf->tf_epsr &= ~PSR_C;
1453		break;
1454	case EJUSTRETURN:
1455		tf->tf_epsr &= ~PSR_C;
1456		/* skip one instruction */
1457		m88110_skip_insn(tf);
1458		break;
1459	default:
1460bad:
1461		if (p->p_emul->e_errno)
1462			error = p->p_emul->e_errno[error];
1463		tf->tf_r[2] = error;
1464		tf->tf_epsr |= PSR_C;   /* fail */
1465		/* skip one instruction */
1466		m88110_skip_insn(tf);
1467		break;
1468	}
1469
1470#ifdef SYSCALL_DEBUG
1471	KERNEL_PROC_LOCK(p);
1472	scdebug_ret(p, code, error, rval);
1473	KERNEL_PROC_UNLOCK(p);
1474#endif
1475	userret(p);
1476#ifdef KTRACE
1477	if (KTRPOINT(p, KTR_SYSRET)) {
1478		KERNEL_PROC_LOCK(p);
1479		ktrsysret(p, code, error, rval[0]);
1480		KERNEL_PROC_UNLOCK(p);
1481	}
1482#endif
1483}
1484#endif	/* M88110 */
1485
1486/*
1487 * Set up return-value registers as fork() libc stub expects,
1488 * and do normal return-to-user-mode stuff.
1489 */
1490void
1491child_return(arg)
1492	void *arg;
1493{
1494	struct proc *p = arg;
1495	struct trapframe *tf;
1496
1497	tf = (struct trapframe *)USER_REGS(p);
1498	tf->tf_r[2] = 0;
1499	tf->tf_r[3] = 0;
1500	tf->tf_epsr &= ~PSR_C;
1501	/* skip br instruction as in syscall() */
1502#ifdef M88100
1503	if (CPU_IS88100) {
1504		tf->tf_snip = (tf->tf_sfip & XIP_ADDR) | XIP_V;
1505		tf->tf_sfip = tf->tf_snip + 4;
1506	}
1507#endif
1508#ifdef M88110
1509	if (CPU_IS88110) {
1510		/* skip two instructions */
1511		m88110_skip_insn(tf);
1512		m88110_skip_insn(tf);
1513	}
1514#endif
1515
1516	KERNEL_PROC_UNLOCK(p);
1517	userret(p);
1518
1519#ifdef KTRACE
1520	if (KTRPOINT(p, KTR_SYSRET)) {
1521		KERNEL_PROC_LOCK(p);
1522		ktrsysret(p,
1523		    (p->p_flag & P_THREAD) ? SYS_rfork :
1524		    (p->p_p->ps_flags & PS_PPWAIT) ? SYS_vfork : SYS_fork,
1525		    0, 0);
1526		KERNEL_PROC_UNLOCK(p);
1527	}
1528#endif
1529}
1530
1531#ifdef PTRACE
1532
1533/*
1534 * User Single Step Debugging Support
1535 */
1536
1537#include <sys/ptrace.h>
1538
1539vaddr_t	ss_branch_taken(u_int, vaddr_t, struct reg *);
1540int	ss_get_value(struct proc *, vaddr_t, u_int *);
1541int	ss_inst_branch_or_call(u_int);
1542int	ss_put_breakpoint(struct proc *, vaddr_t, vaddr_t *, u_int *);
1543
1544#define	SYSCALL_INSTR	0xf000d080	/* tb0 0,r0,128 */
1545
1546int
1547ss_get_value(struct proc *p, vaddr_t addr, u_int *value)
1548{
1549	struct uio uio;
1550	struct iovec iov;
1551
1552	iov.iov_base = (caddr_t)value;
1553	iov.iov_len = sizeof(u_int);
1554	uio.uio_iov = &iov;
1555	uio.uio_iovcnt = 1;
1556	uio.uio_offset = (off_t)addr;
1557	uio.uio_resid = sizeof(u_int);
1558	uio.uio_segflg = UIO_SYSSPACE;
1559	uio.uio_rw = UIO_READ;
1560	uio.uio_procp = curproc;
1561	return (process_domem(curproc, p, &uio, PT_READ_I));
1562}
1563
1564int
1565ss_put_value(struct proc *p, vaddr_t addr, u_int value)
1566{
1567	struct uio uio;
1568	struct iovec iov;
1569
1570	iov.iov_base = (caddr_t)&value;
1571	iov.iov_len = sizeof(u_int);
1572	uio.uio_iov = &iov;
1573	uio.uio_iovcnt = 1;
1574	uio.uio_offset = (off_t)addr;
1575	uio.uio_resid = sizeof(u_int);
1576	uio.uio_segflg = UIO_SYSSPACE;
1577	uio.uio_rw = UIO_WRITE;
1578	uio.uio_procp = curproc;
1579	return (process_domem(curproc, p, &uio, PT_WRITE_I));
1580}
1581
1582/*
1583 * ss_branch_taken(instruction, pc, regs)
1584 *
1585 * instruction will be a control flow instruction location at address pc.
1586 * Branch taken is supposed to return the address to which the instruction
1587 * would jump if the branch is taken.
1588 *
1589 * This is different from branch_taken() in ddb, as we also need to process
1590 * system calls.
1591 */
vaddr_t
ss_branch_taken(u_int inst, vaddr_t pc, struct reg *regs)
{
	u_int regno;

	/*
	 * Quick check of the instruction. Note that we know we are only
	 * invoked if ss_inst_branch_or_call() returns TRUE, so we do not
	 * need to repeat the jmp, jsr and syscall stricter checks here.
	 */
	switch (inst >> (32 - 5)) {	/* major opcode: top five bits */
	case 0x18:	/* br */
	case 0x19:	/* bsr */
		/* signed 26 bit pc relative displacement, shift left 2 bits */
		inst = (inst & 0x03ffffff) << 2;
		/* check if sign extension is needed (bit 27 of shifted value) */
		if (inst & 0x08000000)
			inst |= 0xf0000000;
		return (pc + inst);

	case 0x1a:	/* bb0 */
	case 0x1b:	/* bb1 */
	case 0x1d:	/* bcnd */
		/* signed 16 bit pc relative displacement, shift left 2 bits */
		inst = (inst & 0x0000ffff) << 2;
		/* check if sign extension is needed (bit 17 of shifted value) */
		if (inst & 0x00020000)
			inst |= 0xfffc0000;
		return (pc + inst);

	case 0x1e:	/* jmp or jsr */
		regno = inst & 0x1f;	/* get the register value */
		/* jumps through r0 yield 0, which the caller discards */
		return (regno == 0 ? 0 : regs->r[regno]);

	default:	/* system call */
		/*
		 * The regular (pc + 4) breakpoint will match the error
		 * return. Successful system calls return at (pc + 8),
		 * so we'll set up a branch breakpoint there.
		 */
		return (pc + 8);
	}
}
1635
1636int
1637ss_inst_branch_or_call(u_int ins)
1638{
1639	/* check high five bits */
1640	switch (ins >> (32 - 5)) {
1641	case 0x18: /* br */
1642	case 0x19: /* bsr */
1643	case 0x1a: /* bb0 */
1644	case 0x1b: /* bb1 */
1645	case 0x1d: /* bcnd */
1646		return (TRUE);
1647	case 0x1e: /* could be jmp or jsr */
1648		if ((ins & 0xfffff3e0) == 0xf400c000)
1649			return (TRUE);
1650	}
1651
1652	return (FALSE);
1653}
1654
1655int
1656ss_put_breakpoint(struct proc *p, vaddr_t va, vaddr_t *bpva, u_int *bpsave)
1657{
1658	int rc;
1659
1660	/* Restore previous breakpoint if we did not trigger it. */
1661	if (*bpva != 0) {
1662		ss_put_value(p, *bpva, *bpsave);
1663		*bpva = 0;
1664	}
1665
1666	/* Save instruction. */
1667	if ((rc = ss_get_value(p, va, bpsave)) != 0)
1668		return (rc);
1669
1670	/* Store breakpoint instruction at the location now. */
1671	*bpva = va;
1672	return (ss_put_value(p, va, SSBREAKPOINT));
1673}
1674
1675int
1676process_sstep(struct proc *p, int sstep)
1677{
1678	struct reg *sstf = USER_REGS(p);
1679	vaddr_t pc, brpc;
1680	u_int32_t instr;
1681	int rc;
1682
1683	if (sstep == 0) {
1684		/* Restore previous breakpoints if any. */
1685		if (p->p_md.md_bp0va != 0) {
1686			ss_put_value(p, p->p_md.md_bp0va, p->p_md.md_bp0save);
1687			p->p_md.md_bp0va = 0;
1688		}
1689		if (p->p_md.md_bp1va != 0) {
1690			ss_put_value(p, p->p_md.md_bp1va, p->p_md.md_bp1save);
1691			p->p_md.md_bp1va = 0;
1692		}
1693
1694		return (0);
1695	}
1696
1697	/*
1698	 * User was stopped at pc, e.g. the instruction at pc was not executed.
1699	 * Fetch what's at the current location.
1700	 */
1701	pc = PC_REGS(sstf);
1702	if ((rc = ss_get_value(p, pc, &instr)) != 0)
1703		return (rc);
1704
1705	/*
1706	 * Find if this instruction may cause a branch, and set up a breakpoint
1707	 * at the branch location.
1708	 */
1709	if (ss_inst_branch_or_call(instr) || instr == SYSCALL_INSTR) {
1710		brpc = ss_branch_taken(instr, pc, sstf);
1711
1712		/* self-branches are hopeless */
1713		if (brpc != pc && brpc != 0) {
1714			if ((rc = ss_put_breakpoint(p, brpc,
1715			    &p->p_md.md_bp1va, &p->p_md.md_bp1save)) != 0)
1716				return (rc);
1717		}
1718	}
1719
1720	if ((rc = ss_put_breakpoint(p, pc + 4,
1721	    &p->p_md.md_bp0va, &p->p_md.md_bp0save)) != 0)
1722		return (rc);
1723
1724	return (0);
1725}
1726
1727#endif	/* PTRACE */
1728
1729#ifdef DIAGNOSTIC
/*
 * Verify that the current interrupt priority level is at least
 * wantipl, reporting a failure (and raising the level) if it is not.
 */
void
splassert_check(int wantipl, const char *func)
{
	int curipl = getipl();

	if (curipl >= wantipl)
		return;

	splassert_fail(wantipl, curipl, func);
	/*
	 * This will raise the spl,
	 * in a feeble attempt to reduce further damage.
	 */
	(void)raiseipl(wantipl);
}
1746#endif
1747
1748/*
1749 * ld.d and st.d instructions referencing long aligned but not long long
1750 * aligned addresses will trigger a misaligned address exception.
1751 *
1752 * This routine attempts to recover these (valid) statements, by simulating
1753 * the split form of the instruction. If it fails, it returns the appropriate
1754 * signal number to deliver.
1755 *
1756 * Note that we do not attempt to do anything for .d.usr instructions - the
1757 * kernel never issues such instructions, and they cause a privileged
1758 * instruction exception from userland.
1759 */
int
double_reg_fixup(struct trapframe *frame)
{
	u_int32_t pc, instr, value;
	int regno, store;
	vaddr_t addr;

	/*
	 * Decode the faulting instruction.
	 */

	pc = PC_REGS(&frame->tf_regs);
	if (copyin((void *)pc, &instr, sizeof(u_int32_t)) != 0)
		return SIGSEGV;

	/* check the register + register addressing forms first... */
	switch (instr & 0xfc00ff00) {
	case 0xf4001000:	/* ld.d rD, rS1, rS2 */
		addr = frame->tf_r[(instr >> 16) & 0x1f]
		    + frame->tf_r[(instr & 0x1f)];
		store = 0;
		break;
	case 0xf4002000:	/* st.d rD, rS1, rS2 */
		addr = frame->tf_r[(instr >> 16) & 0x1f]
		    + frame->tf_r[(instr & 0x1f)];
		store = 1;
		break;
	default:
		/* ...then the register + 16 bit immediate offset forms */
		switch (instr >> 26) {
		case 0x10000000 >> 26:	/* ld.d rD, rS, imm16 */
			addr = (instr & 0x0000ffff) +
			    frame->tf_r[(instr >> 16) & 0x1f];
			store = 0;
			break;
		case 0x20000000 >> 26:	/* st.d rD, rS, imm16 */
			addr = (instr & 0x0000ffff) +
			    frame->tf_r[(instr >> 16) & 0x1f];
			store = 1;
			break;
		default:
			/* not a ld.d or st.d instruction, nothing we can do */
			return SIGBUS;
		}
		break;
	}

	/* We only handle long but not long long aligned access here */
	if ((addr & 0x07) != 4)
		return SIGBUS;

	regno = (instr >> 21) & 0x1f;	/* the rD field */

	if (store) {
		/*
		 * Two word stores.  r0 always provides zero, and r31
		 * has no pair register, so store zero in these cases.
		 */
		if (regno == 0)
			value = 0;
		else
			value = frame->tf_r[regno];
		if (copyout(&value, (void *)addr, sizeof(u_int32_t)) != 0)
			return SIGSEGV;
		if (regno == 31)
			value = 0;
		else
			value = frame->tf_r[regno + 1];
		if (copyout(&value, (void *)(addr + 4), sizeof(u_int32_t)) != 0)
			return SIGSEGV;
	} else {
		/*
		 * Two word loads. r0 should be left unaltered, but the
		 * value should still be fetched even if it is discarded.
		 * Similarly, the second word must not be written past r31.
		 */
		if (copyin((void *)addr, &value, sizeof(u_int32_t)) != 0)
			return SIGSEGV;
		if (regno != 0)
			frame->tf_r[regno] = value;
		if (copyin((void *)(addr + 4), &value, sizeof(u_int32_t)) != 0)
			return SIGSEGV;
		if (regno != 31)
			frame->tf_r[regno + 1] = value;
	}

	return 0;
}
1843
/*
 * Trap entry point synchronizing the cache over a range of user
 * virtual addresses (start in r2, length in r3).  Invalid ranges are
 * silently ignored; unmapped pages within the range are skipped.
 */
void
cache_flush(struct trapframe *tf)
{
	struct proc *p = curproc;
	struct pmap *pmap;
	paddr_t pa;
	vaddr_t va;
	vsize_t len, count;

	p->p_md.md_tf = tf;

	pmap = vm_map_pmap(&p->p_vmspace->vm_map);
	va = tf->tf_r[2];
	len = tf->tf_r[3];

	/* ignore ranges outside of userland or wrapping around */
	if (/* va < VM_MIN_ADDRESS || */ va >= VM_MAXUSER_ADDRESS ||
	    va + len <= va || va + len >= VM_MAXUSER_ADDRESS)
		len = 0;

	/* process the range one page at a time */
	while (len != 0) {
		count = min(len, PAGE_SIZE - (va & PAGE_MASK));
		if (pmap_extract(pmap, va, &pa) != FALSE)
			dma_cachectl(pa, count, DMA_CACHE_SYNC);
		va += count;
		len -= count;
	}

	/* advance past the trap instruction before returning to userland */
#ifdef M88100
	if (CPU_IS88100) {
		tf->tf_snip = tf->tf_snip & ~NIP_E;
		tf->tf_sfip = tf->tf_sfip & ~FIP_E;
	}
#endif
#ifdef M88110
	if (CPU_IS88110) {
		/* skip instruction */
		m88110_skip_insn(tf);
	}
#endif

	userret(p);
}
1886