/*	$NetBSD: trap.c,v 1.9 2002/11/25 05:11:32 thorpej Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_altivec.h"
#include "opt_ddb.h"
#include "opt_ktrace.h"
#include "opt_systrace.h"
#include "opt_syscall_debug.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/syscall.h>
#include <sys/systm.h>
#include <sys/user.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#ifdef SYSTRACE
#include <sys/systrace.h>
#endif

#include <uvm/uvm_extern.h>

#include <dev/cons.h>

#include <machine/cpu.h>
#include <machine/db_machdep.h>
#include <machine/fpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/trap.h>

#include <powerpc/spr.h>
#include <powerpc/ibm4xx/pmap.h>
#include <powerpc/ibm4xx/tlb.h>
#include <powerpc/fpu/fpu_extern.h>

/* These definitions should probably be somewhere else			XXX */
#define	FIRSTARG	3		/* first argument is in reg 3 */
#define	NARGREG		8		/* 8 args are in registers */
#define	MOREARGS(sp)	((caddr_t)((int)(sp) + 8)) /* more args go here */

#ifndef MULTIPROCESSOR
volatile int astpending;
volatile int want_resched;
#endif

static int fix_unaligned __P((struct proc *p, struct trapframe *frame));

void trap __P((struct trapframe *));	/* Called from locore / trap_subr */
int setfault __P((faultbuf));	/* defined in locore.S */
/* Why are these not defined in a header? */
int badaddr __P((void *, size_t));
int badaddr_read __P((void *, size_t, int *));
int ctx_setup __P((int, int));

#ifdef DEBUG
#define TDB_ALL	0x1
int trapdebug = /* TDB_ALL */ 0;
#define	DBPRINTF(x, y)	do { if (trapdebug & (x)) printf y; } while (0)
#else
#define	DBPRINTF(x, y)	do { } while (0)
#endif

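/*
 * Handle traps and faults that the low-level handlers in locore
 * (trap_subr) could not dispose of themselves.  The trapframe describes
 * the faulting context; EXC_USER is or'ed into the exception type when
 * the trap came from user mode (PSL_PR set in SRR1).
 */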
void
trap(struct trapframe *frame)
{
	struct proc *p = curproc;
	int type = frame->exc;
	int ftype, rv;

	KASSERT(p == 0 || (p->p_stat == SONPROC));

	if (frame->srr1 & PSL_PR)
		type |= EXC_USER;

	ftype = VM_PROT_READ;

	DBPRINTF(TDB_ALL, ("trap(%x) at %x from frame %p &frame %p\n",
	    type, frame->srr0, frame, &frame));

	switch (type) {
	case EXC_DEBUG|EXC_USER:
		{
			int srr2, srr3;
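
			/*
			 * Dump the 4xx debug state: DBSR (SPR 0x3f0) holds
			 * the debug event status, and SRR2/SRR3 (SPRs
			 * 0x3de/0x3df) hold the PC and MSR saved when the
			 * critical-class debug exception was taken.
			 */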
			__asm __volatile("mfspr %0,0x3f0;"
				"mfspr %1,0x3de; mfspr %2,0x3df"
				: "=r" (rv), "=r" (srr2), "=r" (srr3));
			printf("debug reg is %x srr2 %x srr3 %x\n",
			    rv, srr2, srr3);
		}
		/*
		 * DEBUG intr -- probably single-step.
		 */
		/* FALLTHROUGH */
	case EXC_TRC|EXC_USER:
		KERNEL_PROC_LOCK(p);
		frame->srr1 &= ~PSL_SE;
		trapsignal(p, SIGTRAP, EXC_TRC);
		KERNEL_PROC_UNLOCK(p);
		break;

	/*
	 * If we could not find and install an appropriate TLB entry,
	 * fall through to the full data-fault handling.
	 */
	case EXC_DSI:
		/* FALLTHROUGH */
	case EXC_DTMISS:
		{
			struct vm_map *map;
			vaddr_t va;
			faultbuf *fb = NULL;

			KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
			va = frame->dear;
			if (frame->pid == KERNEL_PID) {
				map = kernel_map;
			} else {
				map = &p->p_vmspace->vm_map;
			}

			if (frame->esr & (ESR_DST|ESR_DIZ))
				ftype = VM_PROT_WRITE;

			DBPRINTF(TDB_ALL,
			    ("trap(EXC_DSI) at %x %s fault on %p esr %x\n",
			    frame->srr0,
			    (ftype & VM_PROT_WRITE) ? "write" : "read",
			    (void *)va, frame->esr));
			rv = uvm_fault(map, trunc_page(va), 0, ftype);
			KERNEL_UNLOCK();
			if (rv == 0)
				goto done;
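			/*
			 * The fault could not be resolved.  If the faulting
			 * code registered an onfault handler with setfault()
			 * (as copyin()/copyout() and badaddr_read() do),
			 * unwind to it: restore the registers saved in the
			 * faultbuf and make the interrupted routine return
			 * nonzero instead of panicking.
			 */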
			if ((fb = p->p_addr->u_pcb.pcb_onfault) != NULL) {
				frame->pid = KERNEL_PID;
				frame->srr0 = (*fb)[0];
				frame->srr1 |= PSL_IR; /* Re-enable IMMU */
				frame->fixreg[1] = (*fb)[1];
				frame->fixreg[2] = (*fb)[2];
				frame->fixreg[3] = 1; /* Return TRUE */
				frame->cr = (*fb)[3];
				memcpy(&frame->fixreg[13], &(*fb)[4],
				      19 * sizeof(register_t));
				goto done;
			}
		}
		goto brain_damage;

	case EXC_DSI|EXC_USER:
		/* FALLTHROUGH */
	case EXC_DTMISS|EXC_USER:
		KERNEL_PROC_LOCK(p);

		if (frame->esr & (ESR_DST|ESR_DIZ))
			ftype = VM_PROT_WRITE;

		DBPRINTF(TDB_ALL,
		    ("trap(EXC_DSI|EXC_USER) at %x %s fault on %x %x\n",
		    frame->srr0,
		    (ftype & VM_PROT_WRITE) ? "write" : "read",
		    frame->dear, frame->esr));
		KASSERT(p == curproc && (p->p_stat == SONPROC));
		rv = uvm_fault(&p->p_vmspace->vm_map,
			       trunc_page(frame->dear), 0, ftype);
		if (rv == 0) {
			KERNEL_PROC_UNLOCK(p);
			break;
		}
		if (rv == ENOMEM) {
			printf("UVM: pid %d (%s), uid %d killed: "
			       "out of swap\n",
			       p->p_pid, p->p_comm,
			       p->p_cred && p->p_ucred ?
			       p->p_ucred->cr_uid : -1);
			trapsignal(p, SIGKILL, EXC_DSI);
		} else {
			trapsignal(p, SIGSEGV, EXC_DSI);
		}
		KERNEL_PROC_UNLOCK(p);
		break;

	case EXC_ITMISS|EXC_USER:
	case EXC_ISI|EXC_USER:
		KERNEL_PROC_LOCK(p);
		ftype = VM_PROT_READ | VM_PROT_EXECUTE;
		DBPRINTF(TDB_ALL,
		    ("trap(EXC_ISI|EXC_USER) at %x %s fault on %x tf %p\n",
		    frame->srr0,
		    (ftype & VM_PROT_WRITE) ? "write" : "read",
		    frame->srr0, frame));
		rv = uvm_fault(&p->p_vmspace->vm_map,
			       trunc_page(frame->srr0), 0, ftype);
		if (rv == 0) {
			KERNEL_PROC_UNLOCK(p);
			break;
		}
		trapsignal(p, SIGSEGV, EXC_ISI);
		KERNEL_PROC_UNLOCK(p);
		break;

	case EXC_AST|EXC_USER:
		astpending = 0;		/* we are about to do it */
		KERNEL_PROC_LOCK(p);
		uvmexp.softs++;
		if (p->p_flag & P_OWEUPC) {
			p->p_flag &= ~P_OWEUPC;
			ADDUPROF(p);
		}
		/* Check whether we are being preempted. */
		if (want_resched)
			preempt(NULL);
		KERNEL_PROC_UNLOCK(p);
		break;

	case EXC_ALI|EXC_USER:
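		/*
		 * Alignment fault: try to fix up the access.  If that
		 * fails, deliver SIGBUS; otherwise step over the 4-byte
		 * faulting instruction.
		 */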
		KERNEL_PROC_LOCK(p);
		if (fix_unaligned(p, frame) != 0)
			trapsignal(p, SIGBUS, EXC_ALI);
		else
			frame->srr0 += 4;
		KERNEL_PROC_UNLOCK(p);
		break;

	case EXC_PGM|EXC_USER:
		/*
		 * Illegal instruction: see whether it is a floating-point
		 * instruction that we can emulate in software.
		 */
		uvmexp.traps++;
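		/*
		 * On the first FPU use by this process, start with a
		 * clean software FP context before emulating.
		 */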
		if (!(p->p_addr->u_pcb.pcb_flags & PCB_FPU)) {
			memset(&p->p_addr->u_pcb.pcb_fpu, 0,
				sizeof p->p_addr->u_pcb.pcb_fpu);
			p->p_addr->u_pcb.pcb_flags |= PCB_FPU;
		}

		if ((rv = fpu_emulate(frame,
			(struct fpreg *)&p->p_addr->u_pcb.pcb_fpu))) {
			KERNEL_PROC_LOCK(p);
			trapsignal(p, rv, EXC_PGM);
			KERNEL_PROC_UNLOCK(p);
		}
		break;

	case EXC_MCHK:
		{
			faultbuf *fb;

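			/*
			 * Machine check in kernel mode: as with kernel data
			 * faults above, honor any onfault handler registered
			 * by setfault() (this is how badaddr_read() survives
			 * probing a nonexistent address).
			 */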
			if ((fb = p->p_addr->u_pcb.pcb_onfault) != NULL) {
				frame->pid = KERNEL_PID;
				frame->srr0 = (*fb)[0];
				frame->srr1 |= PSL_IR; /* Re-enable IMMU */
				frame->fixreg[1] = (*fb)[1];
				frame->fixreg[2] = (*fb)[2];
				frame->fixreg[3] = 1; /* Return TRUE */
				frame->cr = (*fb)[3];
				memcpy(&frame->fixreg[13], &(*fb)[4],
				      19 * sizeof(register_t));
				goto done;
			}
		}
		goto brain_damage;
	default:
brain_damage:
		printf("trap type 0x%x at 0x%x\n", type, frame->srr0);
#ifdef DDB
		if (kdb_trap(type, frame))
			goto done;
#endif
#ifdef TRAP_PANICWAIT
		printf("Press a key to panic.\n");
		cngetc();
#endif
		panic("trap");
	}

	/* Take pending signals. */
	{
		int sig;

		while ((sig = CURSIG(p)) != 0)
			postsig(sig);
	}

	curcpu()->ci_schedstate.spc_curpriority = p->p_priority = p->p_usrpri;
  done:
	return;
}

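/*
 * Choose the MMU context (PID) to resume with after an exception.
 * When returning to user mode, use (and if necessary allocate) the
 * process's own context; otherwise stick with the kernel PID.  If the
 * process is being single-stepped, also turn on the debug facilities
 * in DBCR0.
 */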
int
ctx_setup(int ctx, int srr1)
{
	volatile struct pmap *pm;

	/* Update PID if we're returning to user mode. */
	if (srr1 & PSL_PR) {
		pm = curproc->p_vmspace->vm_map.pmap;
		if (!pm->pm_ctx) {
			ctx_alloc((struct pmap *)pm);
		}
		ctx = pm->pm_ctx;
		if (srr1 & PSL_SE) {
			int dbreg, mask = 0x48000000;

			/*
			 * Set the Internal Debug and Instruction
			 * Completion bits of the DBCR0 register.
			 *
			 * XXX this is also used by jtag debuggers...
			 */
			__asm __volatile("mfspr %0,0x3f2;"
				"or %0,%0,%1;"
				"mtspr 0x3f2,%0;" :
				"=&r" (dbreg) : "r" (mask));
		}
	} else if (!ctx) {
		ctx = KERNEL_PID;
	}
	return (ctx);
}

/*
 * Used by copyin()/copyout().
 */
extern vaddr_t vmaprange __P((struct proc *, vaddr_t, vsize_t, int));
extern void vunmaprange __P((vaddr_t, vsize_t));
static int bigcopyin __P((const void *, void *, size_t));
static int bigcopyout __P((const void *, void *, size_t));

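/*
 * Copy len bytes from user space to kernel space.  Small copies are done
 * in-line below, one byte at a time, switching the PID register between
 * the user's MMU context and the kernel's for each byte; larger copies
 * are handed to bigcopyin(), which maps the user range into kernel VA
 * space instead.  A fault handler is registered with setfault() so that
 * a bad user address returns EFAULT rather than panicking.
 */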
int
copyin(const void *udaddr, void *kaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int msr, pid, tmp, ctx;
	faultbuf env;

	/* For bigger buffers use the faster copy */
	if (len > 256)
		return (bigcopyin(udaddr, kaddr, len));

	if (setfault(env)) {
		curpcb->pcb_onfault = 0;
		return EFAULT;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}

	asm volatile("addi %6,%6,1; mtctr %6;"	/* Set up counter */
		"mfmsr %0;"			/* Save MSR */
		"li %1,0x20; "
		"andc %1,%0,%1; mtmsr %1;"	/* Disable IMMU */
		"mfpid %1;"			/* Save old PID */
		"sync; isync;"

		"1: bdz 2f;"			/* while len */
		"mtpid %3; sync;"		/* Load user ctx */
		"lbz %2,0(%4); addi %4,%4,1;"	/* Load byte */
		"sync; isync;"
		"mtpid %1;sync;"
		"stb %2,0(%5); dcbf 0,%5; addi %5,%5,1;"	/* Store kernel byte */
		"sync; isync;"
		"b 1b;"				/* repeat */

		"2: mtpid %1; mtmsr %0;"	/* Restore PID and MSR */
		"sync; isync;"
		: "=&r" (msr), "=&r" (pid), "=&r" (tmp)
		: "r" (ctx), "r" (udaddr), "r" (kaddr), "r" (len));

	curpcb->pcb_onfault = 0;
	return 0;
}

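/*
 * For larger copies, avoid the per-byte context switching: lock the user
 * pages, map the user range into kernel virtual address space with
 * vmaprange(), and do a single memcpy().
 */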
static int
bigcopyin(const void *udaddr, void *kaddr, size_t len)
{
	const char *up;
	char *kp = kaddr;
	struct proc *p = curproc;
	int error;

	if (!p) {
		return EFAULT;
	}

	/*
	 * Stolen from physio():
	 */
	PHOLD(p);
	error = uvm_vslock(p, (caddr_t)udaddr, len, VM_PROT_READ);
	if (error) {
		PRELE(p);
		return EFAULT;
	}
	up = (char *)vmaprange(p, (vaddr_t)udaddr, len, VM_PROT_READ);

	memcpy(kp, up, len);
	vunmaprange((vaddr_t)up, len);
	uvm_vsunlock(p, (caddr_t)udaddr, len);
	PRELE(p);

	return 0;
}

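/*
 * Copy len bytes from kernel space to user space; the counterpart of
 * copyin() above.
 */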
int
copyout(const void *kaddr, void *udaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int msr, pid, tmp, ctx;
	faultbuf env;

	/* For big copies use the more efficient routine */
	if (len > 256)
		return (bigcopyout(kaddr, udaddr, len));

	if (setfault(env)) {
		curpcb->pcb_onfault = 0;
		return EFAULT;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}

	asm volatile("addi %6,%6,1; mtctr %6;"	/* Set up counter */
		"mfmsr %0;"			/* Save MSR */
		"li %1,0x20; "
		"andc %1,%0,%1; mtmsr %1;"	/* Disable IMMU */
		"mfpid %1;"			/* Save old PID */
		"sync; isync;"

		"1: bdz 2f;"			/* while len */
		"mtpid %1;sync;"
		"lbz %2,0(%5); addi %5,%5,1;"	/* Load kernel byte */
		"sync; isync;"
		"mtpid %3; sync;"		/* Load user ctx */
		"stb %2,0(%4);  dcbf 0,%4; addi %4,%4,1;"	/* Store user byte */
		"sync; isync;"
		"b 1b;"				/* repeat */

		"2: mtpid %1; mtmsr %0;"	/* Restore PID and MSR */
		"sync; isync;"
		: "=&r" (msr), "=&r" (pid), "=&r" (tmp)
		: "r" (ctx), "r" (udaddr), "r" (kaddr), "r" (len));

	curpcb->pcb_onfault = 0;
	return 0;
}

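/*
 * As with bigcopyin(), large copyout()s map the (locked) user range into
 * kernel virtual address space and use a single memcpy().
 */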
static int
bigcopyout(const void *kaddr, void *udaddr, size_t len)
{
	char *up;
	const char *kp = (char *)kaddr;
	struct proc *p = curproc;
	int error;

	if (!p) {
		return EFAULT;
	}

	/*
	 * Stolen from physio():
	 */
	PHOLD(p);
	error = uvm_vslock(p, udaddr, len, VM_PROT_WRITE);
	if (error) {
		PRELE(p);
		return EFAULT;
	}
	up = (char *)vmaprange(p, (vaddr_t)udaddr, len,
		VM_PROT_READ|VM_PROT_WRITE);

	memcpy(up, kp, len);
	vunmaprange((vaddr_t)up, len);
	uvm_vsunlock(p, udaddr, len);
	PRELE(p);

	return 0;
}

/*
 * kcopy(const void *src, void *dst, size_t len);
 *
 * Copy len bytes from src to dst, aborting if we encounter a fatal
 * page fault.
 *
 * kcopy() _must_ save and restore the old fault handler since it is
 * called by uiomove(), which may be in the path of servicing a non-fatal
 * page fault.
 */
int
kcopy(const void *src, void *dst, size_t len)
{
	faultbuf env, *oldfault;

	oldfault = curpcb->pcb_onfault;
	if (setfault(env)) {
		curpcb->pcb_onfault = oldfault;
		return EFAULT;
	}

	memcpy(dst, src, len);

	curpcb->pcb_onfault = oldfault;
	return 0;
}

int
badaddr(void *addr, size_t size)
{

	return badaddr_read(addr, size, NULL);
}

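/*
 * Probe an address to see whether it is readable: register a fault
 * handler, perform the access with sync/isync barriers so that any
 * resulting machine check is taken right away, and return nonzero if
 * the access faulted.  If rptr is non-NULL, the value read is stored
 * through it.
 */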
int
badaddr_read(void *addr, size_t size, int *rptr)
{
	faultbuf env;
	int x;

	/* Get rid of any stale machine checks that have been waiting.  */
	__asm __volatile ("sync; isync");

	if (setfault(env)) {
		curpcb->pcb_onfault = 0;
		__asm __volatile ("sync");
		return 1;
	}

	__asm __volatile ("sync");

	switch (size) {
	case 1:
		x = *(volatile int8_t *)addr;
		break;
	case 2:
		x = *(volatile int16_t *)addr;
		break;
	case 4:
		x = *(volatile int32_t *)addr;
		break;
	default:
		panic("badaddr: invalid size (%d)", (int)size);
	}

	/* Make sure we took the machine check, if we caused one. */
	__asm __volatile ("sync; isync");

	curpcb->pcb_onfault = 0;
	__asm __volatile ("sync");	/* To be sure. */

	/* Use the value to avoid reordering. */
	if (rptr)
		*rptr = x;

	return 0;
}

/*
 * For now, this only deals with the particular unaligned access case
 * that gcc tends to generate.  Eventually it should handle all of the
 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
 */

static int
fix_unaligned(struct proc *p, struct trapframe *frame)
{

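	/*
	 * No fixups are implemented yet; returning nonzero makes the
	 * EXC_ALI handler above deliver SIGBUS.
	 */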
	return -1;
}