/*	$NetBSD: trap.c,v 1.21 2004/02/24 18:31:46 drochner Exp $	*/

/*
 * Copyright 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.21 2004/02/24 18:31:46 drochner Exp $");

#include "opt_altivec.h"
#include "opt_ddb.h"
#include "opt_ktrace.h"
#include "opt_systrace.h"
#include "opt_syscall_debug.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/reboot.h>
#include <sys/syscall.h>
#include <sys/systm.h>
#include <sys/user.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <sys/pool.h>
#include <sys/sa.h>
#include <sys/savar.h>
#ifdef SYSTRACE
#include <sys/systrace.h>
#endif
#include <sys/userret.h>

#include <uvm/uvm_extern.h>

#include <dev/cons.h>

#include <machine/cpu.h>
#include <machine/db_machdep.h>
#include <machine/fpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/trap.h>

#include <powerpc/spr.h>
#include <powerpc/ibm4xx/pmap.h>
#include <powerpc/ibm4xx/tlb.h>
#include <powerpc/fpu/fpu_extern.h>

/* These definitions should probably be somewhere else			XXX */
#define	FIRSTARG	3		/* first argument is in reg 3 */
#define	NARGREG		8		/* 8 args are in registers */
#define	MOREARGS(sp)	((caddr_t)((int)(sp) + 8)) /* more args go here */

static int fix_unaligned __P((struct lwp *l, struct trapframe *frame));

void trap __P((struct trapframe *));	/* Called from locore / trap_subr */
/* Why are these not defined in a header? */
int badaddr __P((void *, size_t));
int badaddr_read __P((void *, size_t, int *));
int ctx_setup __P((int, int));

#ifdef DEBUG
#define TDB_ALL	0x1
int trapdebug = /* TDB_ALL */ 0;
#define	DBPRINTF(x, y)	if (trapdebug & (x)) printf y
#else
#define DBPRINTF(x, y)
#endif

/*
 * trap --
 *	Machine-dependent trap handler, entered from locore / trap_subr.
 *	Dispatches on frame->exc (with EXC_USER or'ed in when the trap was
 *	taken with PSL_PR set, i.e. from user mode): TLB misses and page
 *	faults are resolved via uvm_fault(), illegal instructions are fed
 *	to the FPU emulator, recoverable kernel faults are redirected to
 *	the pcb_onfault handler, and anything else panics.  On return to
 *	user mode the MI userret path is invoked.
 */
void
trap(struct trapframe *frame)
{
	struct lwp *l = curlwp;
	/* l may be NULL early in boot; p is only valid when l is set. */
	struct proc *p = l ? l->l_proc : NULL;
	int type = frame->exc;
	int ftype, rv;
	ksiginfo_t ksi;

	KASSERT(l == 0 || (l->l_stat == LSONPROC));

	/* PSL_PR in the saved MSR means the trap came from user mode. */
	if (frame->srr1 & PSL_PR)
		type |= EXC_USER;

	/* Default fault type; upgraded to WRITE/EXECUTE per-case below. */
	ftype = VM_PROT_READ;

	DBPRINTF(TDB_ALL, ("trap(%x) at %lx from frame %p &frame %p\n",
	    type, frame->srr0, frame, &frame));

	switch (type) {
	case EXC_DEBUG|EXC_USER:
		{
			int srr2, srr3;

			/*
			 * Read the debug status SPR (0x3f0 — presumably
			 * DBSR on 4xx; TODO confirm against the core
			 * manual).  NOTE(review): the asm writes only
			 * %0 (rv); srr2 and srr3 are declared as outputs
			 * but never set, so the printf below prints
			 * uninitialized values for them.
			 */
			__asm __volatile("mfspr %0,0x3f0" :
			    "=r" (rv), "=r" (srr2), "=r" (srr3) :);
			printf("debug reg is %x srr2 %x srr3 %x\n", rv, srr2,
			    srr3);
			/* XXX fall through or break here?! */
		}
		/*
		 * DEBUG intr -- probably single-step.
		 * (Falls through to the trace handling below.)
		 */
	case EXC_TRC|EXC_USER:
		/* Clear single-step enable and post SIGTRAP. */
		frame->srr1 &= ~PSL_SE;
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_trap = EXC_TRC;
		ksi.ksi_addr = (void *)frame->srr0;
		KERNEL_PROC_LOCK(l);
		trapsignal(l, &ksi);
		KERNEL_PROC_UNLOCK(l);
		break;

	/*
	 * If we could not find and install appropriate TLB entry, fall through.
	 */

	case EXC_DSI:
		/* FALLTHROUGH */
	case EXC_DTMISS:
		/*
		 * Kernel-mode data access fault / data TLB miss.
		 * Fault the page in; on failure, try the pcb_onfault
		 * recovery handler (set up by setfault()/copyin() etc.)
		 * before giving up.
		 */
		{
			struct vm_map *map;
			vaddr_t va;
			struct faultbuf *fb = NULL;

			KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
			va = frame->dar;
			if (frame->tf_xtra[TF_PID] == KERNEL_PID) {
				map = kernel_map;
			} else {
				/*
				 * Fault was taken with a user PID loaded
				 * (e.g. during copyin/copyout); fault
				 * against the user's map.
				 */
				map = &p->p_vmspace->vm_map;
				if (l->l_flag & L_SA) {
					KDASSERT(p != NULL && p->p_sa != NULL);
					p->p_sa->sa_vp_faultaddr = va;
					l->l_flag |= L_SA_PAGEFAULT;
				}
			}

			/* ESR store/zone bits indicate a write fault. */
			if (frame->tf_xtra[TF_ESR] & (ESR_DST|ESR_DIZ))
				ftype = VM_PROT_WRITE;

			DBPRINTF(TDB_ALL,
			    ("trap(EXC_DSI) at %lx %s fault on %p esr %x\n",
			    frame->srr0,
			    (ftype & VM_PROT_WRITE) ? "write" : "read",
			    (void *)va, frame->tf_xtra[TF_ESR]));
			rv = uvm_fault(map, trunc_page(va), 0, ftype);
			KERNEL_UNLOCK();
			if (map != kernel_map)
				l->l_flag &= ~L_SA_PAGEFAULT;
			if (rv == 0)
				goto done;
			/*
			 * Unresolved: if an onfault handler is registered,
			 * fake a return into it with r3 = TRUE (the
			 * setfault() convention) and restore the callee-
			 * saved registers it stashed.
			 */
			if ((fb = l->l_addr->u_pcb.pcb_onfault) != NULL) {
				frame->tf_xtra[TF_PID] = KERNEL_PID;
				frame->srr0 = fb->fb_pc;
				frame->srr1 |= PSL_IR; /* Re-enable IMMU */
				frame->fixreg[1] = fb->fb_sp;
				frame->fixreg[2] = fb->fb_r2;
				frame->fixreg[3] = 1; /* Return TRUE */
				frame->cr = fb->fb_cr;
				memcpy(&frame->fixreg[13], fb->fb_fixreg,
				    sizeof(fb->fb_fixreg));
				goto done;
			}
		}
		goto brain_damage;

	case EXC_DSI|EXC_USER:
		/* FALLTHROUGH */
	case EXC_DTMISS|EXC_USER:
		/*
		 * User-mode data fault: fault the page into the process'
		 * map, or deliver SIGSEGV (SIGKILL when out of swap).
		 */
		KERNEL_PROC_LOCK(l);

		if (frame->tf_xtra[TF_ESR] & (ESR_DST|ESR_DIZ))
			ftype = VM_PROT_WRITE;

		DBPRINTF(TDB_ALL,
		    ("trap(EXC_DSI|EXC_USER) at %lx %s fault on %lx %x\n",
		    frame->srr0, (ftype & VM_PROT_WRITE) ? "write" : "read",
		    frame->dar, frame->tf_xtra[TF_ESR]));
		KASSERT(l == curlwp && (l->l_stat == LSONPROC));
		if (l->l_flag & L_SA) {
			KDASSERT(p != NULL && p->p_sa != NULL);
			p->p_sa->sa_vp_faultaddr = (vaddr_t)frame->dar;
			l->l_flag |= L_SA_PAGEFAULT;
		}
		rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(frame->dar),
		    0, ftype);
		if (rv == 0) {
			/*
			 * Cleared unconditionally here and below; harmless
			 * when L_SA_PAGEFAULT was never set.
			 */
			l->l_flag &= ~L_SA_PAGEFAULT;
			KERNEL_PROC_UNLOCK(l);
			break;
		}
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGSEGV;
		ksi.ksi_trap = EXC_DSI;
		ksi.ksi_addr = (void *)frame->dar;
		if (rv == ENOMEM) {
			printf("UVM: pid %d (%s) lid %d, uid %d killed: "
			    "out of swap\n",
			    p->p_pid, p->p_comm, l->l_lid,
			    p->p_cred && p->p_ucred ?
			    p->p_ucred->cr_uid : -1);
			ksi.ksi_signo = SIGKILL;
		}
		trapsignal(l, &ksi);
		l->l_flag &= ~L_SA_PAGEFAULT;
		KERNEL_PROC_UNLOCK(l);
		break;

	case EXC_ITMISS|EXC_USER:
	case EXC_ISI|EXC_USER:
		/*
		 * User-mode instruction fetch fault: fault the text page
		 * in for execute, or deliver SIGSEGV.
		 */
		KERNEL_PROC_LOCK(l);
		if (l->l_flag & L_SA) {
			KDASSERT(p != NULL && p->p_sa != NULL);
			p->p_sa->sa_vp_faultaddr = (vaddr_t)frame->srr0;
			l->l_flag |= L_SA_PAGEFAULT;
		}
		ftype = VM_PROT_EXECUTE;
		DBPRINTF(TDB_ALL,
		    ("trap(EXC_ISI|EXC_USER) at %lx execute fault tf %p\n",
		    frame->srr0, frame));
		rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(frame->srr0),
		    0, ftype);
		if (rv == 0) {
			l->l_flag &= ~L_SA_PAGEFAULT;
			KERNEL_PROC_UNLOCK(l);
			break;
		}
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGSEGV;
		ksi.ksi_trap = EXC_ISI;
		ksi.ksi_addr = (void *)frame->srr0;
		ksi.ksi_code = (rv == EACCES ? SEGV_ACCERR : SEGV_MAPERR);
		trapsignal(l, &ksi);
		l->l_flag &= ~L_SA_PAGEFAULT;
		KERNEL_PROC_UNLOCK(l);
		break;

	case EXC_AST|EXC_USER:
		/*
		 * Asynchronous software trap: profiling tick delivery
		 * and preemption check on the way back to user mode.
		 */
		curcpu()->ci_astpending = 0;	/* we are about to do it */
		KERNEL_PROC_LOCK(l);
		uvmexp.softs++;
		if (p->p_flag & P_OWEUPC) {
			p->p_flag &= ~P_OWEUPC;
			ADDUPROF(p);
		}
		/* Check whether we are being preempted. */
		if (curcpu()->ci_want_resched)
			preempt(0);
		KERNEL_PROC_UNLOCK(l);
		break;


	case EXC_ALI|EXC_USER:
		/*
		 * Alignment fault: try to emulate; on failure send
		 * SIGBUS, on success skip the faulting instruction.
		 */
		KERNEL_PROC_LOCK(l);
		if (fix_unaligned(l, frame) != 0) {
			KSI_INIT_TRAP(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_trap = EXC_ALI;
			ksi.ksi_addr = (void *)frame->dar;
			trapsignal(l, &ksi);
		} else
			frame->srr0 += 4;
		KERNEL_PROC_UNLOCK(l);
		break;

	case EXC_PGM|EXC_USER:
		/*
		 * Illegal insn:
		 *
		 * let's try to see if it's FPU and can be emulated.
		 */
		uvmexp.traps ++;
		if (!(l->l_addr->u_pcb.pcb_flags & PCB_FPU)) {
			/* First FPU use: start from a clean FP state. */
			memset(&l->l_addr->u_pcb.pcb_fpu, 0,
				sizeof l->l_addr->u_pcb.pcb_fpu);
			l->l_addr->u_pcb.pcb_flags |= PCB_FPU;
		}

		/* fpu_emulate() returns 0 on success, else a signal number. */
		if ((rv = fpu_emulate(frame,
			(struct fpreg *)&l->l_addr->u_pcb.pcb_fpu))) {
			KSI_INIT_TRAP(&ksi);
			ksi.ksi_signo = rv;
			ksi.ksi_trap = EXC_PGM;
			ksi.ksi_addr = (void *)frame->srr0;
			KERNEL_PROC_LOCK(l);
			trapsignal(l, &ksi);
			KERNEL_PROC_UNLOCK(l);
		}
		break;

	case EXC_MCHK:
		/*
		 * Kernel-mode machine check: recoverable only if an
		 * onfault handler is registered (e.g. badaddr_read()).
		 */
		{
			struct faultbuf *fb;

			if ((fb = l->l_addr->u_pcb.pcb_onfault) != NULL) {
				frame->tf_xtra[TF_PID] = KERNEL_PID;
				frame->srr0 = fb->fb_pc;
				frame->srr1 |= PSL_IR; /* Re-enable IMMU */
				frame->fixreg[1] = fb->fb_sp;
				frame->fixreg[2] = fb->fb_r2;
				frame->fixreg[3] = 1; /* Return TRUE */
				frame->cr = fb->fb_cr;
				memcpy(&frame->fixreg[13], fb->fb_fixreg,
				    sizeof(fb->fb_fixreg));
				goto done;
			}
		}
		goto brain_damage;
	default:
 brain_damage:
		/* Unhandled trap: drop into DDB if configured, else panic. */
		printf("trap type 0x%x at 0x%lx\n", type, frame->srr0);
#ifdef DDB
		if (kdb_trap(type, frame))
			goto done;
#endif
#ifdef TRAP_PANICWAIT
		printf("Press a key to panic.\n");
		cngetc();
#endif
		panic("trap");
	}

	/* Invoke MI userret code */
	mi_userret(l);

	curcpu()->ci_schedstate.spc_curpriority = l->l_priority = l->l_usrpri;
 done:
	return;
}

395int
396ctx_setup(int ctx, int srr1)
397{
398	volatile struct pmap *pm;
399
400	/* Update PID if we're returning to user mode. */
401	if (srr1 & PSL_PR) {
402		pm = curproc->p_vmspace->vm_map.pmap;
403		if (!pm->pm_ctx) {
404			ctx_alloc((struct pmap *)pm);
405		}
406		ctx = pm->pm_ctx;
407		if (srr1 & PSL_SE) {
408			int dbreg, mask = 0x48000000;
409				/*
410				 * Set the Internal Debug and
411				 * Instruction Completion bits of
412				 * the DBCR0 register.
413				 *
414				 * XXX this is also used by jtag debuggers...
415				 */
416			__asm __volatile("mfspr %0,0x3f2;"
417			    "or %0,%0,%1;"
418			    "mtspr 0x3f2,%0;" :
419			    "=&r" (dbreg) : "r" (mask));
420		}
421	}
422	else if (!ctx) {
423		ctx = KERNEL_PID;
424	}
425	return (ctx);
426}
427
/*
 * Used by copyin()/copyout()
 */
extern vaddr_t vmaprange __P((struct proc *, vaddr_t, vsize_t, int));
extern void vunmaprange __P((vaddr_t, vsize_t));
static int bigcopyin __P((const void *, void *, size_t ));
static int bigcopyout __P((const void *, void *, size_t ));

/*
 * copyin --
 *	Copy len bytes from user address udaddr to kernel address kaddr.
 *	Small transfers (<= 256 bytes) are done byte-by-byte in inline
 *	assembly, switching the MMU PID between the user context and the
 *	kernel for each byte with the IMMU disabled; larger transfers go
 *	through bigcopyin(), which maps the user range into kernel VA.
 *	Returns 0 on success, EFAULT on an unresolvable fault (caught
 *	via the setfault()/pcb_onfault mechanism).
 */
int
copyin(const void *udaddr, void *kaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int msr, pid, tmp, ctx;
	struct faultbuf env;

	/* For bigger buffers use the faster copy */
	if (len > 256) return (bigcopyin(udaddr, kaddr, len));

	/* Arrange for faults below to return here with EFAULT. */
	if (setfault(&env)) {
		curpcb->pcb_onfault = 0;
		return EFAULT;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}

	/*
	 * Operand map: %0 = msr save, %1 = pid save/scratch, %2 = byte
	 * temp, %3 = user ctx, %4 = udaddr, %5 = kaddr, %6 = len.
	 * CTR is loaded with len+1 so the bdz at the loop top copies
	 * exactly len bytes.
	 */
	asm volatile("addi %6,%6,1; mtctr %6;"	/* Set up counter */
		"mfmsr %0;"			/* Save MSR */
		"li %1,0x20; "
		"andc %1,%0,%1; mtmsr %1;"	/* Disable IMMU */
		"mfpid %1;"			/* Save old PID */
		"sync; isync;"

		"1: bdz 2f;"			/* while len */
		"mtpid %3; sync;"		/* Load user ctx */
		"lbz %2,0(%4); addi %4,%4,1;"	/* Load byte */
		"sync; isync;"
		"mtpid %1;sync;"
		"stb %2,0(%5); dcbf 0,%5; addi %5,%5,1;" /* Store kernel byte */
		"sync; isync;"
		"b 1b;"				/* repeat */

		"2: mtpid %1; mtmsr %0;"	/* Restore PID and MSR */
		"sync; isync;"
		: "=&r" (msr), "=&r" (pid), "=&r" (tmp)
		: "r" (ctx), "r" (udaddr), "r" (kaddr), "r" (len));

	/* Success: disarm the fault handler. */
	curpcb->pcb_onfault = 0;
	return 0;
}

482static int
483bigcopyin(const void *udaddr, void *kaddr, size_t len)
484{
485	const char *up;
486	char *kp = kaddr;
487	struct lwp *l = curlwp;
488	struct proc *p;
489	int error;
490
491	if (!l) {
492		return EFAULT;
493	}
494
495	p = l->l_proc;
496
497	/*
498	 * Stolen from physio():
499	 */
500	PHOLD(l);
501	error = uvm_vslock(p, (caddr_t)udaddr, len, VM_PROT_READ);
502	if (error) {
503		PRELE(l);
504		return EFAULT;
505	}
506	up = (char *)vmaprange(p, (vaddr_t)udaddr, len, VM_PROT_READ);
507
508	memcpy(kp, up, len);
509	vunmaprange((vaddr_t)up, len);
510	uvm_vsunlock(p, (caddr_t)udaddr, len);
511	PRELE(l);
512
513	return 0;
514}
515
/*
 * copyout --
 *	Copy len bytes from kernel address kaddr to user address udaddr.
 *	Mirror image of copyin(): small transfers (<= 256 bytes) are
 *	done byte-by-byte in inline assembly, switching the MMU PID
 *	between kernel and user context per byte with the IMMU disabled;
 *	larger transfers go through bigcopyout().  Returns 0 on success,
 *	EFAULT on an unresolvable fault (via setfault()/pcb_onfault).
 */
int
copyout(const void *kaddr, void *udaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int msr, pid, tmp, ctx;
	struct faultbuf env;

	/* For big copies use more efficient routine */
	if (len > 256) return (bigcopyout(kaddr, udaddr, len));

	/* Arrange for faults below to return here with EFAULT. */
	if (setfault(&env)) {
		curpcb->pcb_onfault = 0;
		return EFAULT;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}

	/*
	 * Operand map: %0 = msr save, %1 = pid save/scratch, %2 = byte
	 * temp, %3 = user ctx, %4 = udaddr, %5 = kaddr, %6 = len.
	 * CTR is loaded with len+1 so the bdz at the loop top copies
	 * exactly len bytes.
	 */
	asm volatile("addi %6,%6,1; mtctr %6;"	/* Set up counter */
		"mfmsr %0;"			/* Save MSR */
		"li %1,0x20; "
		"andc %1,%0,%1; mtmsr %1;"	/* Disable IMMU */
		"mfpid %1;"			/* Save old PID */
		"sync; isync;"

		"1: bdz 2f;"			/* while len */
		"mtpid %1;sync;"
		"lbz %2,0(%5); addi %5,%5,1;"	/* Load kernel byte */
		"sync; isync;"
		"mtpid %3; sync;"		/* Load user ctx */
		"stb %2,0(%4);  dcbf 0,%4; addi %4,%4,1;" /* Store user byte */
		"sync; isync;"
		"b 1b;"				/* repeat */

		"2: mtpid %1; mtmsr %0;"	/* Restore PID and MSR */
		"sync; isync;"
		: "=&r" (msr), "=&r" (pid), "=&r" (tmp)
		: "r" (ctx), "r" (udaddr), "r" (kaddr), "r" (len));

	/* Success: disarm the fault handler. */
	curpcb->pcb_onfault = 0;
	return 0;
}

562static int
563bigcopyout(const void *kaddr, void *udaddr, size_t len)
564{
565	char *up;
566	const char *kp = (char *)kaddr;
567	struct lwp *l = curlwp;
568	struct proc *p;
569	int error;
570
571	if (!l) {
572		return EFAULT;
573	}
574
575	p = l->l_proc;
576
577	/*
578	 * Stolen from physio():
579	 */
580	PHOLD(l);
581	error = uvm_vslock(p, udaddr, len, VM_PROT_WRITE);
582	if (error) {
583		PRELE(l);
584		return EFAULT;
585	}
586	up = (char *)vmaprange(p, (vaddr_t)udaddr, len,
587	    VM_PROT_READ | VM_PROT_WRITE);
588
589	memcpy(up, kp, len);
590	vunmaprange((vaddr_t)up, len);
591	uvm_vsunlock(p, udaddr, len);
592	PRELE(l);
593
594	return 0;
595}
596
597/*
598 * kcopy(const void *src, void *dst, size_t len);
599 *
600 * Copy len bytes from src to dst, aborting if we encounter a fatal
601 * page fault.
602 *
603 * kcopy() _must_ save and restore the old fault handler since it is
604 * called by uiomove(), which may be in the path of servicing a non-fatal
605 * page fault.
606 */
607int
608kcopy(const void *src, void *dst, size_t len)
609{
610	struct faultbuf env, *oldfault;
611
612	oldfault = curpcb->pcb_onfault;
613	if (setfault(&env)) {
614		curpcb->pcb_onfault = oldfault;
615		return EFAULT;
616	}
617
618	memcpy(dst, src, len);
619
620	curpcb->pcb_onfault = oldfault;
621	return 0;
622}
623
/*
 * badaddr --
 *	Probe addr for accessibility with a read of the given size,
 *	discarding the value read.  Returns nonzero if the access
 *	faulted (i.e. the address is "bad").
 */
int
badaddr(void *addr, size_t size)
{

	return (badaddr_read(addr, size, NULL));
}

631int
632badaddr_read(void *addr, size_t size, int *rptr)
633{
634	struct faultbuf env;
635	int x;
636
637	/* Get rid of any stale machine checks that have been waiting.  */
638	__asm __volatile ("sync; isync");
639
640	if (setfault(&env)) {
641		curpcb->pcb_onfault = 0;
642		__asm __volatile ("sync");
643		return 1;
644	}
645
646	__asm __volatile ("sync");
647
648	switch (size) {
649	case 1:
650		x = *(volatile int8_t *)addr;
651		break;
652	case 2:
653		x = *(volatile int16_t *)addr;
654		break;
655	case 4:
656		x = *(volatile int32_t *)addr;
657		break;
658	default:
659		panic("badaddr: invalid size (%d)", size);
660	}
661
662	/* Make sure we took the machine check, if we caused one. */
663	__asm __volatile ("sync; isync");
664
665	curpcb->pcb_onfault = 0;
666	__asm __volatile ("sync");	/* To be sure. */
667
668	/* Use the value to avoid reorder. */
669	if (rptr)
670		*rptr = x;
671
672	return 0;
673}
674
/*
 * fix_unaligned --
 *	Attempt to emulate an unaligned access.  Not implemented:
 *	always fails, so trap() delivers SIGBUS.  Eventually this
 *	should handle at least the particular unaligned access case
 *	that gcc tends to generate, and ideally all the possibilities
 *	on a 32-bit big-endian PowerPC.
 */
static int
fix_unaligned(struct lwp *l, struct trapframe *frame)
{

	return (-1);
}

688/*
689 * Start a new LWP
690 */
691void
692startlwp(arg)
693	void *arg;
694{
695	int err;
696	ucontext_t *uc = arg;
697	struct lwp *l = curlwp;
698
699	err = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
700#if DIAGNOSTIC
701	if (err) {
702		printf("Error %d from cpu_setmcontext.", err);
703	}
704#endif
705	pool_put(&lwp_uc_pool, uc);
706
707	upcallret(l);
708}
709
710/*
711 * XXX This is a terrible name.
712 */
713void
714upcallret(l)
715	struct lwp *l;
716{
717
718	/* Invoke MI userret code */
719	mi_userret(l);
720
721	curcpu()->ci_schedstate.spc_curpriority = l->l_priority = l->l_usrpri;
722}
723