/* trap.c revision 1.30 */
1/*	$NetBSD: trap.c,v 1.30 2006/03/07 03:32:05 thorpej Exp $	*/
2
3/*
4 * Copyright 2001 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *      This product includes software developed for the NetBSD Project by
20 *      Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 *    or promote products derived from this software without specific prior
23 *    written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38/*
39 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
40 * Copyright (C) 1995, 1996 TooLs GmbH.
41 * All rights reserved.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 *    notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 *    notice, this list of conditions and the following disclaimer in the
50 *    documentation and/or other materials provided with the distribution.
51 * 3. All advertising materials mentioning features or use of this software
52 *    must display the following acknowledgement:
53 *	This product includes software developed by TooLs GmbH.
54 * 4. The name of TooLs GmbH may not be used to endorse or promote products
55 *    derived from this software without specific prior written permission.
56 *
57 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
58 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
59 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
60 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
61 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
62 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
63 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
64 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
65 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
66 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
67 */
68
69#include <sys/cdefs.h>
70__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.30 2006/03/07 03:32:05 thorpej Exp $");
71
72#include "opt_altivec.h"
73#include "opt_ddb.h"
74#include "opt_syscall_debug.h"
75
76#include <sys/param.h>
77#include <sys/proc.h>
78#include <sys/reboot.h>
79#include <sys/syscall.h>
80#include <sys/systm.h>
81#include <sys/user.h>
82#include <sys/pool.h>
83#include <sys/sa.h>
84#include <sys/savar.h>
85#include <sys/userret.h>
86
87#include <uvm/uvm_extern.h>
88
89#include <dev/cons.h>
90
91#include <machine/cpu.h>
92#include <machine/db_machdep.h>
93#include <machine/fpu.h>
94#include <machine/frame.h>
95#include <machine/pcb.h>
96#include <machine/psl.h>
97#include <machine/trap.h>
98
99#include <powerpc/spr.h>
100#include <powerpc/ibm4xx/pmap.h>
101#include <powerpc/ibm4xx/tlb.h>
102#include <powerpc/fpu/fpu_extern.h>
103
104/* These definitions should probably be somewhere else			XXX */
105#define	FIRSTARG	3		/* first argument is in reg 3 */
106#define	NARGREG		8		/* 8 args are in registers */
107#define	MOREARGS(sp)	((caddr_t)((int)(sp) + 8)) /* more args go here */
108
109static int fix_unaligned __P((struct lwp *l, struct trapframe *frame));
110
111void trap __P((struct trapframe *));	/* Called from locore / trap_subr */
112/* Why are these not defined in a header? */
113int badaddr __P((void *, size_t));
114int badaddr_read __P((void *, size_t, int *));
115int ctx_setup __P((int, int));
116
117#ifdef DEBUG
118#define TDB_ALL	0x1
119int trapdebug = /* TDB_ALL */ 0;
120#define	DBPRINTF(x, y)	if (trapdebug & (x)) printf y
121#else
122#define DBPRINTF(x, y)
123#endif
124
/*
 * trap:
 *
 *	Central trap/exception handler, called from the locore trap
 *	vectors (see the extern declaration above).  Decodes the
 *	exception type from frame->exc, ORs in EXC_USER when the trap
 *	came from user mode (PSL_PR set in srr1), dispatches on the
 *	combined type, and posts signals to the faulting LWP where
 *	appropriate.  User-mode traps finish through mi_userret();
 *	unrecoverable traps panic via the brain_damage label.
 */
void
trap(struct trapframe *frame)
{
	struct lwp *l = curlwp;
	struct proc *p = l ? l->l_proc : NULL;
	int type = frame->exc;
	int ftype, rv;
	ksiginfo_t ksi;

	KASSERT(l == 0 || (l->l_stat == LSONPROC));

	/* Traps taken with PSL_PR set came from user mode. */
	if (frame->srr1 & PSL_PR)
		type |= EXC_USER;

	/* Default fault type; upgraded to write/execute in the cases below. */
	ftype = VM_PROT_READ;

	DBPRINTF(TDB_ALL, ("trap(%x) at %lx from frame %p &frame %p\n",
	    type, frame->srr0, frame, &frame));

	switch (type) {
	case EXC_DEBUG|EXC_USER:
		{
			int srr2, srr3;

			/*
			 * NOTE(review): only one mfspr is issued but three
			 * outputs are declared -- srr2/srr3 are never
			 * written by the asm, so the printf below shows
			 * indeterminate values for them.  Diagnostic only.
			 */
			__asm volatile("mfspr %0,0x3f0" :
			    "=r" (rv), "=r" (srr2), "=r" (srr3) :);
			printf("debug reg is %x srr2 %x srr3 %x\n", rv, srr2,
			    srr3);
			/* XXX fall through or break here?! */
		}
		/*
		 * DEBUG intr -- probably single-step.
		 */
	case EXC_TRC|EXC_USER:
		/* Clear single-step and deliver SIGTRAP to the LWP. */
		frame->srr1 &= ~PSL_SE;
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_trap = EXC_TRC;
		ksi.ksi_addr = (void *)frame->srr0;
		KERNEL_PROC_LOCK(l);
		trapsignal(l, &ksi);
		KERNEL_PROC_UNLOCK(l);
		break;

	/*
	 * If we could not find and install appropriate TLB entry, fall through.
	 */

	case EXC_DSI:
		/* FALLTHROUGH */
	case EXC_DTMISS:
		/* Kernel-mode data fault / data TLB miss. */
		{
			struct vm_map *map;
			vaddr_t va;
			struct faultbuf *fb = NULL;

			KERNEL_LOCK(LK_CANRECURSE|LK_EXCLUSIVE);
			va = frame->dar;
			/*
			 * A fault taken while running with the kernel PID
			 * is against kernel_map; otherwise it is against
			 * the current process' map.
			 */
			if (frame->tf_xtra[TF_PID] == KERNEL_PID) {
				map = kernel_map;
			} else {
				map = &p->p_vmspace->vm_map;
				if (l->l_flag & L_SA) {
					l->l_savp->savp_faultaddr = va;
					l->l_flag |= L_SA_PAGEFAULT;
				}
			}

			/* ESR store/zone bits indicate a write fault. */
			if (frame->tf_xtra[TF_ESR] & (ESR_DST|ESR_DIZ))
				ftype = VM_PROT_WRITE;

			DBPRINTF(TDB_ALL,
			    ("trap(EXC_DSI) at %lx %s fault on %p esr %x\n",
			    frame->srr0,
			    (ftype & VM_PROT_WRITE) ? "write" : "read",
			    (void *)va, frame->tf_xtra[TF_ESR]));
			rv = uvm_fault(map, trunc_page(va), 0, ftype);
			KERNEL_UNLOCK();
			if (map != kernel_map)
				l->l_flag &= ~L_SA_PAGEFAULT;
			if (rv == 0)
				goto done;
			/*
			 * Unresolved fault: if a setfault()-registered
			 * handler (copyin/copyout/kcopy/badaddr) is
			 * active, resume at its saved state with r3 = 1
			 * to signal failure.
			 */
			if ((fb = l->l_addr->u_pcb.pcb_onfault) != NULL) {
				frame->tf_xtra[TF_PID] = KERNEL_PID;
				frame->srr0 = fb->fb_pc;
				frame->srr1 |= PSL_IR; /* Re-enable IMMU */
				frame->fixreg[1] = fb->fb_sp;
				frame->fixreg[2] = fb->fb_r2;
				frame->fixreg[3] = 1; /* Return TRUE */
				frame->cr = fb->fb_cr;
				memcpy(&frame->fixreg[13], fb->fb_fixreg,
				    sizeof(fb->fb_fixreg));
				goto done;
			}
		}
		goto brain_damage;

	case EXC_DSI|EXC_USER:
		/* FALLTHROUGH */
	case EXC_DTMISS|EXC_USER:
		/* User-mode data fault: fault the page in or signal. */
		KERNEL_PROC_LOCK(l);

		if (frame->tf_xtra[TF_ESR] & (ESR_DST|ESR_DIZ))
			ftype = VM_PROT_WRITE;

		DBPRINTF(TDB_ALL,
		    ("trap(EXC_DSI|EXC_USER) at %lx %s fault on %lx %x\n",
		    frame->srr0, (ftype & VM_PROT_WRITE) ? "write" : "read",
		    frame->dar, frame->tf_xtra[TF_ESR]));
		KASSERT(l == curlwp && (l->l_stat == LSONPROC));
		if (l->l_flag & L_SA) {
			l->l_savp->savp_faultaddr = (vaddr_t)frame->dar;
			l->l_flag |= L_SA_PAGEFAULT;
		}
		rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(frame->dar),
		    0, ftype);
		if (rv == 0) {
			l->l_flag &= ~L_SA_PAGEFAULT;
			KERNEL_PROC_UNLOCK(l);
			break;
		}
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGSEGV;
		ksi.ksi_trap = EXC_DSI;
		ksi.ksi_addr = (void *)frame->dar;
		/* ENOMEM from uvm_fault means out of swap: kill the process. */
		if (rv == ENOMEM) {
			printf("UVM: pid %d (%s) lid %d, uid %d killed: "
			    "out of swap\n",
			    p->p_pid, p->p_comm, l->l_lid,
			    p->p_cred && p->p_ucred ?
			    p->p_ucred->cr_uid : -1);
			ksi.ksi_signo = SIGKILL;
		}
		trapsignal(l, &ksi);
		l->l_flag &= ~L_SA_PAGEFAULT;
		KERNEL_PROC_UNLOCK(l);
		break;

	case EXC_ITMISS|EXC_USER:
	case EXC_ISI|EXC_USER:
		/* User-mode instruction fault / instruction TLB miss. */
		KERNEL_PROC_LOCK(l);
		if (l->l_flag & L_SA) {
			l->l_savp->savp_faultaddr = (vaddr_t)frame->srr0;
			l->l_flag |= L_SA_PAGEFAULT;
		}
		ftype = VM_PROT_EXECUTE;
		DBPRINTF(TDB_ALL,
		    ("trap(EXC_ISI|EXC_USER) at %lx execute fault tf %p\n",
		    frame->srr0, frame));
		rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(frame->srr0),
		    0, ftype);
		if (rv == 0) {
			l->l_flag &= ~L_SA_PAGEFAULT;
			KERNEL_PROC_UNLOCK(l);
			break;
		}
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGSEGV;
		ksi.ksi_trap = EXC_ISI;
		ksi.ksi_addr = (void *)frame->srr0;
		ksi.ksi_code = (rv == EACCES ? SEGV_ACCERR : SEGV_MAPERR);
		trapsignal(l, &ksi);
		l->l_flag &= ~L_SA_PAGEFAULT;
		KERNEL_PROC_UNLOCK(l);
		break;

	case EXC_AST|EXC_USER:
		/* Asynchronous software trap: profiling tick / preemption. */
		curcpu()->ci_astpending = 0;	/* we are about to do it */
		KERNEL_PROC_LOCK(l);
		uvmexp.softs++;
		if (p->p_flag & P_OWEUPC) {
			p->p_flag &= ~P_OWEUPC;
			ADDUPROF(p);
		}
		/* Check whether we are being preempted. */
		if (curcpu()->ci_want_resched)
			preempt(0);
		KERNEL_PROC_UNLOCK(l);
		break;


	case EXC_ALI|EXC_USER:
		/* Alignment fault: try to fix up, else SIGBUS. */
		KERNEL_PROC_LOCK(l);
		if (fix_unaligned(l, frame) != 0) {
			KSI_INIT_TRAP(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_trap = EXC_ALI;
			ksi.ksi_addr = (void *)frame->dar;
			trapsignal(l, &ksi);
		} else
			frame->srr0 += 4;	/* skip the fixed-up insn */
		KERNEL_PROC_UNLOCK(l);
		break;

	case EXC_PGM|EXC_USER:
		/*
		 * Illegal insn:
		 *
		 * let's try to see if it's FPU and can be emulated.
		 */
		uvmexp.traps++;
		if (!(l->l_addr->u_pcb.pcb_flags & PCB_FPU)) {
			/* First FP use by this LWP: start with clean state. */
			memset(&l->l_addr->u_pcb.pcb_fpu, 0,
				sizeof l->l_addr->u_pcb.pcb_fpu);
			l->l_addr->u_pcb.pcb_flags |= PCB_FPU;
		}

		/* Nonzero return is the signal number to deliver. */
		if ((rv = fpu_emulate(frame,
			(struct fpreg *)&l->l_addr->u_pcb.pcb_fpu))) {
			KSI_INIT_TRAP(&ksi);
			ksi.ksi_signo = rv;
			ksi.ksi_trap = EXC_PGM;
			ksi.ksi_addr = (void *)frame->srr0;
			KERNEL_PROC_LOCK(l);
			trapsignal(l, &ksi);
			KERNEL_PROC_UNLOCK(l);
		}
		break;

	case EXC_MCHK:
		/*
		 * Machine check in kernel mode: recoverable only if a
		 * setfault()-registered handler (e.g. badaddr_read()) is
		 * active; restore its state with r3 = 1.
		 */
		{
			struct faultbuf *fb;

			if ((fb = l->l_addr->u_pcb.pcb_onfault) != NULL) {
				frame->tf_xtra[TF_PID] = KERNEL_PID;
				frame->srr0 = fb->fb_pc;
				frame->srr1 |= PSL_IR; /* Re-enable IMMU */
				frame->fixreg[1] = fb->fb_sp;
				frame->fixreg[2] = fb->fb_r2;
				frame->fixreg[3] = 1; /* Return TRUE */
				frame->cr = fb->fb_cr;
				memcpy(&frame->fixreg[13], fb->fb_fixreg,
				    sizeof(fb->fb_fixreg));
				goto done;
			}
		}
		goto brain_damage;
	default:
 brain_damage:
		printf("trap type 0x%x at 0x%lx\n", type, frame->srr0);
#ifdef DDB
		if (kdb_trap(type, frame))
			goto done;
#endif
#ifdef TRAP_PANICWAIT
		printf("Press a key to panic.\n");
		cngetc();
#endif
		panic("trap");
	}

	/* Invoke MI userret code */
	mi_userret(l);

	curcpu()->ci_schedstate.spc_curpriority = l->l_priority = l->l_usrpri;
 done:
	return;
}
383
/*
 * ctx_setup:
 *
 *	Called on the way back out of a trap to choose the MMU context
 *	(PID) to resume with.  If returning to user mode (PSL_PR set in
 *	srr1), make sure the current pmap has a context allocated and
 *	use it; if single-stepping was requested (PSL_SE), also arm the
 *	debug facility via DBCR0 (SPR 0x3f2).  In kernel mode a zero
 *	context falls back to KERNEL_PID.  Returns the context to load.
 */
int
ctx_setup(int ctx, int srr1)
{
	volatile struct pmap *pm;

	/* Update PID if we're returning to user mode. */
	if (srr1 & PSL_PR) {
		pm = curproc->p_vmspace->vm_map.pmap;
		if (!pm->pm_ctx) {
			/* No context yet -- allocate one for this pmap. */
			ctx_alloc(__UNVOLATILE(pm));
		}
		ctx = pm->pm_ctx;
		if (srr1 & PSL_SE) {
			int dbreg, mask = 0x48000000;
				/*
				 * Set the Internal Debug and
				 * Instruction Completion bits of
				 * the DBCR0 register.
				 *
				 * XXX this is also used by jtag debuggers...
				 */
			__asm volatile("mfspr %0,0x3f2;"
			    "or %0,%0,%1;"
			    "mtspr 0x3f2,%0;" :
			    "=&r" (dbreg) : "r" (mask));
		}
	}
	else if (!ctx) {
		ctx = KERNEL_PID;
	}
	return (ctx);
}
416
417/*
418 * Used by copyin()/copyout()
419 */
420extern vaddr_t vmaprange __P((struct proc *, vaddr_t, vsize_t, int));
421extern void vunmaprange __P((vaddr_t, vsize_t));
422static int bigcopyin __P((const void *, void *, size_t ));
423static int bigcopyout __P((const void *, void *, size_t ));
424
/*
 * copyin:
 *
 *	Copy len bytes from user address udaddr to kernel address kaddr.
 *	Returns 0 on success or EFAULT if the user access faults (the
 *	trap handler resumes at the state saved by setfault()).
 *
 *	Copies larger than 256 bytes go through bigcopyin(), which maps
 *	the user range into kernel VA instead.  Small copies are done a
 *	byte at a time below, flipping the TLB PID register between the
 *	user context (load) and the kernel PID (store) for each byte.
 */
int
copyin(const void *udaddr, void *kaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int msr, pid, tmp, ctx;
	struct faultbuf env;

	/* For bigger buffers use the faster copy */
	if (len > 256)
		return (bigcopyin(udaddr, kaddr, len));

	if (setfault(&env)) {
		/* Reached via the trap handler when the access faulted. */
		curpcb->pcb_onfault = 0;
		return EFAULT;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}

	/*
	 * Operands: %0 = saved MSR, %1 = saved PID (also scratch for the
	 * MSR mask), %2 = byte being copied, %3 = user context, %4 =
	 * udaddr, %5 = kaddr, %6 = len.  CTR is loaded with len+1
	 * because bdz decrements before testing.  Interrupts must see a
	 * consistent PID, hence the sync/isync pairs around each switch.
	 */
	__asm volatile("addi %6,%6,1; mtctr %6;"	/* Set up counter */
		"mfmsr %0;"			/* Save MSR */
		"li %1,0x20; "
		"andc %1,%0,%1; mtmsr %1;"	/* Disable IMMU */
		"mfpid %1;"			/* Save old PID */
		"sync; isync;"

		"1: bdz 2f;"			/* while len */
		"mtpid %3; sync;"		/* Load user ctx */
		"lbz %2,0(%4); addi %4,%4,1;"	/* Load byte */
		"sync; isync;"
		"mtpid %1;sync;"
		"stb %2,0(%5); dcbf 0,%5; addi %5,%5,1;" /* Store kernel byte */
		"sync; isync;"
		"b 1b;"				/* repeat */

		"2: mtpid %1; mtmsr %0;"	/* Restore PID and MSR */
		"sync; isync;"
		: "=&r" (msr), "=&r" (pid), "=&r" (tmp)
		: "r" (ctx), "b" (udaddr), "b" (kaddr), "b" (len));

	curpcb->pcb_onfault = 0;
	return 0;
}
471
472static int
473bigcopyin(const void *udaddr, void *kaddr, size_t len)
474{
475	const char *up;
476	char *kp = kaddr;
477	struct lwp *l = curlwp;
478	struct proc *p;
479	int error;
480
481	if (!l) {
482		return EFAULT;
483	}
484
485	p = l->l_proc;
486
487	/*
488	 * Stolen from physio():
489	 */
490	PHOLD(l);
491	error = uvm_vslock(p, __UNCONST(udaddr), len, VM_PROT_READ);
492	if (error) {
493		PRELE(l);
494		return EFAULT;
495	}
496	up = (char *)vmaprange(p, (vaddr_t)udaddr, len, VM_PROT_READ);
497
498	memcpy(kp, up, len);
499	vunmaprange((vaddr_t)up, len);
500	uvm_vsunlock(p, __UNCONST(udaddr), len);
501	PRELE(l);
502
503	return 0;
504}
505
/*
 * copyout:
 *
 *	Copy len bytes from kernel address kaddr to user address udaddr.
 *	Returns 0 on success or EFAULT if the user access faults (the
 *	trap handler resumes at the state saved by setfault()).
 *
 *	Copies larger than 256 bytes go through bigcopyout(); small
 *	copies are done a byte at a time below, flipping the TLB PID
 *	between the kernel PID (load) and the user context (store).
 */
int
copyout(const void *kaddr, void *udaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int msr, pid, tmp, ctx;
	struct faultbuf env;

	/* For big copies use more efficient routine */
	if (len > 256)
		return (bigcopyout(kaddr, udaddr, len));

	if (setfault(&env)) {
		/* Reached via the trap handler when the access faulted. */
		curpcb->pcb_onfault = 0;
		return EFAULT;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}

	/*
	 * Operands: %0 = saved MSR, %1 = saved PID (also scratch for the
	 * MSR mask), %2 = byte being copied, %3 = user context, %4 =
	 * udaddr, %5 = kaddr, %6 = len.  CTR is loaded with len+1
	 * because bdz decrements before testing.
	 */
	__asm volatile("addi %6,%6,1; mtctr %6;"	/* Set up counter */
		"mfmsr %0;"			/* Save MSR */
		"li %1,0x20; "
		"andc %1,%0,%1; mtmsr %1;"	/* Disable IMMU */
		"mfpid %1;"			/* Save old PID */
		"sync; isync;"

		"1: bdz 2f;"			/* while len */
		"mtpid %1;sync;"
		"lbz %2,0(%5); addi %5,%5,1;"	/* Load kernel byte */
		"sync; isync;"
		"mtpid %3; sync;"		/* Load user ctx */
		"stb %2,0(%4);  dcbf 0,%4; addi %4,%4,1;" /* Store user byte */
		"sync; isync;"
		"b 1b;"				/* repeat */

		"2: mtpid %1; mtmsr %0;"	/* Restore PID and MSR */
		"sync; isync;"
		: "=&r" (msr), "=&r" (pid), "=&r" (tmp)
		: "r" (ctx), "b" (udaddr), "b" (kaddr), "b" (len));

	curpcb->pcb_onfault = 0;
	return 0;
}
552
553static int
554bigcopyout(const void *kaddr, void *udaddr, size_t len)
555{
556	char *up;
557	const char *kp = (const char *)kaddr;
558	struct lwp *l = curlwp;
559	struct proc *p;
560	int error;
561
562	if (!l) {
563		return EFAULT;
564	}
565
566	p = l->l_proc;
567
568	/*
569	 * Stolen from physio():
570	 */
571	PHOLD(l);
572	error = uvm_vslock(p, udaddr, len, VM_PROT_WRITE);
573	if (error) {
574		PRELE(l);
575		return EFAULT;
576	}
577	up = (char *)vmaprange(p, (vaddr_t)udaddr, len,
578	    VM_PROT_READ | VM_PROT_WRITE);
579
580	memcpy(up, kp, len);
581	vunmaprange((vaddr_t)up, len);
582	uvm_vsunlock(p, udaddr, len);
583	PRELE(l);
584
585	return 0;
586}
587
588/*
589 * kcopy(const void *src, void *dst, size_t len);
590 *
591 * Copy len bytes from src to dst, aborting if we encounter a fatal
592 * page fault.
593 *
594 * kcopy() _must_ save and restore the old fault handler since it is
595 * called by uiomove(), which may be in the path of servicing a non-fatal
596 * page fault.
597 */
int
kcopy(const void *src, void *dst, size_t len)
{
	struct faultbuf env, *oldfault;

	/* Save the caller's fault handler; see comment above. */
	oldfault = curpcb->pcb_onfault;
	if (setfault(&env)) {
		/* Reached via the trap handler when the copy faulted. */
		curpcb->pcb_onfault = oldfault;
		return EFAULT;
	}

	memcpy(dst, src, len);

	curpcb->pcb_onfault = oldfault;
	return 0;
}
614
615int
616badaddr(void *addr, size_t size)
617{
618
619	return badaddr_read(addr, size, NULL);
620}
621
622int
623badaddr_read(void *addr, size_t size, int *rptr)
624{
625	struct faultbuf env;
626	int x;
627
628	/* Get rid of any stale machine checks that have been waiting.  */
629	__asm volatile ("sync; isync");
630
631	if (setfault(&env)) {
632		curpcb->pcb_onfault = 0;
633		__asm volatile ("sync");
634		return 1;
635	}
636
637	__asm volatile ("sync");
638
639	switch (size) {
640	case 1:
641		x = *(volatile int8_t *)addr;
642		break;
643	case 2:
644		x = *(volatile int16_t *)addr;
645		break;
646	case 4:
647		x = *(volatile int32_t *)addr;
648		break;
649	default:
650		panic("badaddr: invalid size (%d)", size);
651	}
652
653	/* Make sure we took the machine check, if we caused one. */
654	__asm volatile ("sync; isync");
655
656	curpcb->pcb_onfault = 0;
657	__asm volatile ("sync");	/* To be sure. */
658
659	/* Use the value to avoid reorder. */
660	if (rptr)
661		*rptr = x;
662
663	return 0;
664}
665
666/*
667 * For now, this only deals with the particular unaligned access case
668 * that gcc tends to generate.  Eventually it should handle all of the
669 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
670 */
671
/*
 * fix_unaligned:
 *
 *	Attempt to emulate an unaligned access.  Not implemented yet:
 *	always fails, so the EXC_ALI handler delivers SIGBUS.
 */
static int
fix_unaligned(struct lwp *l, struct trapframe *frame)
{

	return (-1);
}
678
679/*
680 * Start a new LWP
681 */
682void
683startlwp(arg)
684	void *arg;
685{
686	int err;
687	ucontext_t *uc = arg;
688	struct lwp *l = curlwp;
689
690	err = cpu_setmcontext(l, &uc->uc_mcontext, uc->uc_flags);
691#if DIAGNOSTIC
692	if (err) {
693		printf("Error %d from cpu_setmcontext.", err);
694	}
695#endif
696	pool_put(&lwp_uc_pool, uc);
697
698	upcallret(l);
699}
700
701/*
702 * XXX This is a terrible name.
703 */
704void
705upcallret(l)
706	struct lwp *l;
707{
708
709	/* Invoke MI userret code */
710	mi_userret(l);
711
712	curcpu()->ci_schedstate.spc_curpriority = l->l_priority = l->l_usrpri;
713}
714