trap.c revision 1.77
1/*	$NetBSD: trap.c,v 1.77 2020/02/21 14:49:57 rin Exp $	*/
2
3/*
4 * Copyright 2001 Wasabi Systems, Inc.
5 * All rights reserved.
6 *
7 * Written by Eduardo Horvath and Simon Burge for Wasabi Systems, Inc.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 *    notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 *    notice, this list of conditions and the following disclaimer in the
16 *    documentation and/or other materials provided with the distribution.
17 * 3. All advertising materials mentioning features or use of this software
18 *    must display the following acknowledgement:
19 *      This product includes software developed for the NetBSD Project by
20 *      Wasabi Systems, Inc.
21 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22 *    or promote products derived from this software without specific prior
23 *    written permission.
24 *
25 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38/*
39 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
40 * Copyright (C) 1995, 1996 TooLs GmbH.
41 * All rights reserved.
42 *
43 * Redistribution and use in source and binary forms, with or without
44 * modification, are permitted provided that the following conditions
45 * are met:
46 * 1. Redistributions of source code must retain the above copyright
47 *    notice, this list of conditions and the following disclaimer.
48 * 2. Redistributions in binary form must reproduce the above copyright
49 *    notice, this list of conditions and the following disclaimer in the
50 *    documentation and/or other materials provided with the distribution.
51 * 3. All advertising materials mentioning features or use of this software
52 *    must display the following acknowledgement:
53 *	This product includes software developed by TooLs GmbH.
54 * 4. The name of TooLs GmbH may not be used to endorse or promote products
55 *    derived from this software without specific prior written permission.
56 *
57 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
58 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
59 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
60 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
61 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
62 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
63 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
64 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
65 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
66 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
67 */
68
69#include <sys/cdefs.h>
70__KERNEL_RCSID(0, "$NetBSD: trap.c,v 1.77 2020/02/21 14:49:57 rin Exp $");
71
72#include "opt_altivec.h"
73#include "opt_ddb.h"
74#include "opt_kgdb.h"
75
76#define	__UFETCHSTORE_PRIVATE
77
78#include <sys/param.h>
79#include <sys/cpu.h>
80#include <sys/kauth.h>
81#include <sys/proc.h>
82#include <sys/reboot.h>
83#include <sys/syscall.h>
84#include <sys/systm.h>
85
86#if defined(KGDB)
87#include <sys/kgdb.h>
88#endif
89
90#include <uvm/uvm_extern.h>
91
92#include <dev/cons.h>
93
94#include <machine/fpu.h>
95#include <machine/frame.h>
96#include <machine/pcb.h>
97#include <machine/psl.h>
98#include <machine/trap.h>
99
100#include <powerpc/db_machdep.h>
101#include <powerpc/spr.h>
102#include <powerpc/userret.h>
103
104#include <powerpc/ibm4xx/cpu.h>
105#include <powerpc/ibm4xx/pmap.h>
106#include <powerpc/ibm4xx/spr.h>
107#include <powerpc/ibm4xx/tlb.h>
108
109#include <powerpc/fpu/fpu_extern.h>
110
/* These definitions should probably be somewhere else			XXX */
#define	FIRSTARG	3		/* first argument is in reg 3 */
#define	NARGREG		8		/* 8 args are in registers */
#define	MOREARGS(sp)	((void *)((int)(sp) + 8)) /* more args go here */

/* Attempt to emulate an unaligned access; see definition below. */
static int fix_unaligned(struct lwp *l, struct trapframe *tf);

void trap(struct trapframe *);	/* Called from locore / trap_subr */
#if 0
/* Not currently used nor exposed externally in any header file */
int badaddr(void *, size_t);
int badaddr_read(void *, size_t, int *);
#endif
int ctx_setup(int, int);	/* Pick MMU context for return from trap */

#ifdef DEBUG
#define TDB_ALL	0x1
int trapdebug = /* TDB_ALL */ 0;
/* Debug trace printf, gated per-category by the trapdebug bitmask. */
#define	DBPRINTF(x, y)	if (trapdebug & (x)) printf y
#else
#define DBPRINTF(x, y)
#endif
133
/*
 * trap() -- common trap handler, called from the locore trap stubs
 * with a trapframe describing the exception.  Dispatches on the
 * exception type (tagged with EXC_USER when the saved MSR shows the
 * fault came from user mode), resolves recoverable faults through
 * uvm_fault()/onfault recovery, posts signals for user faults, and
 * panics on anything it cannot handle.
 */
void
trap(struct trapframe *tf)
{
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	struct pcb *pcb;
	int type = tf->tf_exc;
	int ftype, rv;
	ksiginfo_t ksi;

	KASSERT(l->l_stat == LSONPROC);

	/*
	 * PSL_PR set in the saved MSR means the exception came from user
	 * mode: refresh cached credentials and tag the type so the switch
	 * below selects the user-mode handlers.
	 */
	if (tf->tf_srr1 & PSL_PR) {
		LWP_CACHE_CREDS(l, p);
		type |= EXC_USER;
	}

	/* Default fault type; upgraded to WRITE/EXECUTE where needed. */
	ftype = VM_PROT_READ;

	DBPRINTF(TDB_ALL, ("trap(%x) at %lx from frame %p &frame %p\n",
	    type, tf->tf_srr0, tf, &tf));

	switch (type) {
	case EXC_DEBUG|EXC_USER:
		{
			int srr2, srr3;

			/*
			 * NOTE(review): this asm declares three outputs but
			 * executes only the single mfspr into %0, so srr2
			 * and srr3 are printed without ever being written --
			 * confirm whether additional mfspr's were intended.
			 */
			__asm volatile("mfspr %0,0x3f0" :
			    "=r" (rv), "=r" (srr2), "=r" (srr3) :);
			printf("debug reg is %x srr2 %x srr3 %x\n", rv, srr2,
			    srr3);
			/* XXX fall through or break here?! */
		}
		/*
		 * DEBUG intr -- probably single-step.
		 */
	case EXC_TRC|EXC_USER:
		/* Clear single-step enable and deliver SIGTRAP. */
		tf->tf_srr1 &= ~PSL_SE;
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGTRAP;
		ksi.ksi_trap = EXC_TRC;
		ksi.ksi_addr = (void *)tf->tf_srr0;
		trapsignal(l, &ksi);
		break;

	case EXC_DSI:
		/* FALLTHROUGH */
	case EXC_DTMISS:
		/*
		 * Kernel-mode data storage fault / data TLB miss.  Try to
		 * resolve it via uvm_fault(); if that fails and an onfault
		 * buffer is installed (copyin/copyout in progress), unwind
		 * to it, otherwise fall through to brain_damage.
		 */
		{
			struct vm_map *map;
			vaddr_t va;
			struct faultbuf *fb = NULL;

			va = tf->tf_dear;
			if (tf->tf_pid == KERNEL_PID) {
				map = kernel_map;
			} else {
				map = &p->p_vmspace->vm_map;
			}

			/* Store or zone-protection faults are writes. */
			if (tf->tf_esr & (ESR_DST|ESR_DIZ))
				ftype = VM_PROT_WRITE;

			DBPRINTF(TDB_ALL,
			    ("trap(EXC_DSI) at %lx %s fault on %p esr %x\n",
			    tf->tf_srr0,
			    (ftype & VM_PROT_WRITE) ? "write" : "read",
			    (void *)va, tf->tf_esr));

			/*
			 * Clear pcb_onfault around uvm_fault() so a nested
			 * fault cannot unwind to the same buffer.
			 */
			pcb = lwp_getpcb(l);
			fb = pcb->pcb_onfault;
			pcb->pcb_onfault = NULL;
			rv = uvm_fault(map, trunc_page(va), ftype);
			pcb->pcb_onfault = fb;
			if (rv == 0)
				return;
			if (fb != NULL) {
				/* Unwind to the setfault() caller. */
				tf->tf_pid = KERNEL_PID;
				tf->tf_srr0 = fb->fb_pc;
				tf->tf_srr1 |= PSL_IR; /* Re-enable IMMU */
				tf->tf_cr = fb->fb_cr;
				tf->tf_fixreg[1] = fb->fb_sp;
				tf->tf_fixreg[2] = fb->fb_r2;
				tf->tf_fixreg[3] = 1; /* Return TRUE */
				memcpy(&tf->tf_fixreg[13], fb->fb_fixreg,
				    sizeof(fb->fb_fixreg));
				return;
			}
		}
		goto brain_damage;

	case EXC_DSI|EXC_USER:
		/* FALLTHROUGH */
	case EXC_DTMISS|EXC_USER:
		/*
		 * User-mode data fault: page it in, or post SIGSEGV
		 * (SIGKILL if the fault failed for lack of swap).
		 */
		if (tf->tf_esr & (ESR_DST|ESR_DIZ))
			ftype = VM_PROT_WRITE;

		DBPRINTF(TDB_ALL,
		    ("trap(EXC_DSI|EXC_USER) at %lx %s fault on %lx %x\n",
		    tf->tf_srr0, (ftype & VM_PROT_WRITE) ? "write" : "read",
		    tf->tf_dear, tf->tf_esr));
		KASSERT(l == curlwp && (l->l_stat == LSONPROC));
//		KASSERT(curpcb->pcb_onfault == NULL);
		rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(tf->tf_dear),
		    ftype);
		if (rv == 0) {
			break;
		}
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGSEGV;
		ksi.ksi_trap = EXC_DSI;
		ksi.ksi_addr = (void *)tf->tf_dear;
		/* NOTE(review): ksi_code is not set here, unlike the ISI
		 * case below -- confirm whether SEGV_MAPERR/ACCERR should
		 * be filled in. */
		if (rv == ENOMEM) {
			printf("UVM: pid %d (%s) lid %d, uid %d killed: "
			    "out of swap\n",
			    p->p_pid, p->p_comm, l->l_lid,
			    l->l_cred ?
			    kauth_cred_geteuid(l->l_cred) : -1);
			ksi.ksi_signo = SIGKILL;
		}
		trapsignal(l, &ksi);
		break;

	case EXC_ITMISS|EXC_USER:
	case EXC_ISI|EXC_USER:
		/* User-mode instruction fetch fault / instruction TLB miss. */
		ftype = VM_PROT_EXECUTE;
		DBPRINTF(TDB_ALL,
		    ("trap(EXC_ISI|EXC_USER) at %lx execute fault tf %p\n",
		    tf->tf_srr0, tf));
//		KASSERT(curpcb->pcb_onfault == NULL);
		rv = uvm_fault(&p->p_vmspace->vm_map, trunc_page(tf->tf_srr0),
		    ftype);
		if (rv == 0) {
			break;
		}
		KSI_INIT_TRAP(&ksi);
		ksi.ksi_signo = SIGSEGV;
		ksi.ksi_trap = EXC_ISI;
		ksi.ksi_addr = (void *)tf->tf_srr0;
		ksi.ksi_code = (rv == EACCES ? SEGV_ACCERR : SEGV_MAPERR);
		trapsignal(l, &ksi);
		break;

	case EXC_AST|EXC_USER:
		/* Asynchronous software trap: run pending AST processing. */
		cpu_ast(l, curcpu());
		break;

	case EXC_ALI|EXC_USER:
		/*
		 * Alignment fault: try to emulate the access; on failure
		 * deliver SIGBUS, on success skip the faulting instruction.
		 */
		if (fix_unaligned(l, tf) != 0) {
			KSI_INIT_TRAP(&ksi);
			ksi.ksi_signo = SIGBUS;
			ksi.ksi_trap = EXC_ALI;
			ksi.ksi_addr = (void *)tf->tf_dear;
			trapsignal(l, &ksi);
		} else
			tf->tf_srr0 += 4;
		break;

	case EXC_PGM|EXC_USER:
		/*
		 * Illegal insn:
		 *
		 * let's try to see if its FPU and can be emulated.
		 */
		curcpu()->ci_data.cpu_ntrap++;
		pcb = lwp_getpcb(l);

		/* First FP use by this lwp: start from a clean FP state. */
		if (__predict_false(!fpu_used_p(l))) {
			memset(&pcb->pcb_fpu, 0, sizeof(pcb->pcb_fpu));
			fpu_mark_used(l);
		}

		if (fpu_emulate(tf, &pcb->pcb_fpu, &ksi)) {
			if (ksi.ksi_signo == 0)	/* was emulated */
				break;
		} else {
			/* Not an FP instruction: it really is illegal. */
			ksi.ksi_signo = SIGILL;
			ksi.ksi_code = ILL_ILLOPC;
			ksi.ksi_trap = EXC_PGM;
			ksi.ksi_addr = (void *)tf->tf_srr0;
		}

		trapsignal(l, &ksi);
		break;

	case EXC_MCHK:
		/*
		 * Machine check in kernel mode: recoverable only when an
		 * onfault buffer is installed (device probe via badaddr()
		 * or copyin/copyout); unwind to it, else panic below.
		 */
		{
			struct faultbuf *fb;

			pcb = lwp_getpcb(l);
			if ((fb = pcb->pcb_onfault) != NULL) {
				tf->tf_pid = KERNEL_PID;
				tf->tf_srr0 = fb->fb_pc;
				tf->tf_srr1 |= PSL_IR; /* Re-enable IMMU */
				tf->tf_fixreg[1] = fb->fb_sp;
				tf->tf_fixreg[2] = fb->fb_r2;
				tf->tf_fixreg[3] = 1; /* Return TRUE */
				tf->tf_cr = fb->fb_cr;
				memcpy(&tf->tf_fixreg[13], fb->fb_fixreg,
				    sizeof(fb->fb_fixreg));
				return;
			}
		}
		goto brain_damage;

	default:
brain_damage:
		/* Unhandled trap: drop into the debugger if any, else panic. */
		printf("trap type 0x%x at 0x%lx\n", type, tf->tf_srr0);
#if defined(DDB) || defined(KGDB)
		if (kdb_trap(type, tf))
			return;
#endif
#ifdef TRAP_PANICWAIT
		printf("Press a key to panic.\n");
		cngetc();
#endif
		panic("trap");
	}

	/* Invoke powerpc userret code */
	userret(l, tf);
}
356
/*
 * Select the MMU context (PID) to load when returning from an
 * exception.  Returning to user mode (PSL_PR set in srr1) uses the
 * current process's pmap context, allocating one first if the pmap
 * has none; if single-stepping (PSL_SE) the DBCR0 debug bits are
 * also enabled.  For kernel mode, a zero ctx falls back to
 * KERNEL_PID.  Returns the context to use.
 */
int
ctx_setup(int ctx, int srr1)
{
	volatile struct pmap *pm;

	/* Update PID if we're returning to user mode. */
	if (srr1 & PSL_PR) {
		pm = curproc->p_vmspace->vm_map.pmap;
		if (!pm->pm_ctx) {
			ctx_alloc(__UNVOLATILE(pm));
		}
		ctx = pm->pm_ctx;
		if (srr1 & PSL_SE) {
			int dbreg, mask = 0x48000000;
				/*
				 * Set the Internal Debug and
				 * Instruction Completion bits of
				 * the DBCR0 register.
				 *
				 * XXX this is also used by jtag debuggers...
				 */
			__asm volatile("mfspr %0,0x3f2;"
			    "or %0,%0,%1;"
			    "mtspr 0x3f2,%0;" :
			    "=&r" (dbreg) : "r" (mask));
		}
	}
	else if (!ctx) {
		ctx = KERNEL_PID;
	}
	return (ctx);
}
389
390/*
391 * Used by copyin()/copyout()
392 */
393extern vaddr_t vmaprange(struct proc *, vaddr_t, vsize_t, int);
394extern void vunmaprange(vaddr_t, vsize_t);
395static int bigcopyin(const void *, void *, size_t );
396static int bigcopyout(const void *, void *, size_t );
397
/*
 * copyin(9): copy len bytes from user address udaddr to kernel
 * address kaddr.  Buffers larger than 1024 bytes go through the
 * faster bigcopyin(); small ones are copied inline by switching the
 * MMU PID register between the user's context and the kernel's for
 * each word/byte transferred.  Returns 0 on success or the error
 * delivered through the setfault() handler on a bad user address.
 */
int
copyin(const void *udaddr, void *kaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int rv, msr, pid, tmp, ctx, count = 0;
	struct faultbuf env;

	/* For bigger buffers use the faster copy */
	if (len > 1024)
		return (bigcopyin(udaddr, kaddr, len));

	/* Arrange for faults in the copy loop to abort the copy. */
	if ((rv = setfault(&env))) {
		curpcb->pcb_onfault = NULL;
		return rv;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}

	/*
	 * Word loop then byte loop: load from user space under the user
	 * PID, store to kernel space under the saved kernel PID, with
	 * sync/isync around each PID switch.
	 */
	__asm volatile(
		"   mfmsr %[msr];"		/* Save MSR */
		"   li %[pid],0x20;"
		"   andc %[pid],%[msr],%[pid]; mtmsr %[pid];" /* Disable IMMU */
		"   mfpid %[pid];"		/* Save old PID */
		"   sync; isync;"

		"   srwi. %[count],%[len],0x2;"	/* How many words? */
		"   beq- 2f;"			/* No words. Go do bytes */
		"   mtctr %[count];"
		"1: mtpid %[ctx]; sync;"
#ifdef PPC_IBM403
		"   lswi %[tmp],%[udaddr],4;"	/* Load user word */
#else
		"   lwz %[tmp],0(%[udaddr]);"
#endif
		"   addi %[udaddr],%[udaddr],0x4;" /* next udaddr word */
		"   sync; isync;"
		"   mtpid %[pid]; sync;"
#ifdef PPC_IBM403
		"   stswi %[tmp],%[kaddr],4;"	/* Store kernel word */
#else
		"   stw %[tmp],0(%[kaddr]);"
#endif
		"   dcbst 0,%[kaddr];"		/* flush cache */
		"   addi %[kaddr],%[kaddr],0x4;" /* next udaddr word */
		"   sync; isync;"
		"   bdnz 1b;"			/* repeat */

		"2: andi. %[count],%[len],0x3;"	/* How many remaining bytes? */
		"   addi %[count],%[count],0x1;"
		"   mtctr %[count];"
		"3: bdz 10f;"			/* while count */
		"   mtpid %[ctx]; sync;"
		"   lbz %[tmp],0(%[udaddr]);"	/* Load user byte */
		"   addi %[udaddr],%[udaddr],0x1;" /* next udaddr byte */
		"   sync; isync;"
		"   mtpid %[pid]; sync;"
		"   stb %[tmp],0(%[kaddr]);"	/* Store kernel byte */
		"   dcbst 0,%[kaddr];"		/* flush cache */
		"   addi %[kaddr],%[kaddr],0x1;"
		"   sync; isync;"
		"   b 3b;"
		"10:mtpid %[pid]; mtmsr %[msr]; sync; isync;"
						/* Restore PID and MSR */
		: [msr] "=&r" (msr), [pid] "=&r" (pid), [tmp] "=&r" (tmp)
		: [udaddr] "b" (udaddr), [ctx] "b" (ctx), [kaddr] "b" (kaddr),
		  [len] "b" (len), [count] "b" (count));

	curpcb->pcb_onfault = NULL;
	return 0;
}
472
/*
 * Slow-path copyin for large (> 1024 byte) buffers: wire the user
 * pages, map them into kernel VA with vmaprange(), then do a plain
 * memcpy() under setfault() protection.  Returns 0 or an error from
 * uvm_vslock()/the fault handler.
 */
static int
bigcopyin(const void *udaddr, void *kaddr, size_t len)
{
	const char *up;
	char *kp = kaddr;
	struct lwp *l = curlwp;
	struct proc *p;
	struct faultbuf env;
	int error;

	p = l->l_proc;

	/*
	 * Stolen from physio():
	 */
	error = uvm_vslock(p->p_vmspace, __UNCONST(udaddr), len, VM_PROT_READ);
	if (error) {
		return error;
	}
	up = (char *)vmaprange(p, (vaddr_t)udaddr, len, VM_PROT_READ);

	if ((error = setfault(&env)) == 0) {
		memcpy(kp, up, len);
	}

	/* Clear the fault handler, then tear down the temporary mapping. */
	curpcb->pcb_onfault = NULL;
	vunmaprange((vaddr_t)up, len);
	uvm_vsunlock(p->p_vmspace, __UNCONST(udaddr), len);

	return error;
}
504
/*
 * copyout(9): copy len bytes from kernel address kaddr to user
 * address udaddr.  Mirror image of copyin(): buffers over 1024 bytes
 * use bigcopyout(); small ones are copied inline, loading under the
 * kernel PID and storing under the user's context PID.  Returns 0 on
 * success or the error delivered through the setfault() handler.
 */
int
copyout(const void *kaddr, void *udaddr, size_t len)
{
	struct pmap *pm = curproc->p_vmspace->vm_map.pmap;
	int rv, msr, pid, tmp, ctx, count = 0;
	struct faultbuf env;

	/* For big copies use more efficient routine */
	if (len > 1024)
		return (bigcopyout(kaddr, udaddr, len));

	/* Arrange for faults in the copy loop to abort the copy. */
	if ((rv = setfault(&env))) {
		curpcb->pcb_onfault = NULL;
		return rv;
	}

	if (!(ctx = pm->pm_ctx)) {
		/* No context -- assign it one */
		ctx_alloc(pm);
		ctx = pm->pm_ctx;
	}

	/*
	 * Word loop then byte loop: load from kernel space under the
	 * saved kernel PID, store to user space under the user PID,
	 * with sync/isync around each PID switch.
	 */
	__asm volatile(
		"   mfmsr %[msr];"		/* Save MSR */
		"   li %[pid],0x20;"
		"   andc %[pid],%[msr],%[pid]; mtmsr %[pid];" /* Disable IMMU */
		"   mfpid %[pid];"		/* Save old PID */
		"   sync; isync;"

		"   srwi. %[count],%[len],0x2;"	/* How many words? */
		"   beq- 2f;"			/* No words. Go do bytes */
		"   mtctr %[count];"
		"1: mtpid %[pid]; sync;"
#ifdef PPC_IBM403
		"   lswi %[tmp],%[kaddr],4;"	/* Load kernel word */
#else
		"   lwz %[tmp],0(%[kaddr]);"
#endif
		"   addi %[kaddr],%[kaddr],0x4;" /* next kaddr word */
		"   sync; isync;"
		"   mtpid %[ctx]; sync;"
#ifdef PPC_IBM403
		"   stswi %[tmp],%[udaddr],4;"	/* Store user word */
#else
		"   stw %[tmp],0(%[udaddr]);"
#endif
		"   dcbst 0,%[udaddr];"		/* flush cache */
		"   addi %[udaddr],%[udaddr],0x4;" /* next udaddr word */
		"   sync; isync;"
		"   bdnz 1b;"			/* repeat */

		"2: andi. %[count],%[len],0x3;"	/* How many remaining bytes? */
		"   addi %[count],%[count],0x1;"
		"   mtctr %[count];"
		"3: bdz  10f;"			/* while count */
		"   mtpid %[pid]; sync;"
		"   lbz %[tmp],0(%[kaddr]);"	/* Load kernel byte */
		"   addi %[kaddr],%[kaddr],0x1;" /* next kaddr byte */
		"   sync; isync;"
		"   mtpid %[ctx]; sync;"
		"   stb %[tmp],0(%[udaddr]);"	/* Store user byte */
		"   dcbst 0,%[udaddr];"		/* flush cache */
		"   addi %[udaddr],%[udaddr],0x1;"
		"   sync; isync;"
		"   b 3b;"
		"10:mtpid %[pid]; mtmsr %[msr]; sync; isync;"
						/* Restore PID and MSR */
		: [msr] "=&r" (msr), [pid] "=&r" (pid), [tmp] "=&r" (tmp)
		: [udaddr] "b" (udaddr), [ctx] "b" (ctx), [kaddr] "b" (kaddr),
		  [len] "b" (len), [count] "b" (count));

	curpcb->pcb_onfault = NULL;
	return 0;
}
579
/*
 * Slow-path copyout for large (> 1024 byte) buffers: wire the user
 * pages for writing, map them into kernel VA with vmaprange(), then
 * do a plain memcpy() under setfault() protection.  Returns 0 or an
 * error from uvm_vslock()/the fault handler.
 */
static int
bigcopyout(const void *kaddr, void *udaddr, size_t len)
{
	char *up;
	const char *kp = (const char *)kaddr;
	struct lwp *l = curlwp;
	struct proc *p;
	struct faultbuf env;
	int error;

	p = l->l_proc;

	/*
	 * Stolen from physio():
	 */
	error = uvm_vslock(p->p_vmspace, udaddr, len, VM_PROT_WRITE);
	if (error) {
		return error;
	}
	up = (char *)vmaprange(p, (vaddr_t)udaddr, len,
	    VM_PROT_READ | VM_PROT_WRITE);

	if ((error = setfault(&env)) == 0) {
		memcpy(up, kp, len);
	}

	/* Clear the fault handler, then tear down the temporary mapping. */
	curpcb->pcb_onfault = NULL;
	vunmaprange((vaddr_t)up, len);
	uvm_vsunlock(p->p_vmspace, udaddr, len);

	return error;
}
612
/*
 * kcopy(const void *src, void *dst, size_t len);
 *
 * Copy len bytes from src to dst, aborting if we encounter a fatal
 * page fault.
 *
 * kcopy() _must_ save and restore the old fault handler since it is
 * called by uiomove(), which may be in the path of servicing a non-fatal
 * page fault.
 */
int
kcopy(const void *src, void *dst, size_t len)
{
	struct faultbuf env, *oldfault;
	int rv;

	/* Save the current handler so nesting inside copyin/out is safe. */
	oldfault = curpcb->pcb_onfault;
	if ((rv = setfault(&env))) {
		/* Faulted: restore the previous handler, report the error. */
		curpcb->pcb_onfault = oldfault;
		return rv;
	}

	memcpy(dst, src, len);

	curpcb->pcb_onfault = oldfault;
	return 0;
}
640
641#if 0
/*
 * Probe addr for readability with an access of the given size.
 * Returns non-zero if the access faults, 0 otherwise; the value
 * read is discarded.
 */
int
badaddr(void *addr, size_t size)
{

	return badaddr_read(addr, size, NULL);
}
648
649int
650badaddr_read(void *addr, size_t size, int *rptr)
651{
652	struct faultbuf env;
653	int x;
654
655	/* Get rid of any stale machine checks that have been waiting.  */
656	__asm volatile ("sync; isync");
657
658	if (setfault(&env)) {
659		curpcb->pcb_onfault = NULL;
660		__asm volatile ("sync");
661		return 1;
662	}
663
664	__asm volatile ("sync");
665
666	switch (size) {
667	case 1:
668		x = *(volatile int8_t *)addr;
669		break;
670	case 2:
671		x = *(volatile int16_t *)addr;
672		break;
673	case 4:
674		x = *(volatile int32_t *)addr;
675		break;
676	default:
677		panic("badaddr: invalid size (%d)", size);
678	}
679
680	/* Make sure we took the machine check, if we caused one. */
681	__asm volatile ("sync; isync");
682
683	curpcb->pcb_onfault = NULL;
684	__asm volatile ("sync");	/* To be sure. */
685
686	/* Use the value to avoid reorder. */
687	if (rptr)
688		*rptr = x;
689
690	return 0;
691}
692#endif
693
694/*
695 * For now, this only deals with the particular unaligned access case
696 * that gcc tends to generate.  Eventually it should handle all of the
697 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
698 */
699
static int
fix_unaligned(struct lwp *l, struct trapframe *tf)
{

	/*
	 * No unaligned-access emulation is implemented yet; always
	 * report failure so the caller delivers SIGBUS.
	 */
	return (-1);
}
706
/*
 * XXX Extremely lame implementations of _ufetch_* / _ustore_*.  IBM 4xx
 * experts should make versions that are good.
 */

/* Generate _ufetch_<sz>(): fetch one uint<sz>_t from user space
 * via copyin(); returns 0 or a copyin() error. */
#define UFETCH(sz)							\
int									\
_ufetch_ ## sz(const uint ## sz ## _t *uaddr, uint ## sz ## _t *valp)	\
{									\
	return copyin(uaddr, valp, sizeof(*valp));			\
}

UFETCH(8)
UFETCH(16)
UFETCH(32)

/* Generate _ustore_<sz>(): store one uint<sz>_t to user space
 * via copyout(); returns 0 or a copyout() error. */
#define USTORE(sz)							\
int									\
_ustore_ ## sz(uint ## sz ## _t *uaddr, uint ## sz ## _t val)		\
{									\
	return copyout(&val, uaddr, sizeof(val));			\
}

USTORE(8)
USTORE(16)
USTORE(32)
733