/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: trap.c,v 1.58 2002/03/04 04:07:35 dbj Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/powerpc/aim/trap.c 249129 2013-04-05 04:53:43Z jhibbits $");

#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/proc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/vmmeter.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
#endif

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <machine/_inttypes.h>
#include <machine/altivec.h>
#include <machine/cpu.h>
#include <machine/db_machdep.h>
#include <machine/fpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/pmap.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include <machine/spr.h>
#include <machine/sr.h>

static void	trap_fatal(struct trapframe *frame);
static void	printtrap(u_int vector, struct trapframe *frame, int isfatal,
		    int user);
static int	trap_pfault(struct trapframe *frame, int user);
static int	fix_unaligned(struct thread *td, struct trapframe *frame);
static int	ppc_instr_emulate(struct trapframe *frame);
static int	handle_onfault(struct trapframe *frame);
static void	syscall(struct trapframe *frame);

#ifdef __powerpc64__
       void	handle_kernel_slb_spill(int, register_t, register_t);
static int	handle_user_slb_spill(pmap_t pm, vm_offset_t addr);
extern int	n_slbs;
#endif

int	setfault(faultbuf);		/* defined in locore.S */

/* Why are these not defined in a header? */
int	badaddr(void *, size_t);
int	badaddr_read(void *, size_t, int *);

struct powerpc_exception {
	u_int	vector;
	char	*name;
};

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

/*
 * This is a hook which is initialised by the dtrace module
 * to handle traps which might occur during DTrace probe
 * execution.
 */
dtrace_trap_func_t	dtrace_trap_func;

dtrace_doubletrap_func_t	dtrace_doubletrap_func;

/*
 * This is a hook which is initialised by the systrace module
 * when it is loaded. This keeps the DTrace syscall provider
 * implementation opaque.
 */
systrace_probe_func_t	systrace_probe_func;

/*
 * These hooks are necessary for the pid, usdt and fasttrap providers.
 */
dtrace_fasttrap_probe_ptr_t	dtrace_fasttrap_probe_ptr;
dtrace_pid_probe_ptr_t		dtrace_pid_probe_ptr;
dtrace_return_probe_ptr_t	dtrace_return_probe_ptr;
int (*dtrace_invop_jump_addr)(struct trapframe *);
#endif

static struct powerpc_exception powerpc_exceptions[] = {
	{ 0x0100, "system reset" },
	{ 0x0200, "machine check" },
	{ 0x0300, "data storage interrupt" },
	{ 0x0380, "data segment exception" },
	{ 0x0400, "instruction storage interrupt" },
	{ 0x0480, "instruction segment exception" },
	{ 0x0500, "external interrupt" },
	{ 0x0600, "alignment" },
	{ 0x0700, "program" },
	{ 0x0800, "floating-point unavailable" },
	{ 0x0900, "decrementer" },
	{ 0x0c00, "system call" },
	{ 0x0d00, "trace" },
	{ 0x0e00, "floating-point assist" },
	{ 0x0f00, "performance monitoring" },
	{ 0x0f20, "altivec unavailable" },
	{ 0x1000, "instruction tlb miss" },
	{ 0x1100, "data load tlb miss" },
	{ 0x1200, "data store tlb miss" },
	{ 0x1300, "instruction breakpoint" },
	{ 0x1400, "system management" },
	{ 0x1600, "altivec assist" },
	{ 0x1700, "thermal management" },
	{ 0x2000, "run mode/trace" },
	{ 0x3000, NULL }
};

static const char *
trapname(u_int vector)
{
	struct	powerpc_exception *pe;

	for (pe = powerpc_exceptions; pe->vector != 0x3000; pe++) {
		if (pe->vector == vector)
			return (pe->name);
	}

	return ("unknown");
}

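/*
 * Common trap entry point.  Dispatches the exception in 'frame' according
 * to whether it was taken from user or kernel mode, posts a signal to the
 * current thread for unrecoverable user faults, and falls back to
 * trap_fatal() for anything the kernel cannot handle.
 */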
void
trap(struct trapframe *frame)
{
	struct thread	*td;
	struct proc	*p;
	int		sig, type, user;
	u_int		ucode;
	ksiginfo_t	ksi;

	PCPU_INC(cnt.v_trap);

	td = curthread;
	p = td->td_proc;

	type = ucode = frame->exc;
	sig = 0;
	user = frame->srr1 & PSL_PR;

	CTR3(KTR_TRAP, "trap: %s type=%s (%s)", td->td_name,
	    trapname(type), user ? "user" : "kernel");

#ifdef HWPMC_HOOKS
	if (type == EXC_PERF && (pmc_intr != NULL)) {
#ifdef notyet
	    (*pmc_intr)(PCPU_GET(cpuid), frame);
	    if (!user)
		return;
#endif
	}
	else
#endif
#ifdef KDTRACE_HOOKS
	/*
	 * A trap can occur while DTrace executes a probe. Before
	 * executing the probe, DTrace blocks re-scheduling and sets
	 * a flag in its per-cpu flags to indicate that it doesn't
	 * want to fault. On returning from the probe, the no-fault
	 * flag is cleared and finally re-scheduling is enabled.
	 *
	 * If the DTrace kernel module has registered a trap handler,
	 * call it and if it returns non-zero, assume that it has
	 * handled the trap and modified the trap frame so that this
	 * function can return normally.
	 */
	/*
	 * XXXDTRACE: add fasttrap and pid probe handlers here (if ever)
	 */
	if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type))
		return;
#endif

	if (user) {
		td->td_pticks = 0;
		td->td_frame = frame;
		if (td->td_ucred != p->p_ucred)
			cred_update_thread(td);

		/* User Mode Traps */
		switch (type) {
		case EXC_RUNMODETRC:
		case EXC_TRC:
			frame->srr1 &= ~PSL_SE;
			sig = SIGTRAP;
			break;

#ifdef __powerpc64__
		case EXC_ISE:
		case EXC_DSE:
			if (handle_user_slb_spill(&p->p_vmspace->vm_pmap,
			    (type == EXC_ISE) ? frame->srr0 :
			    frame->cpu.aim.dar) != 0)
				sig = SIGSEGV;
			break;
#endif
		case EXC_DSI:
		case EXC_ISI:
			sig = trap_pfault(frame, 1);
			break;

		case EXC_SC:
			syscall(frame);
			break;

		case EXC_FPU:
			KASSERT((td->td_pcb->pcb_flags & PCB_FPU) != PCB_FPU,
			    ("FPU already enabled for thread"));
			enable_fpu(td);
			break;

		case EXC_VEC:
			KASSERT((td->td_pcb->pcb_flags & PCB_VEC) != PCB_VEC,
			    ("Altivec already enabled for thread"));
			enable_vec(td);
			break;

		case EXC_VECAST_G4:
		case EXC_VECAST_G5:
			/*
			 * We get a VPU assist exception for IEEE mode
			 * vector operations on denormalized floats.
			 * Emulating this is a giant pain, so for now,
			 * just switch off IEEE mode and treat them as
			 * zero.
			 */

			save_vec(td);
			td->td_pcb->pcb_vec.vscr |= ALTIVEC_VSCR_NJ;
			enable_vec(td);
			break;

		case EXC_ALI:
			if (fix_unaligned(td, frame) != 0)
				sig = SIGBUS;
			else
				frame->srr0 += 4;
			break;

		case EXC_PGM:
			/* Identify the trap reason */
			if (frame->srr1 & EXC_PGM_TRAP)
				sig = SIGTRAP;
			else if (ppc_instr_emulate(frame) == 0)
				frame->srr0 += 4;
			else
				sig = SIGILL;
			break;

		default:
			trap_fatal(frame);
		}
	} else {
		/* Kernel Mode Traps */

		KASSERT(cold || td->td_ucred != NULL,
		    ("kernel trap doesn't have ucred"));
		switch (type) {
#ifdef KDTRACE_HOOKS
		case EXC_PGM:
			if (frame->srr1 & EXC_PGM_TRAP) {
				/*
				 * The DTrace breakpoint is a 32-bit
				 * instruction word (0x7c810808).  If the
				 * registered handler emulates it and fixes
				 * up the frame, return; otherwise fall
				 * through to trap_fatal() below.
				 */
				if (*(uint32_t *)frame->srr0 == 0x7c810808 &&
				    dtrace_invop_jump_addr != NULL) {
					dtrace_invop_jump_addr(frame);
					return;
				}
			}
			break;
#endif
#ifdef __powerpc64__
		case EXC_DSE:
			if ((frame->cpu.aim.dar & SEGMENT_MASK) == USER_ADDR) {
				__asm __volatile ("slbmte %0, %1" ::
					"r"(td->td_pcb->pcb_cpu.aim.usr_vsid),
					"r"(USER_SLB_SLBE));
				return;
			}
			break;
#endif
		case EXC_DSI:
			if (trap_pfault(frame, 0) == 0)
				return;
			break;
		case EXC_MCHK:
			if (handle_onfault(frame))
				return;
			break;
		default:
			break;
		}
		trap_fatal(frame);
	}

	if (sig != 0) {
		if (p->p_sysent->sv_transtrap != NULL)
			sig = (p->p_sysent->sv_transtrap)(sig, type);
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = sig;
		ksi.ksi_code = (int) ucode; /* XXX, not POSIX */
		/* ksi.ksi_addr = ? */
		ksi.ksi_trapno = type;
		trapsignal(td, &ksi);
	}

	userret(td, frame);
}

static void
trap_fatal(struct trapframe *frame)
{

	printtrap(frame->exc, frame, 1, (frame->srr1 & PSL_PR));
#ifdef KDB
	if ((debugger_on_panic || kdb_active) &&
	    kdb_trap(frame->exc, 0, frame))
		return;
#endif
	panic("%s trap", trapname(frame->exc));
}

static void
printtrap(u_int vector, struct trapframe *frame, int isfatal, int user)
{

	printf("\n");
	printf("%s %s trap:\n", isfatal ? "fatal" : "handled",
	    user ? "user" : "kernel");
	printf("\n");
	printf("   exception       = 0x%x (%s)\n", vector, trapname(vector));
	switch (vector) {
	case EXC_DSE:
	case EXC_DSI:
		printf("   virtual address = 0x%" PRIxPTR "\n",
		    frame->cpu.aim.dar);
		printf("   dsisr           = 0x%" PRIxPTR "\n",
		    frame->cpu.aim.dsisr);
		break;
	case EXC_ISE:
	case EXC_ISI:
		printf("   virtual address = 0x%" PRIxPTR "\n", frame->srr0);
		break;
	}
	printf("   srr0            = 0x%" PRIxPTR "\n", frame->srr0);
	printf("   srr1            = 0x%" PRIxPTR "\n", frame->srr1);
	printf("   lr              = 0x%" PRIxPTR "\n", frame->lr);
	printf("   curthread       = %p\n", curthread);
	if (curthread != NULL)
		printf("          pid = %d, comm = %s\n",
		    curthread->td_proc->p_pid, curthread->td_name);
	printf("\n");
}

/*
 * Handles a fatal fault when we have onfault state to recover.  Returns
 * non-zero if there was onfault recovery state available.
 */
static int
handle_onfault(struct trapframe *frame)
{
	struct		thread *td;
	faultbuf	*fb;

	td = curthread;
	fb = td->td_pcb->pcb_onfault;
	if (fb != NULL) {
		frame->srr0 = (*fb)[0];
		frame->fixreg[1] = (*fb)[1];
		frame->fixreg[2] = (*fb)[2];
		frame->fixreg[3] = 1;
		frame->cr = (*fb)[3];
		bcopy(&(*fb)[4], &frame->fixreg[13],
		    19 * sizeof(register_t));
		return (1);
	}
	return (0);
}

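/*
 * Decode system call arguments from the trap frame.  The syscall number
 * arrives in r0 and the first NARGREG arguments in the registers starting
 * at FIRSTARG (r3); SYS_syscall and SYS___syscall carry the real syscall
 * number as their first argument, and any remaining arguments are fetched
 * from the user stack with copyin().
 */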
int
cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
{
	struct proc *p;
	struct trapframe *frame;
	caddr_t	params;
	size_t argsz;
	int error, n, i;

	p = td->td_proc;
	frame = td->td_frame;

	sa->code = frame->fixreg[0];
	params = (caddr_t)(frame->fixreg + FIRSTARG);
	n = NARGREG;

	if (sa->code == SYS_syscall) {
		/*
		 * code is first argument,
		 * followed by actual args.
		 */
		sa->code = *(register_t *) params;
		params += sizeof(register_t);
		n -= 1;
	} else if (sa->code == SYS___syscall) {
		/*
		 * Like syscall, but code is a quad,
		 * so as to maintain quad alignment
		 * for the rest of the args.
		 */
		if (SV_PROC_FLAG(p, SV_ILP32)) {
			params += sizeof(register_t);
			sa->code = *(register_t *) params;
			params += sizeof(register_t);
			n -= 2;
		} else {
			sa->code = *(register_t *) params;
			params += sizeof(register_t);
			n -= 1;
		}
	}

	if (p->p_sysent->sv_mask)
		sa->code &= p->p_sysent->sv_mask;
	if (sa->code >= p->p_sysent->sv_size)
		sa->callp = &p->p_sysent->sv_table[0];
	else
		sa->callp = &p->p_sysent->sv_table[sa->code];

	sa->narg = sa->callp->sy_narg;

	if (SV_PROC_FLAG(p, SV_ILP32)) {
		argsz = sizeof(uint32_t);

		for (i = 0; i < n; i++)
			sa->args[i] = ((u_register_t *)(params))[i] &
			    0xffffffff;
	} else {
		argsz = sizeof(uint64_t);

		for (i = 0; i < n; i++)
			sa->args[i] = ((u_register_t *)(params))[i];
	}

	if (sa->narg > n)
		error = copyin(MOREARGS(frame->fixreg[1]), sa->args + n,
			       (sa->narg - n) * argsz);
	else
		error = 0;

#ifdef __powerpc64__
	if (SV_PROC_FLAG(p, SV_ILP32) && sa->narg > n) {
		/* Expand the size of arguments copied from the stack */

		for (i = sa->narg; i >= n; i--)
			sa->args[i] = ((uint32_t *)(&sa->args[n]))[i-n];
	}
#endif

	if (error == 0) {
		td->td_retval[0] = 0;
		td->td_retval[1] = frame->fixreg[FIRSTARG + 1];
	}
	return (error);
}

#include "../../kern/subr_syscall.c"

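/*
 * System call entry point, reached from the EXC_SC case in trap() for
 * user-mode traps.  Dispatch is handled by the machine-independent
 * syscallenter()/syscallret() code included from subr_syscall.c above,
 * which in turn calls cpu_fetch_syscall_args() to decode the arguments.
 */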
void
syscall(struct trapframe *frame)
{
	struct thread *td;
	struct syscall_args sa;
	int error;

	td = curthread;
	td->td_frame = frame;

#ifdef __powerpc64__
	/*
	 * Speculatively restore last user SLB segment, which we know is
	 * invalid already, since we are likely to do copyin()/copyout().
	 */
	__asm __volatile ("slbmte %0, %1; isync" ::
	    "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
#endif

	error = syscallenter(td, &sa);
	syscallret(td, error, &sa);
}

#ifdef __powerpc64__
/* Handle kernel SLB faults -- runs in real mode, all seat belts off */
void
handle_kernel_slb_spill(int type, register_t dar, register_t srr0)
{
	struct slb *slbcache;
	uint64_t slbe, slbv;
	uint64_t esid, addr;
	int i;

	addr = (type == EXC_ISE) ? srr0 : dar;
	slbcache = PCPU_GET(slb);
	esid = (uintptr_t)addr >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	/* See if the hardware flushed this somehow (can happen in LPARs) */
	for (i = 0; i < n_slbs; i++)
		if (slbcache[i].slbe == (slbe | (uint64_t)i))
			return;

	/* Not in the map, needs to actually be added */
	slbv = kernel_va_to_slbv(addr);
	if (slbcache[USER_SLB_SLOT].slbe == 0) {
		for (i = 0; i < n_slbs; i++) {
			if (i == USER_SLB_SLOT)
				continue;
			if (!(slbcache[i].slbe & SLBE_VALID))
				goto fillkernslb;
		}

		if (i == n_slbs)
			slbcache[USER_SLB_SLOT].slbe = 1;
	}

	/* Sacrifice a random SLB entry that is not the user entry */
	i = mftb() % n_slbs;
	if (i == USER_SLB_SLOT)
		i = (i+1) % n_slbs;

fillkernslb:
	/* Write new entry */
	slbcache[i].slbv = slbv;
	slbcache[i].slbe = slbe | (uint64_t)i;

	/* Trap handler will restore from cache on exit */
}

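/*
 * Handle a user-mode segment fault: make sure the faulting ESID has a
 * VSID allocated in the process pmap and an entry in the pmap's SLB
 * cache so the segment can be loaded into the hardware SLB.  Returns 0
 * on success.
 */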
static int
handle_user_slb_spill(pmap_t pm, vm_offset_t addr)
{
	struct slb *user_entry;
	uint64_t esid;
	int i;

	esid = (uintptr_t)addr >> ADDR_SR_SHFT;

	PMAP_LOCK(pm);
	user_entry = user_va_to_slb_entry(pm, addr);

	if (user_entry == NULL) {
		/* allocate_vsid auto-spills it */
		(void)allocate_user_vsid(pm, esid, 0);
	} else {
		/*
		 * Check that another CPU has not already mapped this.
		 * XXX: Per-thread SLB caches would be better.
		 */
		for (i = 0; i < pm->pm_slb_len; i++)
			if (pm->pm_slb[i] == user_entry)
				break;

		if (i == pm->pm_slb_len)
			slb_insert_user(pm, user_entry);
	}
	PMAP_UNLOCK(pm);

	return (0);
}
#endif

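/*
 * Resolve a page fault through vm_fault().  Kernel-mode faults on the
 * USER_ADDR segment (the copyin()/copyout() window) are redirected into
 * the current process's address space using the user segment saved in
 * the PCB.  Returns 0 on success, or SIGSEGV if the fault could not be
 * handled.
 */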
static int
trap_pfault(struct trapframe *frame, int user)
{
	vm_offset_t	eva, va;
	struct		thread *td;
	struct		proc *p;
	vm_map_t	map;
	vm_prot_t	ftype;
	int		rv;
	register_t	user_sr;

	td = curthread;
	p = td->td_proc;
	if (frame->exc == EXC_ISI) {
		eva = frame->srr0;
		ftype = VM_PROT_EXECUTE;
		if (frame->srr1 & SRR1_ISI_PFAULT)
			ftype |= VM_PROT_READ;
	} else {
		eva = frame->cpu.aim.dar;
		if (frame->cpu.aim.dsisr & DSISR_STORE)
			ftype = VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;
	}

	if (user) {
		map = &p->p_vmspace->vm_map;
	} else {
		if ((eva >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
			if (p->p_vmspace == NULL)
				return (SIGSEGV);

			map = &p->p_vmspace->vm_map;

			user_sr = td->td_pcb->pcb_cpu.aim.usr_segm;
			eva &= ADDR_PIDX | ADDR_POFF;
			eva |= user_sr << ADDR_SR_SHFT;
		} else {
			map = kernel_map;
		}
	}
	va = trunc_page(eva);

	if (map != kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 *	critical time.
		 */
		PROC_LOCK(p);
		++p->p_lock;
		PROC_UNLOCK(p);

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);

		PROC_LOCK(p);
		--p->p_lock;
		PROC_UNLOCK(p);
		/*
		 * XXXDTRACE: add dtrace_doubletrap_func here?
		 */
	} else {
		/*
		 * Don't have to worry about process locking or stacks in the
		 * kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);

	if (!user && handle_onfault(frame))
		return (0);

	return (SIGSEGV);
}

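/*
 * Probe an address to see whether it can be read without taking a machine
 * check.  badaddr_read() installs an onfault handler via setfault(), reads
 * 'size' bytes from 'addr', and returns non-zero if the access faulted;
 * the value read is stored through 'rptr' when that pointer is non-NULL.
 */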
int
badaddr(void *addr, size_t size)
{
	return (badaddr_read(addr, size, NULL));
}

int
badaddr_read(void *addr, size_t size, int *rptr)
{
	struct thread	*td;
	faultbuf	env;
	int		x;

	/* Get rid of any stale machine checks that have been waiting.  */
	__asm __volatile ("sync; isync");

	td = curthread;

	if (setfault(env)) {
		td->td_pcb->pcb_onfault = 0;
		__asm __volatile ("sync");
		return 1;
	}

	__asm __volatile ("sync");

	switch (size) {
	case 1:
		x = *(volatile int8_t *)addr;
		break;
	case 2:
		x = *(volatile int16_t *)addr;
		break;
	case 4:
		x = *(volatile int32_t *)addr;
		break;
	default:
		panic("badaddr: invalid size (%zd)", size);
	}

	/* Make sure we took the machine check, if we caused one. */
	__asm __volatile ("sync; isync");

	td->td_pcb->pcb_onfault = 0;
	__asm __volatile ("sync");	/* To be sure. */

	/* Use the value to avoid reorder. */
	if (rptr)
		*rptr = x;

	return (0);
}

/*
 * For now, this only deals with the particular unaligned access case
 * that gcc tends to generate.  Eventually it should handle all of the
 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
 */

static int
fix_unaligned(struct thread *td, struct trapframe *frame)
{
	struct thread	*fputhread;
	int		indicator, reg;
	double		*fpr;

	indicator = EXC_ALI_OPCODE_INDICATOR(frame->cpu.aim.dsisr);

	switch (indicator) {
	case EXC_ALI_LFD:
	case EXC_ALI_STFD:
		reg = EXC_ALI_RST(frame->cpu.aim.dsisr);
		fpr = &td->td_pcb->pcb_fpu.fpr[reg];
		fputhread = PCPU_GET(fputhread);

		/* Juggle the FPU to ensure that we've initialized
		 * the FPRs, and that their current state is in
		 * the PCB.
		 */
		if (fputhread != td) {
			if (fputhread)
				save_fpu(fputhread);
			enable_fpu(td);
		}
		save_fpu(td);

		if (indicator == EXC_ALI_LFD) {
			if (copyin((void *)frame->cpu.aim.dar, fpr,
			    sizeof(double)) != 0)
				return -1;
			enable_fpu(td);
		} else {
			if (copyout(fpr, (void *)frame->cpu.aim.dar,
			    sizeof(double)) != 0)
				return -1;
		}
		return 0;
		break;
	}

	return -1;
}

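/*
 * Emulate instructions that user programs cannot execute directly.
 * Currently only mfpvr is handled: reading the privileged Processor
 * Version Register traps, so copy its value into the target GPR and let
 * the caller step over the instruction.  Returns 0 if emulated, -1 if not.
 */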
static int
ppc_instr_emulate(struct trapframe *frame)
{
	uint32_t instr;
	int reg;

	instr = fuword32((void *)frame->srr0);

	if ((instr & 0xfc1fffff) == 0x7c1f42a6) {	/* mfpvr */
		reg = (instr & ~0xfc1fffff) >> 21;
		frame->fixreg[reg] = mfpvr();
		return (0);
	}

	return (-1);
}
