trap.c revision 260667
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: trap.c,v 1.58 2002/03/04 04:07:35 dbj Exp $
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/powerpc/aim/trap.c 260667 2014-01-15 04:16:45Z jhibbits $");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/kdb.h>
#include <sys/proc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/signalvar.h>
#include <sys/vmmeter.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <machine/_inttypes.h>
#include <machine/altivec.h>
#include <machine/cpu.h>
#include <machine/db_machdep.h>
#include <machine/fpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/pmap.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include <machine/spr.h>
#include <machine/sr.h>

static void	trap_fatal(struct trapframe *frame);
static void	printtrap(u_int vector, struct trapframe *frame, int isfatal,
		    int user);
static int	trap_pfault(struct trapframe *frame, int user);
static int	fix_unaligned(struct thread *td, struct trapframe *frame);
static int	ppc_instr_emulate(struct trapframe *frame);
static int	handle_onfault(struct trapframe *frame);
static void	syscall(struct trapframe *frame);

#ifdef __powerpc64__
       void	handle_kernel_slb_spill(int, register_t, register_t);
static int	handle_user_slb_spill(pmap_t pm, vm_offset_t addr);
extern int	n_slbs;
#endif

int	setfault(faultbuf);		/* defined in locore.S */

/* Why are these not defined in a header? */
int	badaddr(void *, size_t);
int	badaddr_read(void *, size_t, int *);

struct powerpc_exception {
	u_int	vector;
	char	*name;
};

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

/*
 * This is a hook which is initialised by the dtrace module
 * to handle traps which might occur during DTrace probe
 * execution.
 */
dtrace_trap_func_t	dtrace_trap_func;

dtrace_doubletrap_func_t	dtrace_doubletrap_func;

/*
 * This is a hook which is initialised by the systrace module
 * when it is loaded. This keeps the DTrace syscall provider
 * implementation opaque.
 */
systrace_probe_func_t	systrace_probe_func;

/*
 * These hooks are necessary for the pid, usdt and fasttrap providers.
 */
dtrace_fasttrap_probe_ptr_t	dtrace_fasttrap_probe_ptr;
dtrace_pid_probe_ptr_t		dtrace_pid_probe_ptr;
dtrace_return_probe_ptr_t	dtrace_return_probe_ptr;
int (*dtrace_invop_jump_addr)(struct trapframe *);
#endif

static struct powerpc_exception powerpc_exceptions[] = {
	{ 0x0100, "system reset" },
	{ 0x0200, "machine check" },
	{ 0x0300, "data storage interrupt" },
	{ 0x0380, "data segment exception" },
	{ 0x0400, "instruction storage interrupt" },
	{ 0x0480, "instruction segment exception" },
	{ 0x0500, "external interrupt" },
	{ 0x0600, "alignment" },
	{ 0x0700, "program" },
	{ 0x0800, "floating-point unavailable" },
	{ 0x0900, "decrementer" },
	{ 0x0c00, "system call" },
	{ 0x0d00, "trace" },
	{ 0x0e00, "floating-point assist" },
	{ 0x0f00, "performance monitoring" },
	{ 0x0f20, "altivec unavailable" },
	{ 0x1000, "instruction tlb miss" },
	{ 0x1100, "data load tlb miss" },
	{ 0x1200, "data store tlb miss" },
	{ 0x1300, "instruction breakpoint" },
	{ 0x1400, "system management" },
	{ 0x1600, "altivec assist" },
	{ 0x1700, "thermal management" },
	{ 0x2000, "run mode/trace" },
	{ 0x3000, NULL }
};

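/*
 * Map an exception vector offset to a human-readable name for use in
 * trap messages.
 */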
static const char *
trapname(u_int vector)
{
	struct	powerpc_exception *pe;

	for (pe = powerpc_exceptions; pe->vector != 0x3000; pe++) {
		if (pe->vector == vector)
			return (pe->name);
	}

	return ("unknown");
}

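/*
 * Common trap handler, called from the low-level exception vectors.
 * User-mode traps are turned into signals or handled in place (lazy
 * FPU/AltiVec context loading, SLB spills, system calls); unhandled
 * kernel-mode traps end up in trap_fatal().
 */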
void
trap(struct trapframe *frame)
{
	struct thread	*td;
	struct proc	*p;
	int		sig, type, user;
	u_int		ucode;
	ksiginfo_t	ksi;

	PCPU_INC(cnt.v_trap);

	td = curthread;
	p = td->td_proc;

	type = ucode = frame->exc;
	sig = 0;
	user = frame->srr1 & PSL_PR;

	CTR3(KTR_TRAP, "trap: %s type=%s (%s)", td->td_name,
	    trapname(type), user ? "user" : "kernel");

#ifdef KDTRACE_HOOKS
	/*
	 * A trap can occur while DTrace executes a probe. Before
	 * executing the probe, DTrace blocks re-scheduling and sets
	 * a flag in its per-cpu flags to indicate that it doesn't
	 * want to fault. On returning from the probe, the no-fault
	 * flag is cleared and finally re-scheduling is enabled.
	 *
	 * If the DTrace kernel module has registered a trap handler,
	 * call it and if it returns non-zero, assume that it has
	 * handled the trap and modified the trap frame so that this
	 * function can return normally.
	 */
	/*
	 * XXXDTRACE: add fasttrap and pid probe handlers here (if ever)
	 */
	if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type))
		return;
#endif

	if (user) {
		td->td_pticks = 0;
		td->td_frame = frame;
		if (td->td_ucred != p->p_ucred)
			cred_update_thread(td);

		/* User Mode Traps */
		switch (type) {
		case EXC_RUNMODETRC:
		case EXC_TRC:
			frame->srr1 &= ~PSL_SE;
			sig = SIGTRAP;
			break;

#ifdef __powerpc64__
		case EXC_ISE:
		case EXC_DSE:
			if (handle_user_slb_spill(&p->p_vmspace->vm_pmap,
			    (type == EXC_ISE) ? frame->srr0 :
			    frame->cpu.aim.dar) != 0)
				sig = SIGSEGV;
			break;
#endif
		case EXC_DSI:
		case EXC_ISI:
			sig = trap_pfault(frame, 1);
			break;

		case EXC_SC:
			syscall(frame);
			break;

		case EXC_FPU:
			KASSERT((td->td_pcb->pcb_flags & PCB_FPU) != PCB_FPU,
			    ("FPU already enabled for thread"));
			enable_fpu(td);
			break;

		case EXC_VEC:
			KASSERT((td->td_pcb->pcb_flags & PCB_VEC) != PCB_VEC,
			    ("Altivec already enabled for thread"));
			enable_vec(td);
			break;

		case EXC_VECAST_G4:
		case EXC_VECAST_G5:
			/*
			 * We get a VPU assist exception for IEEE mode
			 * vector operations on denormalized floats.
			 * Emulating this is a giant pain, so for now,
			 * just switch off IEEE mode and treat them as
			 * zero.
			 */

			save_vec(td);
			td->td_pcb->pcb_vec.vscr |= ALTIVEC_VSCR_NJ;
			enable_vec(td);
			break;

		case EXC_ALI:
			if (fix_unaligned(td, frame) != 0)
				sig = SIGBUS;
			else
				frame->srr0 += 4;
			break;

		case EXC_PGM:
			/* Identify the trap reason */
			if (frame->srr1 & EXC_PGM_TRAP)
				sig = SIGTRAP;
			else if (ppc_instr_emulate(frame) == 0)
				frame->srr0 += 4;
			else
				sig = SIGILL;
			break;

		default:
			trap_fatal(frame);
		}
	} else {
		/* Kernel Mode Traps */

		KASSERT(cold || td->td_ucred != NULL,
		    ("kernel trap doesn't have ucred"));
		switch (type) {
#ifdef KDTRACE_HOOKS
		case EXC_PGM:
			if (frame->srr1 & EXC_PGM_TRAP) {
				if (*(uintptr_t *)frame->srr0 == 0x7c810808) {
					if (dtrace_invop_jump_addr != NULL) {
						dtrace_invop_jump_addr(frame);
						return;
					}
				}
			}
			break;
#endif
#ifdef __powerpc64__
		case EXC_DSE:
			if ((frame->cpu.aim.dar & SEGMENT_MASK) == USER_ADDR) {
				__asm __volatile ("slbmte %0, %1" ::
					"r"(td->td_pcb->pcb_cpu.aim.usr_vsid),
					"r"(USER_SLB_SLBE));
				return;
			}
			break;
#endif
		case EXC_DSI:
			if (trap_pfault(frame, 0) == 0)
				return;
			break;
		case EXC_MCHK:
			if (handle_onfault(frame))
				return;
			break;
		default:
			break;
		}
		trap_fatal(frame);
	}

	if (sig != 0) {
		if (p->p_sysent->sv_transtrap != NULL)
			sig = (p->p_sysent->sv_transtrap)(sig, type);
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = sig;
		ksi.ksi_code = (int) ucode; /* XXX, not POSIX */
		/* ksi.ksi_addr = ? */
		ksi.ksi_trapno = type;
		trapsignal(td, &ksi);
	}

	userret(td, frame);
}

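/*
 * Report an unrecoverable trap and drop to the debugger if one is
 * configured; otherwise panic.
 */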
static void
trap_fatal(struct trapframe *frame)
{

	printtrap(frame->exc, frame, 1, (frame->srr1 & PSL_PR));
#ifdef KDB
	if ((debugger_on_panic || kdb_active) &&
	    kdb_trap(frame->exc, 0, frame))
		return;
#endif
	panic("%s trap", trapname(frame->exc));
}

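/*
 * Print a summary of the trap: the exception vector, the faulting
 * address where relevant, and the saved machine state.
 */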
static void
printtrap(u_int vector, struct trapframe *frame, int isfatal, int user)
{

	printf("\n");
	printf("%s %s trap:\n", isfatal ? "fatal" : "handled",
	    user ? "user" : "kernel");
	printf("\n");
	printf("   exception       = 0x%x (%s)\n", vector, trapname(vector));
	switch (vector) {
	case EXC_DSE:
	case EXC_DSI:
		printf("   virtual address = 0x%" PRIxPTR "\n",
		    frame->cpu.aim.dar);
		printf("   dsisr           = 0x%" PRIxPTR "\n",
		    frame->cpu.aim.dsisr);
		break;
	case EXC_ISE:
	case EXC_ISI:
		printf("   virtual address = 0x%" PRIxPTR "\n", frame->srr0);
		break;
	}
	printf("   srr0            = 0x%" PRIxPTR "\n", frame->srr0);
	printf("   srr1            = 0x%" PRIxPTR "\n", frame->srr1);
	printf("   lr              = 0x%" PRIxPTR "\n", frame->lr);
	printf("   curthread       = %p\n", curthread);
	if (curthread != NULL)
		printf("          pid = %d, comm = %s\n",
		    curthread->td_proc->p_pid, curthread->td_name);
	printf("\n");
}

/*
 * Handles a fatal fault when we have onfault state to recover.  Returns
 * non-zero if there was onfault recovery state available.
 */
static int
handle_onfault(struct trapframe *frame)
{
	struct		thread *td;
	faultbuf	*fb;

	td = curthread;
	fb = td->td_pcb->pcb_onfault;
	if (fb != NULL) {
		frame->srr0 = (*fb)[0];
		frame->fixreg[1] = (*fb)[1];
		frame->fixreg[2] = (*fb)[2];
		frame->fixreg[3] = 1;
		frame->cr = (*fb)[3];
		bcopy(&(*fb)[4], &frame->fixreg[13],
		    19 * sizeof(register_t));
		return (1);
	}
	return (0);
}

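/*
 * Fetch system call arguments from the trap frame.  The system call
 * number arrives in r0 and the first NARGREG arguments in the argument
 * registers starting at FIRSTARG; any remaining arguments are copied in
 * from the user stack.  The SYS_syscall and SYS___syscall indirection
 * conventions are resolved here as well.
 */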
int
cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
{
	struct proc *p;
	struct trapframe *frame;
	caddr_t	params;
	size_t argsz;
	int error, n, i;

	p = td->td_proc;
	frame = td->td_frame;

	sa->code = frame->fixreg[0];
	params = (caddr_t)(frame->fixreg + FIRSTARG);
	n = NARGREG;

	if (sa->code == SYS_syscall) {
		/*
		 * code is first argument,
		 * followed by actual args.
		 */
		sa->code = *(register_t *) params;
		params += sizeof(register_t);
		n -= 1;
	} else if (sa->code == SYS___syscall) {
		/*
		 * Like syscall, but code is a quad,
		 * so as to maintain quad alignment
		 * for the rest of the args.
		 */
		if (SV_PROC_FLAG(p, SV_ILP32)) {
			params += sizeof(register_t);
			sa->code = *(register_t *) params;
			params += sizeof(register_t);
			n -= 2;
		} else {
			sa->code = *(register_t *) params;
			params += sizeof(register_t);
			n -= 1;
		}
	}

	if (p->p_sysent->sv_mask)
		sa->code &= p->p_sysent->sv_mask;
	if (sa->code >= p->p_sysent->sv_size)
		sa->callp = &p->p_sysent->sv_table[0];
	else
		sa->callp = &p->p_sysent->sv_table[sa->code];

	sa->narg = sa->callp->sy_narg;

	if (SV_PROC_FLAG(p, SV_ILP32)) {
		argsz = sizeof(uint32_t);

		for (i = 0; i < n; i++)
			sa->args[i] = ((u_register_t *)(params))[i] &
			    0xffffffff;
	} else {
		argsz = sizeof(uint64_t);

		for (i = 0; i < n; i++)
			sa->args[i] = ((u_register_t *)(params))[i];
	}

	if (sa->narg > n)
		error = copyin(MOREARGS(frame->fixreg[1]), sa->args + n,
			       (sa->narg - n) * argsz);
	else
		error = 0;

#ifdef __powerpc64__
	if (SV_PROC_FLAG(p, SV_ILP32) && sa->narg > n) {
		/* Expand the size of arguments copied from the stack */

		for (i = sa->narg; i >= n; i--)
			sa->args[i] = ((uint32_t *)(&sa->args[n]))[i-n];
	}
#endif

	if (error == 0) {
		td->td_retval[0] = 0;
		td->td_retval[1] = frame->fixreg[FIRSTARG + 1];
	}
	return (error);
}

#include "../../kern/subr_syscall.c"

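/*
 * System call entry from trap().  Argument fetching and dispatch are
 * handled by the machine-independent syscallenter()/syscallret() code
 * included above.
 */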
void
syscall(struct trapframe *frame)
{
	struct thread *td;
	struct syscall_args sa;
	int error;

	td = curthread;
	td->td_frame = frame;

#ifdef __powerpc64__
	/*
	 * Speculatively restore last user SLB segment, which we know is
	 * invalid already, since we are likely to do copyin()/copyout().
	 */
	__asm __volatile ("slbmte %0, %1; isync" ::
	    "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
#endif

	error = syscallenter(td, &sa);
	syscallret(td, error, &sa);
}

#ifdef __powerpc64__
/* Handle kernel SLB faults -- runs in real mode, all seat belts off */
void
handle_kernel_slb_spill(int type, register_t dar, register_t srr0)
{
	struct slb *slbcache;
	uint64_t slbe, slbv;
	uint64_t esid, addr;
	int i;

	addr = (type == EXC_ISE) ? srr0 : dar;
	slbcache = PCPU_GET(slb);
	esid = (uintptr_t)addr >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	/* See if the hardware flushed this somehow (can happen in LPARs) */
	for (i = 0; i < n_slbs; i++)
		if (slbcache[i].slbe == (slbe | (uint64_t)i))
			return;

	/* Not in the map, needs to actually be added */
	slbv = kernel_va_to_slbv(addr);
	if (slbcache[USER_SLB_SLOT].slbe == 0) {
		for (i = 0; i < n_slbs; i++) {
			if (i == USER_SLB_SLOT)
				continue;
			if (!(slbcache[i].slbe & SLBE_VALID))
				goto fillkernslb;
		}

		if (i == n_slbs)
			slbcache[USER_SLB_SLOT].slbe = 1;
	}

	/* Sacrifice a random SLB entry that is not the user entry */
	i = mftb() % n_slbs;
	if (i == USER_SLB_SLOT)
		i = (i+1) % n_slbs;

fillkernslb:
	/* Write new entry */
	slbcache[i].slbv = slbv;
	slbcache[i].slbe = slbe | (uint64_t)i;

	/* Trap handler will restore from cache on exit */
}

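/*
 * Handle an SLB miss on a user address: allocate a VSID for the faulting
 * segment if it has none yet, or re-insert the existing entry into this
 * pmap's SLB cache.
 */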
static int
handle_user_slb_spill(pmap_t pm, vm_offset_t addr)
{
	struct slb *user_entry;
	uint64_t esid;
	int i;

	esid = (uintptr_t)addr >> ADDR_SR_SHFT;

	PMAP_LOCK(pm);
	user_entry = user_va_to_slb_entry(pm, addr);

	if (user_entry == NULL) {
		/* allocate_vsid auto-spills it */
		(void)allocate_user_vsid(pm, esid, 0);
	} else {
		/*
		 * Check that another CPU has not already mapped this.
		 * XXX: Per-thread SLB caches would be better.
		 */
		for (i = 0; i < pm->pm_slb_len; i++)
			if (pm->pm_slb[i] == user_entry)
				break;

		if (i == pm->pm_slb_len)
			slb_insert_user(pm, user_entry);
	}
	PMAP_UNLOCK(pm);

	return (0);
}
#endif

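/*
 * Resolve a page fault.  Works out the faulting address and access type,
 * selects the user or kernel map (faults on the dedicated user segment
 * are remapped back to the corresponding user address), and calls
 * vm_fault().  Returns 0 on success and SIGSEGV otherwise.
 */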
static int
trap_pfault(struct trapframe *frame, int user)
{
	vm_offset_t	eva, va;
	struct		thread *td;
	struct		proc *p;
	vm_map_t	map;
	vm_prot_t	ftype;
	int		rv;
	register_t	user_sr;

	td = curthread;
	p = td->td_proc;
	if (frame->exc == EXC_ISI) {
		eva = frame->srr0;
		ftype = VM_PROT_EXECUTE;
		if (frame->srr1 & SRR1_ISI_PFAULT)
			ftype |= VM_PROT_READ;
	} else {
		eva = frame->cpu.aim.dar;
		if (frame->cpu.aim.dsisr & DSISR_STORE)
			ftype = VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;
	}

	if (user) {
		map = &p->p_vmspace->vm_map;
	} else {
		if ((eva >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
			if (p->p_vmspace == NULL)
				return (SIGSEGV);

			map = &p->p_vmspace->vm_map;

			user_sr = td->td_pcb->pcb_cpu.aim.usr_segm;
			eva &= ADDR_PIDX | ADDR_POFF;
			eva |= user_sr << ADDR_SR_SHFT;
		} else {
			map = kernel_map;
		}
	}
	va = trunc_page(eva);

	if (map != kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 *	critical time.
		 */
		PROC_LOCK(p);
		++p->p_lock;
		PROC_UNLOCK(p);

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);

		PROC_LOCK(p);
		--p->p_lock;
		PROC_UNLOCK(p);
		/*
		 * XXXDTRACE: add dtrace_doubletrap_func here?
		 */
	} else {
		/*
		 * Don't have to worry about process locking or stacks in the
		 * kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);

	if (!user && handle_onfault(frame))
		return (0);

	return (SIGSEGV);
}

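/*
 * Probe an address for validity by reading from it with an onfault
 * handler installed, so a resulting machine check is recovered rather
 * than fatal.  badaddr() returns non-zero if the access faulted;
 * badaddr_read() additionally returns the value read.
 */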
int
badaddr(void *addr, size_t size)
{
	return (badaddr_read(addr, size, NULL));
}

int
badaddr_read(void *addr, size_t size, int *rptr)
{
	struct thread	*td;
	faultbuf	env;
	int		x;

	/* Get rid of any stale machine checks that have been waiting.  */
	__asm __volatile ("sync; isync");

	td = curthread;

	if (setfault(env)) {
		td->td_pcb->pcb_onfault = 0;
		__asm __volatile ("sync");
		return 1;
	}

	__asm __volatile ("sync");

	switch (size) {
	case 1:
		x = *(volatile int8_t *)addr;
		break;
	case 2:
		x = *(volatile int16_t *)addr;
		break;
	case 4:
		x = *(volatile int32_t *)addr;
		break;
	default:
		panic("badaddr: invalid size (%zd)", size);
	}

	/* Make sure we took the machine check, if we caused one. */
	__asm __volatile ("sync; isync");

	td->td_pcb->pcb_onfault = 0;
	__asm __volatile ("sync");	/* To be sure. */

	/* Use the value to avoid reorder. */
	if (rptr)
		*rptr = x;

	return (0);
}

/*
 * For now, this only deals with the particular unaligned access case
 * that gcc tends to generate.  Eventually it should handle all of the
 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
 */

static int
fix_unaligned(struct thread *td, struct trapframe *frame)
{
	struct thread	*fputhread;
	int		indicator, reg;
	double		*fpr;

	indicator = EXC_ALI_OPCODE_INDICATOR(frame->cpu.aim.dsisr);

	switch (indicator) {
	case EXC_ALI_LFD:
	case EXC_ALI_STFD:
		reg = EXC_ALI_RST(frame->cpu.aim.dsisr);
		fpr = &td->td_pcb->pcb_fpu.fpr[reg];
		fputhread = PCPU_GET(fputhread);

		/* Juggle the FPU to ensure that we've initialized
		 * the FPRs, and that their current state is in
		 * the PCB.
		 */
		if (fputhread != td) {
			if (fputhread)
				save_fpu(fputhread);
			enable_fpu(td);
		}
		save_fpu(td);

		if (indicator == EXC_ALI_LFD) {
			if (copyin((void *)frame->cpu.aim.dar, fpr,
			    sizeof(double)) != 0)
				return -1;
			enable_fpu(td);
		} else {
			if (copyout(fpr, (void *)frame->cpu.aim.dar,
			    sizeof(double)) != 0)
				return -1;
		}
		return 0;
		break;
	}

	return -1;
}

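/*
 * Emulate selected instructions that raise program exceptions in user
 * mode.  Only mfpvr is handled at present: the Processor Version
 * Register value is supplied to the faulting thread's destination
 * register.
 */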
static int
ppc_instr_emulate(struct trapframe *frame)
{
	uint32_t instr;
	int reg;

	instr = fuword32((void *)frame->srr0);

	if ((instr & 0xfc1fffff) == 0x7c1f42a6) {	/* mfpvr */
		reg = (instr & ~0xfc1fffff) >> 21;
		frame->fixreg[reg] = mfpvr();
		return (0);
	}

	return (-1);
}