trap.c revision 91504
/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: trap.c,v 1.26 2000/05/27 00:40:40 sommerfeld Exp $
 */

#ifndef lint
static const char rcsid[] =
  "$FreeBSD: head/sys/powerpc/aim/trap.c 91504 2002-02-28 21:52:08Z arr $";
#endif /* not lint */

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/reboot.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/user.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/trap.h>

/* These definitions should probably be somewhere else				XXX */
#define	FIRSTARG	3		/* first argument is in reg 3 */
#define	NARGREG		8		/* 8 args are in registers */
#define	MOREARGS(sp)	((caddr_t)((int)(sp) + 8)) /* more args go here */

#ifdef WITNESS
extern char *syscallnames[];
#endif

#if 0 /* XXX: not used yet */
static int fix_unaligned __P((struct proc *p, struct trapframe *frame));
#endif
static void trap_fatal __P((struct trapframe *frame));
static void printtrap __P((int vector, struct trapframe *frame, int isfatal,
			      int user));
static int trap_pfault __P((struct trapframe *frame, int user));
static int handle_onfault __P((struct trapframe *frame));

static const char *ppc_exception_names[] = {
	"Reserved 0",				/* 0 */
	"Reset",				/* 1 */
	"Machine Check",			/* 2 */
	"Data Storage Interrupt",		/* 3 */
	"Instruction Storage Interrupt",	/* 4 */
	"External Interrupt",			/* 5 */
	"Alignment Interrupt",			/* 6 */
	"Program Interrupt",			/* 7 */
	"Floating Point Unavailable",		/* 8 */
	"Decrementer Interrupt",		/* 9 */
	"Reserved",				/* 10 */
	"Reserved",				/* 11 */
	"System Call",				/* 12 */
	"Trace",				/* 13 */
	"Floating Point Assist",		/* 14 */
	"Performance Monitoring",		/* 15 */
	"Instruction TLB Miss",			/* 16 */
	"Data Load TLB Miss",			/* 17 */
	"Data Store TLB Miss",			/* 18 */
	"Instruction Breakpoint",		/* 19 */
	"System Management Interrupt",		/* 20 */
	"Reserved 21",				/* 21 */
	"Reserved 22",				/* 22 */
	"Reserved 23",				/* 23 */
	"Reserved 24",				/* 24 */
	"Reserved 25",				/* 25 */
	"Reserved 26",				/* 26 */
	"Reserved 27",				/* 27 */
	"Reserved 28",				/* 28 */
	"Reserved 29",				/* 29 */
	"Reserved 30",				/* 30 */
	"Reserved 31",				/* 31 */
	"Reserved 32",				/* 32 */
	"Reserved 33",				/* 33 */
	"Reserved 34",				/* 34 */
	"Reserved 35",				/* 35 */
	"Reserved 36",				/* 36 */
	"Reserved 37",				/* 37 */
	"Reserved 38",				/* 38 */
	"Reserved 39",				/* 39 */
	"Reserved 40",				/* 40 */
	"Reserved 41",				/* 41 */
	"Reserved 42",				/* 42 */
	"Reserved 43",				/* 43 */
	"Reserved 44",				/* 44 */
	"Reserved 45",				/* 45 */
	"Reserved 46",				/* 46 */
	"Reserved 47",				/* 47 */
};

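/*
 * Dump a human-readable summary of a trap frame: the exception vector and
 * name, the faulting virtual address for DSI/ISI, srr0, and the current
 * thread/process, noting whether the trap was fatal and user or kernel.
 */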
static void
printtrap(int vector, struct trapframe *frame, int isfatal, int user)
{

	printf("\n");
	printf("%s %s trap:\n", isfatal ? "fatal" : "handled",
	    user ? "user" : "kernel");
	printf("\n");
	printf("   exception       = 0x%x (%s)\n", vector >> 8,
	    ppc_exception_names[vector >> 8]);
	switch (vector) {
	case EXC_DSI:
		printf("   virtual address = 0x%x\n", frame->dar);
		break;
	case EXC_ISI:
		printf("   virtual address = 0x%x\n", frame->srr0);
		break;
	}
	printf("   srr0            = 0x%x", frame->srr0);
	printf("   curthread       = %p\n", curthread);
	if (curthread != NULL)
		printf("          pid = %d, comm = %s\n",
		    curthread->td_proc->p_pid, curthread->td_proc->p_comm);
	printf("\n");
}

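/*
 * Report an unrecoverable trap and drop into the kernel debugger if one is
 * configured; otherwise panic with the exception name.
 */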
static void
trap_fatal(struct trapframe *frame)
{

	printtrap(frame->exc, frame, 1, (frame->srr1 & PSL_PR));
#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(frame->exc, 0, frame))
		return;
#endif
	panic("%s Trap", ppc_exception_names[frame->exc >> 8]);
}

/*
 * Handles a fatal fault when we have onfault state to recover.  Returns
 * non-zero if there was onfault recovery state available.
 */
static int
handle_onfault(struct trapframe *frame)
{
	struct thread *td;
	faultbuf *fb;

	td = curthread;
	fb = td->td_pcb->pcb_onfault;
	if (fb != NULL) {
		frame->srr0 = (*fb)[0];
		frame->fixreg[1] = (*fb)[1];
		frame->fixreg[2] = (*fb)[2];
		frame->cr = (*fb)[3];
		bcopy(&(*fb)[4], &frame->fixreg[13],
		    19 * sizeof(register_t));
		return (1);
	}
	return (0);
}

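/*
 * Common trap handler.  Exceptions taken from user mode either deliver a
 * signal (translated through the process sysentvec if necessary) or are
 * handled directly (page faults, system calls, FPU enable); exceptions taken
 * from kernel mode are limited to recoverable page faults and machine checks
 * with onfault state, and anything else is fatal.
 */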
void
trap(struct trapframe *frame)
{
	struct thread *td;
	struct proc *p;
	int sig, type, user;
	u_int sticks, ucode;

	atomic_add_int(&cnt.v_trap, 1);

	td = curthread;
	p = td->td_proc;

	type = frame->exc;
	ucode = type;
	sig = 0;
	user = (frame->srr1 & PSL_PR);
	sticks = 0;

	CTR3(KTR_TRAP, "trap: %s type=%s (%s)", p->p_comm,
	    ppc_exception_names[type >> 8],
	    user ? "user" : "kernel");

	if (user) {
		sticks = td->td_kse->ke_sticks;
		td->td_frame = frame;
#ifdef DIAGNOSTIC
		/* see the comment in ast() */
		if (td->td_ucred != NULL)
			panic("trap(): thread got a ucred while in userspace");
		td->td_ucred = td->td_ucred_cache;
		td->td_ucred_cache = NULL;
#endif
		if (td->td_ucred != p->p_ucred)
			cred_update_thread(td);

		/* User Mode Traps */
		switch (type) {
		case EXC_TRC:
			frame->srr1 &= ~PSL_SE;
			sig = SIGTRAP;
			break;
		case EXC_DSI:
		case EXC_ISI:
			sig = trap_pfault(frame, 1);
			break;
		case EXC_SC:
			syscall(frame);
			break;
		case EXC_FPU:
			enable_fpu(PCPU_GET(curpcb));
			frame->srr1 |= PSL_FP;
			break;

		case EXC_ALI:
#if 0
			if (fix_unaligned(p, frame) != 0)
#endif
				sig = SIGBUS;
#if 0
			else
				frame->srr0 += 4;
#endif
			break;

		case EXC_PGM:
			/* XXX temporarily */
			/* XXX: Magic Number? */
			if (frame->srr1 & 0x0002000)
				sig = SIGTRAP;
			else
				sig = SIGILL;
			break;

		default:
			trap_fatal(frame);
		}
	} else {
		/* Kernel Mode Traps */

		KASSERT(cold || td->td_ucred != NULL,
		    ("kernel trap doesn't have ucred"));
		switch (type) {
		case EXC_DSI:
			if (trap_pfault(frame, 0) == 0)
				return;
			break;
		case EXC_MCHK:
			if (handle_onfault(frame))
				return;
			break;
		default:
			break;
		}
		/*
		 * Any kernel trap that was not recovered above is fatal;
		 * do not fall through to the user-mode return path.
		 */
		trap_fatal(frame);
		/* NOTREACHED */
	}
	if (sig != 0) {
		if (p->p_sysent->sv_transtrap != NULL)
			sig = (p->p_sysent->sv_transtrap)(sig, type);
		trapsignal(p, sig, ucode);
	}
	userret(td, frame, sticks);
	mtx_assert(&Giant, MA_NOTOWNED);
#ifdef DIAGNOSTIC 			/* see the comment in ast() */
	if (td->td_ucred_cache)
		panic("trap: thread already has cached ucred");
	td->td_ucred_cache = td->td_ucred;
	td->td_ucred = NULL;
#endif /* DIAGNOSTIC */
}

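/*
 * System call entry point for EXC_SC.  The syscall number arrives in r0 and
 * up to NARGREG arguments in r3-r10; any further arguments are fetched from
 * the user stack via MOREARGS().  Results are returned in r3/r4 and an error
 * is flagged to user level by setting the summary-overflow (SO) bit of CR0.
 */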
void
syscall(struct trapframe *frame)
{
	caddr_t params;
	struct sysent *callp;
	struct thread *td;
	struct proc *p;
	int error, n;
	size_t narg;
	register_t args[10];
	u_int code;

	td = curthread;
	p = td->td_proc;

	atomic_add_int(&cnt.v_syscall, 1);

	code = frame->fixreg[0];
	params = (caddr_t) (frame->fixreg + FIRSTARG);

	if (p->p_sysent->sv_prepsyscall) {
		/*
		 * The prep code is MP aware.
		 */
		(*p->p_sysent->sv_prepsyscall)(frame, args, &code, &params);
	} else if (code == SYS_syscall) {
		/*
		 * code is first argument,
		 * followed by actual args.
		 */
		code = *(u_int *)params;
		params += sizeof(register_t);
	} else if (code == SYS___syscall) {
		/*
		 * Like syscall, but code is a quad,
		 * so as to maintain quad alignment
		 * for the rest of the args.
		 */
		params += sizeof(register_t);
		code = *(u_int *)params;
		params += sizeof(register_t);
	}

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

	/* Number of arguments still available in registers. */
	n = NARGREG - (params - (caddr_t)(frame->fixreg + FIRSTARG)) /
	    sizeof(register_t);
	if (narg > n) {
		bcopy(params, args, n * sizeof(register_t));
		if ((error = copyin(MOREARGS(frame->fixreg[1]), args + n,
		    (narg - n) * sizeof(register_t))) != 0) {
#ifdef	KTRACE
			/* Can't get all the arguments! */
			if (KTRPOINT(p, KTR_SYSCALL))
				ktrsyscall(p->p_tracep, code, narg, args);
#endif
			goto bad;
		}
		params = (caddr_t) args;
	}

	/*
	 * Try to run the syscall without Giant if the syscall is MP safe.
	 */
	if ((callp->sy_narg & SYF_MPSAFE) == 0)
		mtx_lock(&Giant);

#ifdef	KTRACE
	if (KTRPOINT(p, KTR_SYSCALL))
		ktrsyscall(p->p_tracep, code, narg, params);
#endif
	td->td_retval[0] = 0;
	td->td_retval[1] = frame->fixreg[FIRSTARG + 1];

	STOPEVENT(p, S_SCE, narg);

	error = (*callp->sy_call)(td, params);
	switch (error) {
	case 0:
		frame->fixreg[FIRSTARG] = td->td_retval[0];
		frame->fixreg[FIRSTARG + 1] = td->td_retval[1];
		/* Clear CR0[SO]: no error to report. */
		frame->cr &= ~0x10000000;
		break;
	case ERESTART:
		/*
		 * Set user's pc back to redo the system call.
		 */
		frame->srr0 -= 4;
		break;
	case EJUSTRETURN:
		/* nothing to do */
		break;
	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame->fixreg[FIRSTARG] = error;
		/* Set CR0[SO] to flag the error to the caller. */
		frame->cr |= 0x10000000;
		break;
	}

#ifdef	KTRACE
	if (KTRPOINT(p, KTR_SYSRET))
		ktrsysret(p->p_tracep, code, error, td->td_retval[0]);
#endif

	if ((callp->sy_narg & SYF_MPSAFE) == 0)
		mtx_unlock(&Giant);

	/*
	 * Does the comment in the i386 code about errno apply here?
	 */
	STOPEVENT(p, S_SCX, code);

#ifdef WITNESS
	if (witness_list(td)) {
		panic("system call %s returning with mutex(s) held\n",
		    syscallnames[code]);
	}
#endif
	mtx_assert(&sched_lock, MA_NOTOWNED);
	mtx_assert(&Giant, MA_NOTOWNED);
}

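/*
 * Resolve a DSI or ISI fault against the appropriate map (the kernel map, or
 * the current process's map when the access went through the USER_SR segment
 * register) and return 0 on success or the signal to deliver on failure.
 */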
static int
trap_pfault(struct trapframe *frame, int user)
{
	vm_offset_t eva, va;
	struct thread *td;
	struct proc *p;
	vm_map_t map;
	vm_prot_t ftype;
	int rv;

	td = curthread;
	p = td->td_proc;
	if (frame->exc == EXC_ISI) {
		eva = frame->srr0;
		ftype = VM_PROT_READ | VM_PROT_EXECUTE;
	} else {
		eva = frame->dar;
		if (frame->dsisr & DSISR_STORE)
			ftype = VM_PROT_READ | VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;
	}

	if ((eva >> ADDR_SR_SHFT) != USER_SR) {
		if (user)
			return (SIGSEGV);
		map = kernel_map;
	} else {
		u_int user_sr;

		if (p->p_vmspace == NULL)
			return (SIGSEGV);

		__asm ("mfsr %0, %1"
		    : "=r"(user_sr)
		    : "K"(USER_SR));
		eva &= ADDR_PIDX | ADDR_POFF;
		eva |= user_sr << ADDR_SR_SHFT;
		map = &p->p_vmspace->vm_map;
	}
	va = trunc_page(eva);

	mtx_lock(&Giant);
	if (map != kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 *	critical time.
		 */
		PROC_LOCK(p);
		++p->p_lock;
		PROC_UNLOCK(p);

		/*
		 * Grow the stack if necessary.  grow_stack returns false
		 * only if va falls into a growable stack region and the
		 * stack growth fails.  It returns true if va was not
		 * within a growable stack region, or if the stack growth
		 * succeeded.
		 */
		if (!grow_stack(p, va))
			rv = KERN_FAILURE;
		else
			/* Fault in the user page: */
			rv = vm_fault(map, va, ftype,
			      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
						      : VM_FAULT_NORMAL);

		PROC_LOCK(p);
		--p->p_lock;
		PROC_UNLOCK(p);
	} else {
		/*
		 * Don't have to worry about process locking or stacks in the
		 * kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}
	mtx_unlock(&Giant);

	if (rv == KERN_SUCCESS)
		return (0);

	if (!user && handle_onfault(frame))
		return (0);

	return (SIGSEGV);
}

#if 0 /* XXX: child_return not used */
/*
 * XXX: the trapframe return values should be setup in vm_machdep.c in
 * cpu_fork().
 */
void
child_return(void *arg)
{
	struct proc *p;
	struct trapframe *tf;

	p = arg;
	tf = trapframe(p);

	tf->fixreg[FIRSTARG] = 0;
	tf->fixreg[FIRSTARG + 1] = 1;
	tf->cr &= ~0x10000000;
	tf->srr1 &= ~PSL_FP;	/* Disable FPU, as we can't be fpuproc */
#ifdef	KTRACE
	if (KTRPOINT(p, KTR_SYSRET))
		ktrsysret(p, SYS_fork, 0, 0);
#endif
	/* Profiling?							XXX */
	curcpu()->ci_schedstate.spc_curpriority = p->p_priority;
}
#endif

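/*
 * Load the USER_SR segment register with the given segment value so that the
 * subsequent access through the USER_ADDR window reaches user space.
 */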
static __inline void
setusr(int content)
{

	__asm __volatile ("isync; mtsr %0,%1; isync"
		          :: "n"(USER_SR), "r"(content));
}

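/*
 * Copy len bytes from user space into the kernel, one segment at a time, by
 * mapping each user segment at USER_SR and copying through the USER_ADDR
 * window.  Fault recovery via setfault() is still disabled (#if 0), so faults
 * on bad user addresses are not yet caught here.
 */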
int
copyin(udaddr, kaddr, len)
	const void *udaddr;
	void *kaddr;
	size_t len;
{
	const char *up;
	char *kp;
	char *p;
	size_t l;
	faultbuf env;
	u_int segment;

	up = udaddr;
	kp = kaddr;

#if 0
	if (setfault(env)) {
		PCPU_GET(curpcb)->pcb_onfault = 0;
		return EFAULT;
	}
#endif
	while (len > 0) {
		p = (char *)USER_ADDR + ((u_int)up & ~SEGMENT_MASK);
		l = ((char *)USER_ADDR + SEGMENT_LENGTH) - p;
		if (l > len)
			l = len;
		segment = (u_int)up >> ADDR_SR_SHFT;
		setusr(PCPU_GET(curpcb)->pcb_pm->pm_sr[segment]);
		bcopy(p, kp, l);
		up += l;
		kp += l;
		len -= l;
	}
	PCPU_GET(curpcb)->pcb_onfault = 0;
	return 0;
}

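/*
 * Copy len bytes from the kernel out to user space, using the same
 * segment-at-a-time USER_SR/USER_ADDR mapping as copyin() above; setfault()
 * based fault recovery is likewise still disabled.
 */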
int
copyout(kaddr, udaddr, len)
	const void *kaddr;
	void *udaddr;
	size_t len;
{
	const char *kp;
	char *up;
	char *p;
	size_t l;
	faultbuf env;
	u_int segment;

	kp = kaddr;
	up = udaddr;

#if 0
	if (setfault(env)) {
		PCPU_GET(curpcb)->pcb_onfault = 0;
		return EFAULT;
	}
#endif
	while (len > 0) {
		p = (char *)USER_ADDR + ((u_int)up & ~SEGMENT_MASK);
		l = ((char *)USER_ADDR + SEGMENT_LENGTH) - p;
		if (l > len)
			l = len;
		segment = (u_int)up >> ADDR_SR_SHFT;
		setusr(PCPU_GET(curpcb)->pcb_pm->pm_sr[segment]);
		bcopy(kp, p, l);
		up += l;
		kp += l;
		len -= l;
	}
	PCPU_GET(curpcb)->pcb_onfault = 0;
	return 0;
}

#if 0 /* XXX: not used yet */
/*
 * kcopy(const void *src, void *dst, size_t len);
 *
 * Copy len bytes from src to dst, aborting if we encounter a fatal
 * page fault.
 *
 * kcopy() _must_ save and restore the old fault handler since it is
 * called by uiomove(), which may be in the path of servicing a non-fatal
 * page fault.
 */
int
kcopy(const void *src, void *dst, size_t len)
{
	faultbuf env, *oldfault;

	oldfault = PCPU_GET(curpcb)->pcb_onfault;
	if (setfault(env)) {
		PCPU_GET(curpcb)->pcb_onfault = oldfault;
		return EFAULT;
	}

	bcopy(src, dst, len);

	PCPU_GET(curpcb)->pcb_onfault = oldfault;
	return 0;
}

int
badaddr(void *addr, size_t size)
{

	return badaddr_read(addr, size, NULL);
}

int
badaddr_read(void *addr, size_t size, int *rptr)
{
	faultbuf env;
	int x;

	/* Get rid of any stale machine checks that have been waiting.  */
	__asm __volatile ("sync; isync");

	if (setfault(env)) {
		PCPU_GET(curpcb)->pcb_onfault = 0;
		__asm __volatile ("sync");
		return 1;
	}

	__asm __volatile ("sync");

	switch (size) {
	case 1:
		x = *(volatile int8_t *)addr;
		break;
	case 2:
		x = *(volatile int16_t *)addr;
		break;
	case 4:
		x = *(volatile int32_t *)addr;
		break;
	default:
		panic("badaddr: invalid size (%d)", size);
	}

	/* Make sure we took the machine check, if we caused one. */
	__asm __volatile ("sync; isync");

	PCPU_GET(curpcb)->pcb_onfault = 0;
	__asm __volatile ("sync");	/* To be sure. */

	/* Use the value to avoid reorder. */
	if (rptr)
		*rptr = x;

	return 0;
}
#endif

/*
 * For now, this only deals with the particular unaligned access case
 * that gcc tends to generate.  Eventually it should handle all of the
 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
 */

#if 0 /* XXX: Not used yet */
static int
fix_unaligned(p, frame)
	struct proc *p;
	struct trapframe *frame;
{
	struct pcb *pcb;
	int indicator;

	pcb = PCPU_GET(curpcb);	/* XXX: assumes p is the current process */
	indicator = EXC_ALI_OPCODE_INDICATOR(frame->dsisr);

	switch (indicator) {
	case EXC_ALI_LFD:
	case EXC_ALI_STFD:
		{
			int reg = EXC_ALI_RST(frame->dsisr);
			double *fpr = &p->p_addr->u_pcb.pcb_fpu.fpr[reg];

			/* Juggle the FPU to ensure that we've initialized
			 * the FPRs, and that their current state is in
			 * the PCB.
			 */
			if (!(pcb->pcb_flags & PCB_FPU)) {
				enable_fpu(PCPU_GET(curpcb));
				frame->srr1 |= PSL_FP;
			}
			save_fpu(PCPU_GET(curpcb));

			if (indicator == EXC_ALI_LFD) {
				if (copyin((void *)frame->dar, fpr,
				    sizeof(double)) != 0)
					return -1;
				if (!(pcb->pcb_flags & PCB_FPU)) {
					enable_fpu(PCPU_GET(curpcb));
					frame->srr1 |= PSL_FP;
				}
			} else {
				if (copyout(fpr, (void *)frame->dar,
				    sizeof(double)) != 0)
					return -1;
			}
			return 0;
		}
		break;
	}

	return -1;
}
#endif