/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: trap.c,v 1.26 2000/05/27 00:40:40 sommerfeld Exp $
 */

#ifndef lint
static const char rcsid[] =
  "$FreeBSD: head/sys/powerpc/aim/trap.c 91090 2002-02-22 23:58:22Z julian $";
#endif /* not lint */

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/reboot.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/user.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/trap.h>

/* These definitions should probably be somewhere else				XXX */
#define	FIRSTARG	3		/* first argument is in reg 3 */
#define	NARGREG		8		/* 8 args are in registers */
#define	MOREARGS(sp)	((caddr_t)((int)(sp) + 8)) /* more args go here */
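/*
 * Background these macros assume (32-bit SVR4 ABI): integer arguments
 * arrive in r3-r10, i.e. eight registers starting at FIRSTARG.  Any
 * further arguments live in the caller's parameter save area, which
 * starts 8 bytes above the stack pointer, just past the back-chain and
 * LR-save words; MOREARGS() computes that address from the user's r1.
 */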

#ifdef WITNESS
extern char *syscallnames[];
#endif

#if 0 /* XXX: not used yet */
static int fix_unaligned __P((struct proc *p, struct trapframe *frame));
#endif
static void trap_fatal __P((struct trapframe *frame));
static void printtrap __P((int vector, struct trapframe *frame, int isfatal,
			      int user));
static int trap_pfault __P((struct trapframe *frame, int user));
static int handle_onfault (struct trapframe *frame);

static const char *ppc_exception_names[] = {
	"Reserved 0",				/* 0 */
	"Reset",				/* 1 */
	"Machine Check",			/* 2 */
	"Data Storage Interrupt",		/* 3 */
	"Instruction Storage Interrupt",	/* 4 */
	"External Interrupt",			/* 5 */
	"Alignment Interrupt",			/* 6 */
	"Program Interrupt",			/* 7 */
	"Floating Point Unavailable",		/* 8 */
	"Decrementer Interrupt",		/* 9 */
	"Reserved",				/* 10 */
	"Reserved",				/* 11 */
	"System Call",				/* 12 */
	"Trace",				/* 13 */
	"Floating Point Assist",		/* 14 */
	"Performance Monitoring",		/* 15 */
	"Instruction TLB Miss",			/* 16 */
	"Data Load TLB Miss",			/* 17 */
	"Data Store TLB Miss",			/* 18 */
	"Instruction Breakpoint",		/* 19 */
	"System Management Interrupt",		/* 20 */
	"Reserved 21",				/* 21 */
	"Reserved 22",				/* 22 */
	"Reserved 23",				/* 23 */
	"Reserved 24",				/* 24 */
	"Reserved 25",				/* 25 */
	"Reserved 26",				/* 26 */
	"Reserved 27",				/* 27 */
	"Reserved 28",				/* 28 */
	"Reserved 29",				/* 29 */
	"Reserved 30",				/* 30 */
	"Reserved 31",				/* 31 */
	"Reserved 32",				/* 32 */
	"Reserved 33",				/* 33 */
	"Reserved 34",				/* 34 */
	"Reserved 35",				/* 35 */
	"Reserved 36",				/* 36 */
	"Reserved 37",				/* 37 */
	"Reserved 38",				/* 38 */
	"Reserved 39",				/* 39 */
	"Reserved 40",				/* 40 */
	"Reserved 41",				/* 41 */
	"Reserved 42",				/* 42 */
	"Reserved 43",				/* 43 */
	"Reserved 44",				/* 44 */
	"Reserved 45",				/* 45 */
	"Reserved 46",				/* 46 */
	"Reserved 47",				/* 47 */
};

static void
printtrap(int vector, struct trapframe *frame, int isfatal, int user)
{

	printf("\n");
	printf("%s %s trap:\n", isfatal ? "fatal" : "handled",
	    user ? "user" : "kernel");
	printf("\n");
	printf("   exception       = 0x%x (%s)\n", vector >> 8,
	    ppc_exception_names[vector >> 8]);
	switch (vector) {
	case EXC_DSI:
		printf("   virtual address = 0x%x\n", frame->dar);
		break;
	case EXC_ISI:
		printf("   virtual address = 0x%x\n", frame->srr0);
		break;
	}
	printf("   srr0            = 0x%x\n", frame->srr0);
	printf("   curthread       = %p\n", curthread);
	if (curthread != NULL)
		printf("          pid = %d, comm = %s\n",
		    curthread->td_proc->p_pid, curthread->td_proc->p_comm);
	printf("\n");
}

static void
trap_fatal(struct trapframe *frame)
{

	printtrap(frame->exc, frame, 1, (frame->srr1 & PSL_PR));
#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(frame->exc, 0, frame))
		return;
#endif
	panic("%s Trap", ppc_exception_names[frame->exc >> 8]);
}

/*
 * Handles a fatal fault when we have onfault state to recover.  Returns
 * non-zero if there was onfault recovery state available.
 */
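/*
 * Layout of the faultbuf as consumed here (and as setfault() is expected
 * to have saved it): slot 0 holds the resume pc loaded into srr0, slots
 * 1-3 hold r1, r2 and the condition register, and slots 4 and up hold the
 * non-volatile registers r13-r31.
 */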
static int
handle_onfault (struct trapframe *frame)
{
	struct thread *td;
	faultbuf *fb;

	td = curthread;
	fb = td->td_pcb->pcb_onfault;
	if (fb != NULL) {
		frame->srr0 = (*fb)[0];
		frame->fixreg[1] = (*fb)[1];
		frame->fixreg[2] = (*fb)[2];
		frame->cr = (*fb)[3];
		bcopy(&(*fb)[4], &frame->fixreg[13],
		    19 * sizeof(register_t));
		return (1);
	}
	return (0);
}

void
trap(struct trapframe *frame)
{
	struct thread *td;
	struct proc *p;
	int sig, type, user;
	u_int sticks, ucode;

	atomic_add_int(&cnt.v_trap, 1);

	td = curthread;
	p = td->td_proc;

	type = frame->exc;
	ucode = type;
	sig = 0;
	user = (frame->srr1 & PSL_PR);
	sticks = 0;

	CTR3(KTR_TRAP, "trap: %s type=%s (%s)", p->p_comm,
	    ppc_exception_names[type >> 8],
	    user ? "user" : "kernel");

	if (user) {
		sticks = td->td_kse->ke_sticks;
		td->td_frame = frame;
#ifdef DIAGNOSTIC			/* see the comment in ast() */
		if (td->td_ucred)
			panic("trap:thread got a cred while userspace");
		td->td_ucred = td->td_ucred_cache;
		td->td_ucred_cache = NULL;
#endif /* DIAGNOSTIC */
		if (td->td_ucred != p->p_ucred)
			cred_update_thread(td);

		/* User Mode Traps */
		switch (type) {
		case EXC_TRC:
			frame->srr1 &= ~PSL_SE;
			sig = SIGTRAP;
			break;
		case EXC_DSI:
		case EXC_ISI:
			sig = trap_pfault(frame, 1);
			break;
		case EXC_SC:
			syscall(frame);
			break;
		case EXC_FPU:
			enable_fpu(PCPU_GET(curpcb));
			frame->srr1 |= PSL_FP;
			break;

		case EXC_ALI:
#if 0
			if (fix_unaligned(p, frame) != 0)
#endif
				sig = SIGBUS;
#if 0
			else
				frame->srr0 += 4;
#endif
			break;

		case EXC_PGM:
			/*
			 * The SRR1 trap bit (0x20000) is set when the
			 * exception was raised by a trap instruction,
			 * i.e. a breakpoint; anything else is treated as
			 * an illegal instruction.
			 */
			if (frame->srr1 & 0x00020000)
				sig = SIGTRAP;
			else
				sig = SIGILL;
			break;

		default:
			trap_fatal(frame);
		}
	} else {
		/* Kernel Mode Traps */

		KASSERT(cold || td->td_ucred != NULL,
		    ("kernel trap doesn't have ucred"));
		switch (type) {
		case EXC_DSI:
			if (trap_pfault(frame, 0) == 0)
				return;
			break;
		case EXC_MCHK:
			if (handle_onfault(frame))
				return;
			break;
		default:
			break;
		}
		/*
		 * Any kernel-mode trap we could not recover from is fatal.
		 * Never fall into the user-return path below.
		 */
		trap_fatal(frame);
		return;
	}
	if (sig != 0) {
		if (p->p_sysent->sv_transtrap != NULL)
			sig = (p->p_sysent->sv_transtrap)(sig, type);
		trapsignal(p, sig, ucode);
	}
	userret(td, frame, sticks);
	mtx_assert(&Giant, MA_NOTOWNED);
#ifdef DIAGNOSTIC			/* see the comment in ast() */
	if (td->td_ucred_cache)
		panic("trap:thread already has cached ucred");
	td->td_ucred_cache = td->td_ucred;
	td->td_ucred = NULL;
#endif /* DIAGNOSTIC */
}

void
syscall(struct trapframe *frame)
{
	caddr_t params;
	struct sysent *callp;
	struct thread *td;
	struct proc *p;
	int error, n;
	size_t narg;
	register_t args[10];
	u_int code;

	td = curthread;
	p = td->td_proc;

	atomic_add_int(&cnt.v_syscall, 1);

	code = frame->fixreg[0];
	params = (caddr_t)(frame->fixreg + FIRSTARG);

	if (p->p_sysent->sv_prepsyscall) {
		/*
		 * The prep code is MP aware.
		 */
		(*p->p_sysent->sv_prepsyscall)(frame, args, &code, &params);
	} else if (code == SYS_syscall) {
		/*
		 * code is first argument,
		 * followed by actual args.
		 */
		code = *(u_int *)params;
		params += sizeof(register_t);
	} else if (code == SYS___syscall) {
		/*
		 * Like syscall, but code is a quad, so as to maintain
		 * quad alignment for the rest of the args.  On this
		 * big-endian CPU the code sits in the low (second) word
		 * of the quad.
		 */
		params += sizeof(register_t);
		code = *(u_int *)params;
		params += sizeof(register_t);
	}
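	/*
	 * Example of the indirection above: a user call such as
	 * syscall(SYS_write, fd, buf, nbytes) enters with r0 = SYS_syscall,
	 * r3 = SYS_write, r4 = fd, r5 = buf and r6 = nbytes, so once the
	 * code has been consumed params points at the real first argument
	 * in the saved r4.
	 */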

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

	/*
	 * n is the number of argument registers left over after the code
	 * indirection above; any remaining arguments are copied in from
	 * the caller's stack.
	 */
	n = NARGREG - (params - (caddr_t)(frame->fixreg + FIRSTARG)) /
	    sizeof(register_t);
	if (narg > n) {
		bcopy(params, args, n * sizeof(register_t));
		error = copyin(MOREARGS(frame->fixreg[1]), args + n,
		    (narg - n) * sizeof(register_t));
		if (error != 0) {
#ifdef	KTRACE
			/* Can't get all the arguments! */
			if (KTRPOINT(p, KTR_SYSCALL))
				ktrsyscall(p->p_tracep, code, narg, args);
#endif
			goto bad;
		}
		params = (caddr_t)args;
	}
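	/*
	 * Worked example of the split above: with the SYS___syscall
	 * indirection two of the eight argument registers are already
	 * spent, so n is 6; a 7-argument call therefore takes r5-r10 from
	 * the trapframe and copies its last argument from the user stack
	 * at sp + 8.
	 */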

	/*
	 * Try to run the syscall without Giant if the syscall is MP safe.
	 */
	if ((callp->sy_narg & SYF_MPSAFE) == 0)
		mtx_lock(&Giant);

#ifdef	KTRACE
	if (KTRPOINT(p, KTR_SYSCALL))
		ktrsyscall(p->p_tracep, code, narg, params);
#endif
	td->td_retval[0] = 0;
	td->td_retval[1] = frame->fixreg[FIRSTARG + 1];

	STOPEVENT(p, S_SCE, narg);

	/*
	 * params points at the arguments, whether they are still in the
	 * trapframe or were assembled into args[] above.
	 */
	error = (*callp->sy_call)(td, params);
	switch (error) {
	case 0:
		frame->fixreg[FIRSTARG] = td->td_retval[0];
		frame->fixreg[FIRSTARG + 1] = td->td_retval[1];
		/* Clear CR0[SO], the error bit of the syscall convention. */
		frame->cr &= ~0x10000000;
		break;
	case ERESTART:
		/*
		 * Set user's pc back to redo the system call.
		 */
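		/*
		 * (srr0 holds the address of the instruction following the
		 * sc, and PowerPC instructions are 4 bytes wide.)
		 */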
		frame->srr0 -= 4;
		break;
	case EJUSTRETURN:
		/* nothing to do */
		break;
	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame->fixreg[FIRSTARG] = error;
		/*
		 * Set CR0[SO]; it plays the role the carry flag does on
		 * other ports: the libc syscall stub tests it to detect
		 * an error return.
		 */
		frame->cr |= 0x10000000;
		break;
	}

#ifdef	KTRACE
	if (KTRPOINT(p, KTR_SYSRET))
		ktrsysret(p->p_tracep, code, error, td->td_retval[0]);
#endif

	if ((callp->sy_narg & SYF_MPSAFE) == 0)
		mtx_unlock(&Giant);

	/*
	 * Does the comment in the i386 code about errno apply here?
	 */
	STOPEVENT(p, S_SCX, code);

#ifdef WITNESS
	if (witness_list(td)) {
		panic("system call %s returning with mutex(s) held\n",
		    syscallnames[code]);
	}
#endif
	mtx_assert(&sched_lock, MA_NOTOWNED);
	mtx_assert(&Giant, MA_NOTOWNED);
}

static int
trap_pfault(struct trapframe *frame, int user)
{
	vm_offset_t eva, va;
	struct thread *td;
	struct proc *p;
	vm_map_t map;
	vm_prot_t ftype;
	int rv;

	td = curthread;
	p = td->td_proc;
	if (frame->exc == EXC_ISI) {
		eva = frame->srr0;
		ftype = VM_PROT_READ | VM_PROT_EXECUTE;
	} else {
		eva = frame->dar;
		if (frame->dsisr & DSISR_STORE)
			ftype = VM_PROT_READ | VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;
	}

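	/*
	 * User-mode faults are always resolved against the process's map.
	 * Kernel accesses to user memory go through the USER_SR segment
	 * window set up by copyin()/copyout() below, so a kernel-mode
	 * fault in that segment is really a fault on user memory: read
	 * the segment register to find which user segment was mapped
	 * there and rebuild the user's effective address before the
	 * lookup.  Everything else is a plain kernel address.
	 */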
	if (user) {
		map = &p->p_vmspace->vm_map;
	} else if ((eva >> ADDR_SR_SHFT) == USER_SR) {
		u_int user_sr;

		if (p->p_vmspace == NULL)
			return (SIGSEGV);

		__asm ("mfsr %0, %1"
		    : "=r"(user_sr)
		    : "K"(USER_SR));
		eva &= ADDR_PIDX | ADDR_POFF;
		eva |= user_sr << ADDR_SR_SHFT;
		map = &p->p_vmspace->vm_map;
	} else {
		map = kernel_map;
	}
	va = trunc_page(eva);

	mtx_lock(&Giant);
	if (map != kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PROC_LOCK(p);
		++p->p_lock;
		PROC_UNLOCK(p);

		/*
		 * Grow the stack if necessary.
		 * grow_stack returns false only if va falls into
		 * a growable stack region and the stack growth
		 * fails.  It returns true if va was not within
		 * a growable stack region, or if the stack
		 * growth succeeded.
		 */
		if (!grow_stack(p, va))
			rv = KERN_FAILURE;
		else
			/* Fault in the user page: */
			rv = vm_fault(map, va, ftype,
			    (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
						    : VM_FAULT_NORMAL);

		PROC_LOCK(p);
		--p->p_lock;
		PROC_UNLOCK(p);
	} else {
		/*
		 * Don't have to worry about process locking or stacks in the
		 * kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}
	mtx_unlock(&Giant);

	if (rv == KERN_SUCCESS)
		return (0);

	if (!user && handle_onfault(frame))
		return (0);

	return (SIGSEGV);
}

#if 0 /* XXX: child_return not used */
/*
 * XXX: the trapframe return values should be setup in vm_machdep.c in
 * cpu_fork().
 */
void
child_return(void *arg)
{
	struct proc *p;
	struct trapframe *tf;

	p = arg;
	tf = trapframe(p);

	tf->fixreg[FIRSTARG] = 0;
	tf->fixreg[FIRSTARG + 1] = 1;
	tf->cr &= ~0x10000000;
	tf->srr1 &= ~PSL_FP;	/* Disable FPU, as we can't be fpuproc */
#ifdef	KTRACE
	if (KTRPOINT(p, KTR_SYSRET))
		ktrsysret(p, SYS_fork, 0, 0);
#endif
	/* Profiling?							XXX */
	curcpu()->ci_schedstate.spc_curpriority = p->p_priority;
}
#endif

static __inline void
setusr(int content)
{

	__asm __volatile ("isync; mtsr %0,%1; isync"
			  :: "n"(USER_SR), "r"(content));
}

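/*
 * How the user-space copies below work: segment register USER_SR is kept
 * free as a window onto the current process's address space.  For each
 * chunk, setusr() loads the user pmap's value for the source/destination
 * segment into USER_SR (the isyncs provide the context synchronization
 * mtsr requires), the address is re-based into the USER_ADDR segment,
 * and the data is moved with bcopy() one segment (at most SEGMENT_LENGTH
 * bytes) at a time.  Fault recovery via setfault() is still disabled
 * here, so an unmapped user address currently ends in a fatal kernel
 * trap rather than an EFAULT return.
 */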
int
copyin(udaddr, kaddr, len)
	const void *udaddr;
	void *kaddr;
	size_t len;
{
	const char *up;
	char *kp;
	char *p;
	size_t l;
	faultbuf env;
	u_int segment;

	up = udaddr;
	kp = kaddr;

#if 0
	if (setfault(env)) {
		PCPU_GET(curpcb)->pcb_onfault = 0;
		return EFAULT;
	}
#endif
	while (len > 0) {
		p = (char *)USER_ADDR + ((u_int)up & ~SEGMENT_MASK);
		l = ((char *)USER_ADDR + SEGMENT_LENGTH) - p;
		if (l > len)
			l = len;
		segment = (u_int)up >> ADDR_SR_SHFT;
		setusr(PCPU_GET(curpcb)->pcb_pm->pm_sr[segment]);
		bcopy(p, kp, l);
		up += l;
		kp += l;
		len -= l;
	}
	PCPU_GET(curpcb)->pcb_onfault = 0;
	return 0;
}

int
copyout(kaddr, udaddr, len)
	const void *kaddr;
	void *udaddr;
	size_t len;
{
	const char *kp;
	char *up;
	char *p;
	size_t l;
	faultbuf env;
	u_int segment;

	kp = kaddr;
	up = udaddr;

#if 0
	if (setfault(env)) {
		PCPU_GET(curpcb)->pcb_onfault = 0;
		return EFAULT;
	}
#endif
	while (len > 0) {
		p = (char *)USER_ADDR + ((u_int)up & ~SEGMENT_MASK);
		l = ((char *)USER_ADDR + SEGMENT_LENGTH) - p;
		if (l > len)
			l = len;
		segment = (u_int)up >> ADDR_SR_SHFT;
		setusr(PCPU_GET(curpcb)->pcb_pm->pm_sr[segment]);
		bcopy(kp, p, l);
		up += l;
		kp += l;
		len -= l;
	}
	PCPU_GET(curpcb)->pcb_onfault = 0;
	return 0;
}

#if 0 /* XXX: not used yet */
/*
 * kcopy(const void *src, void *dst, size_t len);
 *
 * Copy len bytes from src to dst, aborting if we encounter a fatal
 * page fault.
 *
 * kcopy() _must_ save and restore the old fault handler since it is
 * called by uiomove(), which may be in the path of servicing a non-fatal
 * page fault.
 */
int
kcopy(const void *src, void *dst, size_t len)
{
	faultbuf env, *oldfault;

	oldfault = PCPU_GET(curpcb)->pcb_onfault;
	if (setfault(env)) {
		PCPU_GET(curpcb)->pcb_onfault = oldfault;
		return EFAULT;
	}

	bcopy(src, dst, len);

	PCPU_GET(curpcb)->pcb_onfault = oldfault;
	return 0;
}

int
badaddr(void *addr, size_t size)
{

	return badaddr_read(addr, size, NULL);
}

int
badaddr_read(void *addr, size_t size, int *rptr)
{
	faultbuf env;
	int x;

	/* Get rid of any stale machine checks that have been waiting.  */
	__asm __volatile ("sync; isync");

	if (setfault(env)) {
		PCPU_GET(curpcb)->pcb_onfault = 0;
		__asm __volatile ("sync");
		return 1;
	}

	__asm __volatile ("sync");

	switch (size) {
	case 1:
		x = *(volatile int8_t *)addr;
		break;
	case 2:
		x = *(volatile int16_t *)addr;
		break;
	case 4:
		x = *(volatile int32_t *)addr;
		break;
	default:
		panic("badaddr: invalid size (%d)", size);
	}

	/* Make sure we took the machine check, if we caused one. */
	__asm __volatile ("sync; isync");

	PCPU_GET(curpcb)->pcb_onfault = 0;
	__asm __volatile ("sync");	/* To be sure. */

	/* Use the value to avoid reorder. */
	if (rptr)
		*rptr = x;

	return 0;
}
#endif


/*
 * For now, this only deals with the particular unaligned access case
 * that gcc tends to generate.  Eventually it should handle all of the
 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
 */
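/*
 * The case in question is typically a floating-point load or store
 * (lfd/stfd) to an address the hardware declines to handle directly,
 * for instance a double tucked behind an odd-sized field in a packed
 * structure.  The alignment interrupt leaves the faulting opcode and
 * target FPR encoded in DSISR, which EXC_ALI_OPCODE_INDICATOR() and
 * EXC_ALI_RST() below decode.
 */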

#if 0 /* XXX: Not used yet */
static int
fix_unaligned(p, frame)
	struct proc *p;
	struct trapframe *frame;
{
	struct pcb *pcb;
	int indicator;

	pcb = PCPU_GET(curpcb);
	indicator = EXC_ALI_OPCODE_INDICATOR(frame->dsisr);

	switch (indicator) {
	case EXC_ALI_LFD:
	case EXC_ALI_STFD:
		{
			int reg = EXC_ALI_RST(frame->dsisr);
			double *fpr = &p->p_addr->u_pcb.pcb_fpu.fpr[reg];

			/* Juggle the FPU to ensure that we've initialized
			 * the FPRs, and that their current state is in
			 * the PCB.
			 */
			if (!(pcb->pcb_flags & PCB_FPU)) {
				enable_fpu(PCPU_GET(curpcb));
				frame->srr1 |= PSL_FP;
			}
			save_fpu(PCPU_GET(curpcb));

			if (indicator == EXC_ALI_LFD) {
				if (copyin((void *)frame->dar, fpr,
				    sizeof(double)) != 0)
					return -1;
				if (!(pcb->pcb_flags & PCB_FPU)) {
					enable_fpu(PCPU_GET(curpcb));
					frame->srr1 |= PSL_FP;
				}
			} else {
				if (copyout(fpr, (void *)frame->dar,
				    sizeof(double)) != 0)
					return -1;
			}
			return 0;
		}
		break;
	}

	return -1;
}
#endif