/*
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: trap.c,v 1.26 2000/05/27 00:40:40 sommerfeld Exp $
 */

#ifndef lint
static const char rcsid[] =
  "$FreeBSD: head/sys/powerpc/aim/trap.c 92824 2002-03-20 21:09:09Z jhb $";
#endif /* not lint */

#include "opt_ddb.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/pioctl.h>
#include <sys/reboot.h>
#include <sys/syscall.h>
#include <sys/sysent.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/user.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_param.h>

#include <machine/cpu.h>
#include <machine/frame.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/trap.h>

/* These definitions should probably be somewhere else				XXX */
#define	FIRSTARG	3		/* first argument is in reg 3 */
#define	NARGREG		8		/* 8 args are in registers */
#define	MOREARGS(sp)	((caddr_t)((int)(sp) + 8)) /* more args go here */
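
/*
 * Illustrative sketch, kept disabled: under the convention above, a system
 * call finds its first NARGREG arguments in fixreg[FIRSTARG] through
 * fixreg[FIRSTARG + NARGREG - 1], and any remaining arguments on the user
 * stack at MOREARGS(sp).  The helper below is hypothetical and unused; it
 * only shows how argument n would be located.  syscall() further down does
 * the real marshalling in bulk.
 */
#if 0
static register_t
syscall_arg(struct trapframe *frame, int n)
{
	register_t arg;

	if (n < NARGREG)
		return (frame->fixreg[FIRSTARG + n]);
	/* Stack-resident arguments live in user space; fetch with copyin(). */
	if (copyin(MOREARGS(frame->fixreg[1]) +
	    (n - NARGREG) * sizeof(register_t), &arg, sizeof(arg)) != 0)
		return (0);	/* fault; a real caller would report EFAULT */
	return (arg);
}
#endif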

#ifdef WITNESS
extern char *syscallnames[];
#endif

#if 0 /* XXX: not used yet */
static int fix_unaligned __P((struct proc *p, struct trapframe *frame));
#endif
static void trap_fatal __P((struct trapframe *frame));
static void printtrap __P((int vector, struct trapframe *frame, int isfatal,
			      int user));
static int trap_pfault __P((struct trapframe *frame, int user));
static int handle_onfault(struct trapframe *frame);

static const char *ppc_exception_names[] = {
	"reserved 0",				/* 0 */
	"reset",				/* 1 */
	"machine check",			/* 2 */
	"data storage interrupt",		/* 3 */
	"instruction storage interrupt",	/* 4 */
	"external interrupt",			/* 5 */
	"alignment interrupt",			/* 6 */
	"program interrupt",			/* 7 */
	"floating point unavailable",		/* 8 */
	"decrementer interrupt",		/* 9 */
	"reserved",				/* 10 */
	"reserved",				/* 11 */
	"system call",				/* 12 */
	"trace",				/* 13 */
	"floating point assist",		/* 14 */
	"performance monitoring",		/* 15 */
	"instruction tlb miss",			/* 16 */
	"data load tlb miss",			/* 17 */
	"data store tlb miss",			/* 18 */
	"instruction breakpoint",		/* 19 */
	"system management interrupt",		/* 20 */
	"reserved 21",				/* 21 */
	"reserved 22",				/* 22 */
	"reserved 23",				/* 23 */
	"reserved 24",				/* 24 */
	"reserved 25",				/* 25 */
	"reserved 26",				/* 26 */
	"reserved 27",				/* 27 */
	"reserved 28",				/* 28 */
	"reserved 29",				/* 29 */
	"reserved 30",				/* 30 */
	"reserved 31",				/* 31 */
	"reserved 32",				/* 32 */
	"reserved 33",				/* 33 */
	"reserved 34",				/* 34 */
	"reserved 35",				/* 35 */
	"reserved 36",				/* 36 */
	"reserved 37",				/* 37 */
	"reserved 38",				/* 38 */
	"reserved 39",				/* 39 */
	"reserved 40",				/* 40 */
	"reserved 41",				/* 41 */
	"reserved 42",				/* 42 */
	"reserved 43",				/* 43 */
	"reserved 44",				/* 44 */
	"reserved 45",				/* 45 */
	"reserved 46",				/* 46 */
	"reserved 47",				/* 47 */
};

static void
printtrap(int vector, struct trapframe *frame, int isfatal, int user)
{

	printf("\n");
	printf("%s %s trap:\n", isfatal ? "fatal" : "handled",
	    user ? "user" : "kernel");
	printf("\n");
	printf("   exception       = 0x%x (%s)\n", vector >> 8,
	    ppc_exception_names[vector >> 8]);
	switch (vector) {
	case EXC_DSI:
		printf("   virtual address = 0x%x\n", frame->dar);
		break;
	case EXC_ISI:
		printf("   virtual address = 0x%x\n", frame->srr0);
		break;
	}
	printf("   srr0            = 0x%x\n", frame->srr0);
	printf("   curthread       = %p\n", curthread);
	if (curthread != NULL)
		printf("          pid = %d, comm = %s\n",
		    curthread->td_proc->p_pid, curthread->td_proc->p_comm);
	printf("\n");
}

static void
trap_fatal(struct trapframe *frame)
{

	printtrap(frame->exc, frame, 1, (frame->srr1 & PSL_PR));
#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(frame->exc, 0, frame))
		return;
#endif
	panic("%s Trap", ppc_exception_names[frame->exc >> 8]);
}

/*
 * Handles a fatal fault when we have onfault state to recover.  Returns
 * non-zero if there was onfault recovery state available.
 */
static int
handle_onfault(struct trapframe *frame)
{
	struct thread *td;
	faultbuf *fb;

	td = curthread;
	fb = td->td_pcb->pcb_onfault;
	if (fb != NULL) {
		frame->srr0 = (*fb)[0];
		frame->fixreg[1] = (*fb)[1];
		frame->fixreg[2] = (*fb)[2];
		frame->cr = (*fb)[3];
		bcopy(&(*fb)[4], &frame->fixreg[13],
		    19 * sizeof(register_t));
		return (1);
	}
	return (0);
}
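
/*
 * Illustrative sketch, kept disabled: code that touches user memory would
 * normally arm pcb_onfault with setfault() before the access, so that
 * handle_onfault() above can unwind to the saved register state if the
 * access faults.  The helper below is hypothetical and unused; copyin() and
 * copyout() further down show the same pattern, although their setfault()
 * calls are still disabled.
 */
#if 0
static int
fetch_user_int(const int *uaddr, int *valp)
{
	faultbuf env;

	if (setfault(env)) {
		/* We get here after handle_onfault() unwinds a fault. */
		PCPU_GET(curpcb)->pcb_onfault = 0;
		return (EFAULT);
	}
	*valp = *uaddr;
	PCPU_GET(curpcb)->pcb_onfault = 0;
	return (0);
}
#endif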

void
trap(struct trapframe *frame)
{
	struct thread *td;
	struct proc *p;
	int sig, type, user;
	u_int sticks, ucode;

	atomic_add_int(&cnt.v_trap, 1);

	td = curthread;
	p = td->td_proc;

	type = frame->exc;
	ucode = type;
	sig = 0;
	user = (frame->srr1 & PSL_PR);
	sticks = 0;

	CTR3(KTR_TRAP, "trap: %s type=%s (%s)", p->p_comm,
	    ppc_exception_names[type >> 8],
	    user ? "user" : "kernel");

	if (user) {
		sticks = td->td_kse->ke_sticks;
		td->td_frame = frame;
		if (td->td_ucred != p->p_ucred)
			cred_update_thread(td);

		/* User Mode Traps */
		switch (type) {
		case EXC_TRC:
			frame->srr1 &= ~PSL_SE;
			sig = SIGTRAP;
			break;
		case EXC_DSI:
		case EXC_ISI:
			sig = trap_pfault(frame, 1);
			break;
		case EXC_SC:
			syscall(frame);
			break;
		case EXC_FPU:
			enable_fpu(PCPU_GET(curpcb));
			frame->srr1 |= PSL_FP;
			break;

		case EXC_ALI:
#if 0
			if (fix_unaligned(p, frame) != 0)
#endif
				sig = SIGBUS;
#if 0
			else
				frame->srr0 += 4;
#endif
			break;

		case EXC_PGM:
			/* XXX temporarily */
			/* XXX: Magic Number? */
			if (frame->srr1 & 0x0002000)
				sig = SIGTRAP;
			else
				sig = SIGILL;
			break;

		default:
			trap_fatal(frame);
		}
	} else {
		/* Kernel Mode Traps */

		KASSERT(cold || td->td_ucred != NULL,
		    ("kernel trap doesn't have ucred"));
		switch (type) {
		case EXC_DSI:
			if (trap_pfault(frame, 0) == 0)
				return;
			break;
		case EXC_MCHK:
			if (handle_onfault(frame))
				return;
			break;
		default:
			trap_fatal(frame);
		}
		/* NOTREACHED */
	}
	if (sig != 0) {
		if (p->p_sysent->sv_transtrap != NULL)
			sig = (p->p_sysent->sv_transtrap)(sig, type);
		trapsignal(p, sig, ucode);
	}
	userret(td, frame, sticks);
	mtx_assert(&Giant, MA_NOTOWNED);
#ifdef DIAGNOSTIC
	cred_free_thread(td);
#endif
}

void
syscall(struct trapframe *frame)
{
	caddr_t params;
	struct sysent *callp;
	struct thread *td;
	struct proc *p;
	int error, n;
	size_t narg;
	register_t args[10];
	u_int code;

	td = curthread;
	p = td->td_proc;

	atomic_add_int(&cnt.v_syscall, 1);

	code = frame->fixreg[0];
	params = (caddr_t) (frame->fixreg + FIRSTARG);

	if (p->p_sysent->sv_prepsyscall)
		/*
		 * The prep code is MP aware.
		 */
		(*p->p_sysent->sv_prepsyscall)(frame, args, &code, &params);
	else if (code == SYS_syscall)
		/*
		 * code is first argument,
		 * followed by actual args.
		 */
		code = *params++;
	else if (code == SYS___syscall) {
		/*
		 * Like syscall, but code is a quad,
		 * so as to maintain quad alignment
		 * for the rest of the args.
		 */
		params++;
		code = *params++;
	}

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

	n = NARGREG - (params - (caddr_t)(frame->fixreg + FIRSTARG));
	if (narg > n * sizeof(register_t)) {
		bcopy(params, args, n * sizeof(register_t));
		if ((error = copyin(MOREARGS(frame->fixreg[1]), args + n,
		    narg - n * sizeof(register_t))) != 0) {
#ifdef	KTRACE
			/* Can't get all the arguments! */
			if (KTRPOINT(p, KTR_SYSCALL))
				ktrsyscall(p->p_tracep, code, narg, args);
#endif
			goto bad;
		}
		params = (caddr_t) args;
	}

	/*
	 * Try to run the syscall without Giant if the syscall is MP safe.
	 */
	if ((callp->sy_narg & SYF_MPSAFE) == 0)
		mtx_lock(&Giant);

#ifdef	KTRACE
	if (KTRPOINT(p, KTR_SYSCALL))
		ktrsyscall(p->p_tracep, code, narg, params);
#endif
	td->td_retval[0] = 0;
	td->td_retval[1] = frame->fixreg[FIRSTARG + 1];

	STOPEVENT(p, S_SCE, narg);

	error = (*callp->sy_call)(td, args);
	switch (error) {
	case 0:
		frame->fixreg[FIRSTARG] = td->td_retval[0];
		frame->fixreg[FIRSTARG + 1] = td->td_retval[1];
		/* Clear CR0[SO]; the user syscall stub tests it for errors. */
		frame->cr &= ~0x10000000;
		break;
	case ERESTART:
		/*
		 * Set user's pc back to redo the system call.
		 */
		frame->srr0 -= 4;
		break;
	case EJUSTRETURN:
		/* nothing to do */
		break;
	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame->fixreg[FIRSTARG] = error;
		/* Set CR0[SO] to flag the error to the user syscall stub. */
		frame->cr |= 0x10000000;
		break;
	}

#ifdef	KTRACE
	if (KTRPOINT(p, KTR_SYSRET))
		ktrsysret(p->p_tracep, code, error, td->td_retval[0]);
#endif

	if ((callp->sy_narg & SYF_MPSAFE) == 0)
		mtx_unlock(&Giant);

	/*
	 * Does the comment in the i386 code about errno apply here?
	 */
	STOPEVENT(p, S_SCX, code);

#ifdef WITNESS
	if (witness_list(td)) {
		panic("system call %s returning with mutex(s) held\n",
		    syscallnames[code]);
	}
#endif
	mtx_assert(&sched_lock, MA_NOTOWNED);
	mtx_assert(&Giant, MA_NOTOWNED);
}

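/*
 * Resolve a page fault.  Faults that arrive through the USER_SR segment
 * window (used by copyin()/copyout() below to reach user space) are
 * translated back to the original user virtual address and faulted against
 * the current process's map; other kernel-mode faults go to kernel_map.
 */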
static int
trap_pfault(struct trapframe *frame, int user)
{
	vm_offset_t eva, va;
	struct thread *td;
	struct proc *p;
	vm_map_t map;
	vm_prot_t ftype;
	int rv;

	td = curthread;
	p = td->td_proc;
	if (frame->exc == EXC_ISI) {
		eva = frame->srr0;
		ftype = VM_PROT_READ | VM_PROT_EXECUTE;
	} else {
		eva = frame->dar;
		if (frame->dsisr & DSISR_STORE)
			ftype = VM_PROT_READ | VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;
	}

	if ((eva >> ADDR_SR_SHFT) != USER_SR) {
		if (user)
			return (SIGSEGV);
		map = kernel_map;
	} else {
		u_int user_sr;

		if (p->p_vmspace == NULL)
			return (SIGSEGV);

		__asm ("mfsr %0, %1"
		    : "=r"(user_sr)
		    : "K"(USER_SR));
		eva &= ADDR_PIDX | ADDR_POFF;
		eva |= user_sr << ADDR_SR_SHFT;
		map = &p->p_vmspace->vm_map;
	}
	va = trunc_page(eva);

	mtx_lock(&Giant);
	if (map != kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PROC_LOCK(p);
		++p->p_lock;
		PROC_UNLOCK(p);

		/*
		 * Grow the stack if necessary.  grow_stack() returns false
		 * only if va falls into a growable stack region and the
		 * stack growth fails.  It returns true if va was not within
		 * a growable stack region, or if the stack growth succeeded.
		 */
		if (!grow_stack(p, va))
			rv = KERN_FAILURE;
		else
			/* Fault in the user page: */
			rv = vm_fault(map, va, ftype,
			    (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
						    : VM_FAULT_NORMAL);

		PROC_LOCK(p);
		--p->p_lock;
		PROC_UNLOCK(p);
	} else {
		/*
		 * Don't have to worry about process locking or stacks in the
		 * kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}
	mtx_unlock(&Giant);

	if (rv == KERN_SUCCESS)
		return (0);

	if (!user && handle_onfault(frame))
		return (0);

	return (SIGSEGV);
}

#if 0 /* XXX: child_return not used */
/*
 * XXX: the trapframe return values should be setup in vm_machdep.c in
 * cpu_fork().
 */
void
child_return(void *arg)
{
	struct proc *p;
	struct trapframe *tf;

	p = arg;
	tf = trapframe(p);

	tf->fixreg[FIRSTARG] = 0;
	tf->fixreg[FIRSTARG + 1] = 1;
	tf->cr &= ~0x10000000;
	tf->srr1 &= ~PSL_FP;	/* Disable FPU, as we can't be fpuproc */
#ifdef	KTRACE
	if (KTRPOINT(p, KTR_SYSRET))
		ktrsysret(p, SYS_fork, 0, 0);
#endif
	/* Profiling?							XXX */
	curcpu()->ci_schedstate.spc_curpriority = p->p_priority;
}
#endif

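/*
 * Load a segment register value into the kernel's USER_SR slot, making the
 * selected 256MB user segment addressable through the USER_ADDR window.
 * copyin() and copyout() below use this to walk user buffers one segment
 * at a time.
 */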
static __inline void
setusr(int content)
{

	__asm __volatile ("isync; mtsr %0,%1; isync"
	    :: "n"(USER_SR), "r"(content));
}

int
copyin(udaddr, kaddr, len)
	const void *udaddr;
	void *kaddr;
	size_t len;
{
	const char	*up;
	char		*kp, *p;
	size_t		l;
	faultbuf	env;
	uint		segment;
	struct thread	*td;
	pmap_t		pm;

	up = udaddr;
	kp = kaddr;

#if 0
	if (setfault(env)) {
		PCPU_GET(curpcb)->pcb_onfault = 0;
		return EFAULT;
	}
#endif
	td = PCPU_GET(curthread);
	pm = &td->td_proc->p_vmspace->vm_pmap;
	while (len > 0) {
		p = (char *)USER_ADDR + ((u_int)up & ~SEGMENT_MASK);
		l = ((char *)USER_ADDR + SEGMENT_LENGTH) - p;
		if (l > len)
			l = len;
		segment = (uint)up >> ADDR_SR_SHFT;
		setusr(pm->pm_sr[segment]);
		bcopy(p, kp, l);
		up += l;
		kp += l;
		len -= l;
	}
	PCPU_GET(curpcb)->pcb_onfault = 0;
	return 0;
}

int
copyout(kaddr, udaddr, len)
	const void *kaddr;
	void *udaddr;
	size_t len;
{
	const char	*kp;
	char		*up, *p;
	size_t		l;
	faultbuf	env;
	unsigned int	segment;
	struct thread	*td;
	pmap_t		pm;

	kp = kaddr;
	up = udaddr;

#if 0
	if (setfault(env)) {
		PCPU_GET(curpcb)->pcb_onfault = 0;
		return EFAULT;
	}
#endif
	td = PCPU_GET(curthread);
	pm = &td->td_proc->p_vmspace->vm_pmap;
	while (len > 0) {
		p = (char *)USER_ADDR + ((u_int)up & ~SEGMENT_MASK);
		l = ((char *)USER_ADDR + SEGMENT_LENGTH) - p;
		if (l > len)
			l = len;
		segment = (u_int)up >> ADDR_SR_SHFT;
		setusr(pm->pm_sr[segment]);
		bcopy(kp, p, l);
		up += l;
		kp += l;
		len -= l;
	}
	PCPU_GET(curpcb)->pcb_onfault = 0;
	return 0;
}

#if 0 /* XXX: not used yet */
/*
 * kcopy(const void *src, void *dst, size_t len);
 *
 * Copy len bytes from src to dst, aborting if we encounter a fatal
 * page fault.
 *
 * kcopy() _must_ save and restore the old fault handler since it is
 * called by uiomove(), which may be in the path of servicing a non-fatal
 * page fault.
 */
int
kcopy(const void *src, void *dst, size_t len)
{
	faultbuf env, *oldfault;

	oldfault = PCPU_GET(curpcb)->pcb_onfault;
	if (setfault(env)) {
		PCPU_GET(curpcb)->pcb_onfault = oldfault;
		return EFAULT;
	}

	bcopy(src, dst, len);

	PCPU_GET(curpcb)->pcb_onfault = oldfault;
	return 0;
}

int
badaddr(void *addr, size_t size)
{

	return badaddr_read(addr, size, NULL);
}

int
badaddr_read(void *addr, size_t size, int *rptr)
{
	faultbuf env;
	int x;

	/* Get rid of any stale machine checks that have been waiting.  */
	__asm __volatile ("sync; isync");

	if (setfault(env)) {
		PCPU_GET(curpcb)->pcb_onfault = 0;
		__asm __volatile ("sync");
		return 1;
	}

	__asm __volatile ("sync");

	switch (size) {
	case 1:
		x = *(volatile int8_t *)addr;
		break;
	case 2:
		x = *(volatile int16_t *)addr;
		break;
	case 4:
		x = *(volatile int32_t *)addr;
		break;
	default:
		panic("badaddr: invalid size (%d)", size);
	}

	/* Make sure we took the machine check, if we caused one. */
	__asm __volatile ("sync; isync");

	PCPU_GET(curpcb)->pcb_onfault = 0;
	__asm __volatile ("sync");	/* To be sure. */

	/* Use the value to avoid reorder. */
	if (rptr)
		*rptr = x;

	return 0;
}
#endif

/*
 * For now, this only deals with the particular unaligned access case
 * that gcc tends to generate.  Eventually it should handle all of the
 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
 */

#if 0 /* XXX: Not used yet */
static int
fix_unaligned(p, frame)
	struct proc *p;
	struct trapframe *frame;
{
	struct pcb *pcb;
	int indicator;

	/* XXX: assumes the fault came from the current thread's FPU state. */
	pcb = PCPU_GET(curpcb);
	indicator = EXC_ALI_OPCODE_INDICATOR(frame->dsisr);

	switch (indicator) {
	case EXC_ALI_LFD:
	case EXC_ALI_STFD:
		{
			int reg = EXC_ALI_RST(frame->dsisr);
			double *fpr = &p->p_addr->u_pcb.pcb_fpu.fpr[reg];

			/*
			 * Juggle the FPU to ensure that we've initialized
			 * the FPRs, and that their current state is in
			 * the PCB.
			 */
			if (!(pcb->pcb_flags & PCB_FPU)) {
				enable_fpu(pcb);
				frame->srr1 |= PSL_FP;
			}
			save_fpu(pcb);

			if (indicator == EXC_ALI_LFD) {
				if (copyin((void *)frame->dar, fpr,
				    sizeof(double)) != 0)
					return -1;
				if (!(pcb->pcb_flags & PCB_FPU)) {
					enable_fpu(pcb);
					frame->srr1 |= PSL_FP;
				}
			} else {
				if (copyout(fpr, (void *)frame->dar,
				    sizeof(double)) != 0)
					return -1;
			}
			return 0;
		}
		break;
	}

	return -1;
}
#endif