/* trap.c revision 276142 */
/*-
 * Copyright (C) 1995, 1996 Wolfgang Solfrank.
 * Copyright (C) 1995, 1996 TooLs GmbH.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by TooLs GmbH.
 * 4. The name of TooLs GmbH may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY TOOLS GMBH ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL TOOLS GMBH BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $NetBSD: trap.c,v 1.58 2002/03/04 04:07:35 dbj Exp $
 */
33
34#include <sys/cdefs.h>
35__FBSDID("$FreeBSD: head/sys/powerpc/aim/trap.c 276142 2014-12-23 15:38:19Z markj $");
36
37#include <sys/param.h>
38#include <sys/kdb.h>
39#include <sys/proc.h>
40#include <sys/ktr.h>
41#include <sys/lock.h>
42#include <sys/mutex.h>
43#include <sys/pioctl.h>
44#include <sys/ptrace.h>
45#include <sys/reboot.h>
46#include <sys/syscall.h>
47#include <sys/sysent.h>
48#include <sys/systm.h>
49#include <sys/kernel.h>
50#include <sys/uio.h>
51#include <sys/signalvar.h>
52#include <sys/vmmeter.h>
53
54#include <security/audit/audit.h>
55
56#include <vm/vm.h>
57#include <vm/pmap.h>
58#include <vm/vm_extern.h>
59#include <vm/vm_param.h>
60#include <vm/vm_kern.h>
61#include <vm/vm_map.h>
62#include <vm/vm_page.h>
63
64#include <machine/_inttypes.h>
65#include <machine/altivec.h>
66#include <machine/cpu.h>
67#include <machine/db_machdep.h>
68#include <machine/fpu.h>
69#include <machine/frame.h>
70#include <machine/pcb.h>
71#include <machine/pmap.h>
72#include <machine/psl.h>
73#include <machine/trap.h>
74#include <machine/spr.h>
75#include <machine/sr.h>
76
/* Handlers private to this file. */
static void	trap_fatal(struct trapframe *frame);
static void	printtrap(u_int vector, struct trapframe *frame, int isfatal,
		    int user);
static int	trap_pfault(struct trapframe *frame, int user);
static int	fix_unaligned(struct thread *td, struct trapframe *frame);
static int	handle_onfault(struct trapframe *frame);
static void	syscall(struct trapframe *frame);

#ifdef __powerpc64__
/* External linkage: presumably called from the low-level trap stubs — confirm. */
       void	handle_kernel_slb_spill(int, register_t, register_t);
static int	handle_user_slb_spill(pmap_t pm, vm_offset_t addr);
extern int	n_slbs;		/* number of hardware SLB entries */
#endif
90
/* One entry of the vector-number -> printable-name table below. */
struct powerpc_exception {
	u_int	vector;		/* exception vector offset */
	char	*name;		/* human-readable description */
};
95
#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

/*
 * Hook invoked for kernel-mode EXC_PGM breakpoint traps (see trap()
 * below); presumably installed by the DTrace module when loaded.
 */
int (*dtrace_invop_jump_addr)(struct trapframe *);
#endif
101
/*
 * Exception vector -> name table; the 0x3000 entry is the terminating
 * sentinel (see trapname()).
 */
static struct powerpc_exception powerpc_exceptions[] = {
	{ 0x0100, "system reset" },
	{ 0x0200, "machine check" },
	{ 0x0300, "data storage interrupt" },
	{ 0x0380, "data segment exception" },
	{ 0x0400, "instruction storage interrupt" },
	{ 0x0480, "instruction segment exception" },
	{ 0x0500, "external interrupt" },
	{ 0x0600, "alignment" },
	{ 0x0700, "program" },
	{ 0x0800, "floating-point unavailable" },
	{ 0x0900, "decrementer" },
	{ 0x0c00, "system call" },
	{ 0x0d00, "trace" },
	{ 0x0e00, "floating-point assist" },
	{ 0x0f00, "performance monitoring" },
	{ 0x0f20, "altivec unavailable" },
	{ 0x1000, "instruction tlb miss" },
	{ 0x1100, "data load tlb miss" },
	{ 0x1200, "data store tlb miss" },
	{ 0x1300, "instruction breakpoint" },
	{ 0x1400, "system management" },
	{ 0x1600, "altivec assist" },
	{ 0x1700, "thermal management" },
	{ 0x2000, "run mode/trace" },
	{ 0x3000, NULL }
};
129
130static const char *
131trapname(u_int vector)
132{
133	struct	powerpc_exception *pe;
134
135	for (pe = powerpc_exceptions; pe->vector != 0x3000; pe++) {
136		if (pe->vector == vector)
137			return (pe->name);
138	}
139
140	return ("unknown");
141}
142
/*
 * Machine-independent entry point for synchronous exceptions, called from
 * the low-level vector code with a filled-in trapframe.  Dispatches on the
 * exception type and on whether the trap came from user or kernel mode:
 * unhandled user traps are converted to signals, unhandled kernel traps
 * are fatal (trap_fatal()).
 */
void
trap(struct trapframe *frame)
{
	struct thread	*td;
	struct proc	*p;
#ifdef KDTRACE_HOOKS
	uint32_t inst;
#endif
	int		sig, type, user;
	u_int		ucode;
	ksiginfo_t	ksi;

	PCPU_INC(cnt.v_trap);

	td = curthread;
	p = td->td_proc;

	/* 'ucode' doubles as the si_code value for any signal posted below. */
	type = ucode = frame->exc;
	sig = 0;
	/* PSL_PR set in the saved MSR means the trap was taken in user mode. */
	user = frame->srr1 & PSL_PR;

	CTR3(KTR_TRAP, "trap: %s type=%s (%s)", td->td_name,
	    trapname(type), user ? "user" : "kernel");

#ifdef KDTRACE_HOOKS
	/*
	 * A trap can occur while DTrace executes a probe. Before
	 * executing the probe, DTrace blocks re-scheduling and sets
	 * a flag in its per-cpu flags to indicate that it doesn't
	 * want to fault. On returning from the probe, the no-fault
	 * flag is cleared and finally re-scheduling is enabled.
	 *
	 * If the DTrace kernel module has registered a trap handler,
	 * call it and if it returns non-zero, assume that it has
	 * handled the trap and modified the trap frame so that this
	 * function can return normally.
	 */
	if (dtrace_trap_func != NULL && (*dtrace_trap_func)(frame, type) != 0)
		return;
#endif

	if (user) {
		td->td_pticks = 0;
		td->td_frame = frame;
		if (td->td_ucred != p->p_ucred)
			cred_update_thread(td);

		/* User Mode Traps */
		switch (type) {
		case EXC_RUNMODETRC:
		case EXC_TRC:
			/* Single-step/trace: clear PSL_SE and signal SIGTRAP. */
			frame->srr1 &= ~PSL_SE;
			sig = SIGTRAP;
			ucode = TRAP_TRACE;
			break;

#ifdef __powerpc64__
		case EXC_ISE:
		case EXC_DSE:
			/* Segment fault: try to refill the SLB from the pmap. */
			if (handle_user_slb_spill(&p->p_vmspace->vm_pmap,
			    (type == EXC_ISE) ? frame->srr0 :
			    frame->cpu.aim.dar) != 0) {
				sig = SIGSEGV;
				ucode = SEGV_MAPERR;
			}
			break;
#endif
		case EXC_DSI:
		case EXC_ISI:
			/* Page fault: 0 on success, else the signal to post. */
			sig = trap_pfault(frame, 1);
			if (sig == SIGSEGV)
				ucode = SEGV_MAPERR;
			break;

		case EXC_SC:
			syscall(frame);
			break;

		case EXC_FPU:
			/* Lazy FPU: first FP use by this thread enables it. */
			KASSERT((td->td_pcb->pcb_flags & PCB_FPU) != PCB_FPU,
			    ("FPU already enabled for thread"));
			enable_fpu(td);
			break;

		case EXC_VEC:
			/* Lazy AltiVec enable, same scheme as the FPU. */
			KASSERT((td->td_pcb->pcb_flags & PCB_VEC) != PCB_VEC,
			    ("Altivec already enabled for thread"));
			enable_vec(td);
			break;

		case EXC_VECAST_G4:
		case EXC_VECAST_G5:
			/*
			 * We get a VPU assist exception for IEEE mode
			 * vector operations on denormalized floats.
			 * Emulating this is a giant pain, so for now,
			 * just switch off IEEE mode and treat them as
			 * zero.
			 */

			save_vec(td);
			td->td_pcb->pcb_vec.vscr |= ALTIVEC_VSCR_NJ;
			enable_vec(td);
			break;

		case EXC_ALI:
			/* Alignment fault: emulate, or SIGBUS on failure. */
			if (fix_unaligned(td, frame) != 0) {
				sig = SIGBUS;
				ucode = BUS_ADRALN;
			}
			else
				/* Emulated: step past the faulting instruction. */
				frame->srr0 += 4;
			break;

		case EXC_PGM:
			/* Identify the trap reason */
			if (frame->srr1 & EXC_PGM_TRAP) {
#ifdef KDTRACE_HOOKS
				/* Check for a DTrace pid-provider probe point. */
				inst = fuword32((const void *)frame->srr0);
				if (inst == 0x0FFFDDDD &&
				    dtrace_pid_probe_ptr != NULL) {
					struct reg regs;
					fill_regs(td, &regs);
					(*dtrace_pid_probe_ptr)(&regs);
					break;
				}
#endif
				sig = SIGTRAP;
				ucode = TRAP_BRKPT;
			} else {
				/* Not a trap instruction: attempt emulation. */
				sig = ppc_instr_emulate(frame, td->td_pcb);
				if (sig == SIGILL) {
					if (frame->srr1 & EXC_PGM_PRIV)
						ucode = ILL_PRVOPC;
					else if (frame->srr1 & EXC_PGM_ILLEGAL)
						ucode = ILL_ILLOPC;
				} else if (sig == SIGFPE)
					ucode = FPE_FLTINV;	/* Punt for now, invalid operation. */
			}
			break;

		case EXC_MCHK:
			/*
			 * Note that this may not be recoverable for the user
			 * process, depending on the type of machine check,
			 * but it at least prevents the kernel from dying.
			 */
			sig = SIGBUS;
			ucode = BUS_OBJERR;
			break;

		default:
			trap_fatal(frame);
		}
	} else {
		/* Kernel Mode Traps */

		KASSERT(cold || td->td_ucred != NULL,
		    ("kernel trap doesn't have ucred"));
		switch (type) {
#ifdef KDTRACE_HOOKS
		case EXC_PGM:
			/* Kernel breakpoint: hand off to DTrace if registered. */
			if (frame->srr1 & EXC_PGM_TRAP) {
				if (*(uint32_t *)frame->srr0 == EXC_DTRACE) {
					if (dtrace_invop_jump_addr != NULL) {
						dtrace_invop_jump_addr(frame);
						return;
					}
				}
			}
			break;
#endif
#ifdef __powerpc64__
		case EXC_DSE:
			/*
			 * Kernel touched the user segment (copyin/copyout):
			 * re-insert the user SLB entry and retry.
			 */
			if ((frame->cpu.aim.dar & SEGMENT_MASK) == USER_ADDR) {
				__asm __volatile ("slbmte %0, %1" ::
					"r"(td->td_pcb->pcb_cpu.aim.usr_vsid),
					"r"(USER_SLB_SLBE));
				return;
			}
			break;
#endif
		case EXC_DSI:
			if (trap_pfault(frame, 0) == 0)
				return;
			break;
		case EXC_MCHK:
			/* Recoverable only if onfault state was set up. */
			if (handle_onfault(frame))
				return;
			break;
		default:
			break;
		}
		/* No handler claimed the kernel trap: it is fatal. */
		trap_fatal(frame);
	}

	if (sig != 0) {
		/* Let the ABI's sysentvec translate the signal if needed. */
		if (p->p_sysent->sv_transtrap != NULL)
			sig = (p->p_sysent->sv_transtrap)(sig, type);
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = sig;
		ksi.ksi_code = (int) ucode; /* XXX, not POSIX */
		/* ksi.ksi_addr = ? */
		ksi.ksi_trapno = type;
		trapsignal(td, &ksi);
	}

	userret(td, frame);
}
352
353static void
354trap_fatal(struct trapframe *frame)
355{
356
357	printtrap(frame->exc, frame, 1, (frame->srr1 & PSL_PR));
358#ifdef KDB
359	if ((debugger_on_panic || kdb_active) &&
360	    kdb_trap(frame->exc, 0, frame))
361		return;
362#endif
363	panic("%s trap", trapname(frame->exc));
364}
365
366static void
367printtrap(u_int vector, struct trapframe *frame, int isfatal, int user)
368{
369
370	printf("\n");
371	printf("%s %s trap:\n", isfatal ? "fatal" : "handled",
372	    user ? "user" : "kernel");
373	printf("\n");
374	printf("   exception       = 0x%x (%s)\n", vector, trapname(vector));
375	switch (vector) {
376	case EXC_DSE:
377	case EXC_DSI:
378		printf("   virtual address = 0x%" PRIxPTR "\n",
379		    frame->cpu.aim.dar);
380		printf("   dsisr           = 0x%" PRIxPTR "\n",
381		    frame->cpu.aim.dsisr);
382		break;
383	case EXC_ISE:
384	case EXC_ISI:
385		printf("   virtual address = 0x%" PRIxPTR "\n", frame->srr0);
386		break;
387	}
388	printf("   srr0            = 0x%" PRIxPTR "\n", frame->srr0);
389	printf("   srr1            = 0x%" PRIxPTR "\n", frame->srr1);
390	printf("   lr              = 0x%" PRIxPTR "\n", frame->lr);
391	printf("   curthread       = %p\n", curthread);
392	if (curthread != NULL)
393		printf("          pid = %d, comm = %s\n",
394		    curthread->td_proc->p_pid, curthread->td_name);
395	printf("\n");
396}
397
398/*
399 * Handles a fatal fault when we have onfault state to recover.  Returns
400 * non-zero if there was onfault recovery state available.
401 */
402static int
403handle_onfault(struct trapframe *frame)
404{
405	struct		thread *td;
406	faultbuf	*fb;
407
408	td = curthread;
409	fb = td->td_pcb->pcb_onfault;
410	if (fb != NULL) {
411		frame->srr0 = (*fb)[0];
412		frame->fixreg[1] = (*fb)[1];
413		frame->fixreg[2] = (*fb)[2];
414		frame->fixreg[3] = 1;
415		frame->cr = (*fb)[3];
416		bcopy(&(*fb)[4], &frame->fixreg[13],
417		    19 * sizeof(register_t));
418		return (1);
419	}
420	return (0);
421}
422
423int
424cpu_fetch_syscall_args(struct thread *td, struct syscall_args *sa)
425{
426	struct proc *p;
427	struct trapframe *frame;
428	caddr_t	params;
429	size_t argsz;
430	int error, n, i;
431
432	p = td->td_proc;
433	frame = td->td_frame;
434
435	sa->code = frame->fixreg[0];
436	params = (caddr_t)(frame->fixreg + FIRSTARG);
437	n = NARGREG;
438
439	if (sa->code == SYS_syscall) {
440		/*
441		 * code is first argument,
442		 * followed by actual args.
443		 */
444		sa->code = *(register_t *) params;
445		params += sizeof(register_t);
446		n -= 1;
447	} else if (sa->code == SYS___syscall) {
448		/*
449		 * Like syscall, but code is a quad,
450		 * so as to maintain quad alignment
451		 * for the rest of the args.
452		 */
453		if (SV_PROC_FLAG(p, SV_ILP32)) {
454			params += sizeof(register_t);
455			sa->code = *(register_t *) params;
456			params += sizeof(register_t);
457			n -= 2;
458		} else {
459			sa->code = *(register_t *) params;
460			params += sizeof(register_t);
461			n -= 1;
462		}
463	}
464
465 	if (p->p_sysent->sv_mask)
466		sa->code &= p->p_sysent->sv_mask;
467	if (sa->code >= p->p_sysent->sv_size)
468		sa->callp = &p->p_sysent->sv_table[0];
469	else
470		sa->callp = &p->p_sysent->sv_table[sa->code];
471
472	sa->narg = sa->callp->sy_narg;
473
474	if (SV_PROC_FLAG(p, SV_ILP32)) {
475		argsz = sizeof(uint32_t);
476
477		for (i = 0; i < n; i++)
478			sa->args[i] = ((u_register_t *)(params))[i] &
479			    0xffffffff;
480	} else {
481		argsz = sizeof(uint64_t);
482
483		for (i = 0; i < n; i++)
484			sa->args[i] = ((u_register_t *)(params))[i];
485	}
486
487	if (sa->narg > n)
488		error = copyin(MOREARGS(frame->fixreg[1]), sa->args + n,
489			       (sa->narg - n) * argsz);
490	else
491		error = 0;
492
493#ifdef __powerpc64__
494	if (SV_PROC_FLAG(p, SV_ILP32) && sa->narg > n) {
495		/* Expand the size of arguments copied from the stack */
496
497		for (i = sa->narg; i >= n; i--)
498			sa->args[i] = ((uint32_t *)(&sa->args[n]))[i-n];
499	}
500#endif
501
502	if (error == 0) {
503		td->td_retval[0] = 0;
504		td->td_retval[1] = frame->fixreg[FIRSTARG + 1];
505	}
506	return (error);
507}
508
509#include "../../kern/subr_syscall.c"
510
/*
 * System call entry point (user EXC_SC trap).  Records the trapframe on
 * the thread and runs the MI syscallenter()/syscallret() pair from
 * subr_syscall.c, included just above.
 */
void
syscall(struct trapframe *frame)
{
	struct thread *td;
	struct syscall_args sa;
	int error;

	td = curthread;
	td->td_frame = frame;

#ifdef __powerpc64__
	/*
	 * Speculatively restore last user SLB segment, which we know is
	 * invalid already, since we are likely to do copyin()/copyout().
	 */
	__asm __volatile ("slbmte %0, %1; isync" ::
            "r"(td->td_pcb->pcb_cpu.aim.usr_vsid), "r"(USER_SLB_SLBE));
#endif

	error = syscallenter(td, &sa);
	syscallret(td, error, &sa);
}
533
534#ifdef __powerpc64__
/* Handle kernel SLB faults -- runs in real mode, all seat belts off */
void
handle_kernel_slb_spill(int type, register_t dar, register_t srr0)
{
	struct slb *slbcache;
	uint64_t slbe, slbv;
	uint64_t esid, addr;
	int i;

	/* Instruction faults use the saved PC; data faults use DAR. */
	addr = (type == EXC_ISE) ? srr0 : dar;
	slbcache = PCPU_GET(slb);
	esid = (uintptr_t)addr >> ADDR_SR_SHFT;
	slbe = (esid << SLBE_ESID_SHIFT) | SLBE_VALID;

	/* See if the hardware flushed this somehow (can happen in LPARs) */
	for (i = 0; i < n_slbs; i++)
		if (slbcache[i].slbe == (slbe | (uint64_t)i))
			return;

	/* Not in the map, needs to actually be added */
	slbv = kernel_va_to_slbv(addr);
	if (slbcache[USER_SLB_SLOT].slbe == 0) {
		/* Prefer any free slot other than the reserved user slot. */
		for (i = 0; i < n_slbs; i++) {
			if (i == USER_SLB_SLOT)
				continue;
			if (!(slbcache[i].slbe & SLBE_VALID))
				goto fillkernslb;
		}

		/*
		 * No free slot found; mark the user slot non-zero (but not
		 * SLBE_VALID) — NOTE(review): presumably so this scan is
		 * skipped on subsequent spills; confirm intent.
		 */
		if (i == n_slbs)
			slbcache[USER_SLB_SLOT].slbe = 1;
	}

	/* Sacrifice a random SLB entry that is not the user entry */
	i = mftb() % n_slbs;
	if (i == USER_SLB_SLOT)
		i = (i+1) % n_slbs;

fillkernslb:
	/* Write new entry */
	slbcache[i].slbv = slbv;
	slbcache[i].slbe = slbe | (uint64_t)i;

	/* Trap handler will restore from cache on exit */
}
580
/*
 * Handle a user-mode segment fault by ensuring the pmap has an SLB entry
 * for the faulting address, allocating a VSID for the segment if none
 * exists yet.  Always returns 0 (success); the caller treats non-zero as
 * SIGSEGV.
 */
static int
handle_user_slb_spill(pmap_t pm, vm_offset_t addr)
{
	struct slb *user_entry;
	uint64_t esid;
	int i;

	esid = (uintptr_t)addr >> ADDR_SR_SHFT;

	PMAP_LOCK(pm);
	user_entry = user_va_to_slb_entry(pm, addr);

	if (user_entry == NULL) {
		/* allocate_vsid auto-spills it */
		(void)allocate_user_vsid(pm, esid, 0);
	} else {
		/*
		 * Check that another CPU has not already mapped this.
		 * XXX: Per-thread SLB caches would be better.
		 */
		for (i = 0; i < pm->pm_slb_len; i++)
			if (pm->pm_slb[i] == user_entry)
				break;

		if (i == pm->pm_slb_len)
			slb_insert_user(pm, user_entry);
	}
	PMAP_UNLOCK(pm);

	return (0);
}
612#endif
613
/*
 * Resolve a page fault (EXC_DSI/EXC_ISI) through the VM system.
 * 'user' is non-zero for faults taken in user mode.  Returns 0 if the
 * fault was satisfied (or onfault recovery applied), SIGSEGV otherwise.
 */
static int
trap_pfault(struct trapframe *frame, int user)
{
	vm_offset_t	eva, va;
	struct		thread *td;
	struct		proc *p;
	vm_map_t	map;
	vm_prot_t	ftype;
	int		rv;
	register_t	user_sr;

	td = curthread;
	p = td->td_proc;
	if (frame->exc == EXC_ISI) {
		/* Instruction fault: fault address is the saved PC. */
		eva = frame->srr0;
		ftype = VM_PROT_EXECUTE;
		if (frame->srr1 & SRR1_ISI_PFAULT)
			ftype |= VM_PROT_READ;
	} else {
		/* Data fault: address in DAR, direction from DSISR. */
		eva = frame->cpu.aim.dar;
		if (frame->cpu.aim.dsisr & DSISR_STORE)
			ftype = VM_PROT_WRITE;
		else
			ftype = VM_PROT_READ;
	}

	if (user) {
		map = &p->p_vmspace->vm_map;
	} else {
		if ((eva >> ADDR_SR_SHFT) == (USER_ADDR >> ADDR_SR_SHFT)) {
			/*
			 * Kernel fault through the user segment window
			 * (copyin/copyout): relocate the address back
			 * into the user segment it stands in for.
			 */
			if (p->p_vmspace == NULL)
				return (SIGSEGV);

			map = &p->p_vmspace->vm_map;

			user_sr = td->td_pcb->pcb_cpu.aim.usr_segm;
			eva &= ADDR_PIDX | ADDR_POFF;
			eva |= user_sr << ADDR_SR_SHFT;
		} else {
			map = kernel_map;
		}
	}
	va = trunc_page(eva);

	if (map != kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 *	critical time.
		 */
		PROC_LOCK(p);
		++p->p_lock;
		PROC_UNLOCK(p);

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);

		PROC_LOCK(p);
		--p->p_lock;
		PROC_UNLOCK(p);
		/*
		 * XXXDTRACE: add dtrace_doubletrap_func here?
		 */
	} else {
		/*
		 * Don't have to worry about process locking or stacks in the
		 * kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);

	/* Kernel faults may still be recoverable via onfault state. */
	if (!user && handle_onfault(frame))
		return (0);

	return (SIGSEGV);
}
692
/*
 * For now, this only deals with the particular unaligned access case
 * that gcc tends to generate.  Eventually it should handle all of the
 * possibilities that can happen on a 32-bit PowerPC in big-endian mode.
 *
 * Emulates an unaligned lfd/stfd (load/store floating-point double) by
 * bouncing the data through the thread's PCB FPR image with copyin/
 * copyout.  Returns 0 on success, -1 if the access could not be fixed
 * (the caller then posts SIGBUS).
 */
static int
fix_unaligned(struct thread *td, struct trapframe *frame)
{
	struct thread	*fputhread;
	int		indicator, reg;
	double		*fpr;

	/* DSISR encodes which instruction form caused the alignment trap. */
	indicator = EXC_ALI_OPCODE_INDICATOR(frame->cpu.aim.dsisr);

	switch (indicator) {
	case EXC_ALI_LFD:
	case EXC_ALI_STFD:
		reg = EXC_ALI_RST(frame->cpu.aim.dsisr);
		fpr = &td->td_pcb->pcb_fpu.fpr[reg];
		fputhread = PCPU_GET(fputhread);

		/* Juggle the FPU to ensure that we've initialized
		 * the FPRs, and that their current state is in
		 * the PCB.
		 */
		if (fputhread != td) {
			if (fputhread)
				save_fpu(fputhread);
			enable_fpu(td);
		}
		save_fpu(td);

		if (indicator == EXC_ALI_LFD) {
			/* Load: fetch the double from user memory into the PCB
			 * image, then re-enable the FPU to load it for real. */
			if (copyin((void *)frame->cpu.aim.dar, fpr,
			    sizeof(double)) != 0)
				return -1;
			enable_fpu(td);
		} else {
			/* Store: write the saved register out to user memory. */
			if (copyout(fpr, (void *)frame->cpu.aim.dar,
			    sizeof(double)) != 0)
				return -1;
		}
		return 0;
		break;
	}

	return -1;
}
742
743