sys/i386/isa/npx.c, FreeBSD revision 210518
1/*-
2 * Copyright (c) 1990 William Jolitz.
3 * Copyright (c) 1991 The Regents of the University of California.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 *    notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 *    notice, this list of conditions and the following disclaimer in the
13 *    documentation and/or other materials provided with the distribution.
14 * 4. Neither the name of the University nor the names of its contributors
15 *    may be used to endorse or promote products derived from this software
16 *    without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28 * SUCH DAMAGE.
29 *
30 *	from: @(#)npx.c	7.2 (Berkeley) 5/12/91
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: head/sys/i386/isa/npx.c 210518 2010-07-26 22:16:36Z jkim $");
35
36#include "opt_cpu.h"
37#include "opt_isa.h"
38#include "opt_npx.h"
39
40#include <sys/param.h>
41#include <sys/systm.h>
42#include <sys/bus.h>
43#include <sys/kernel.h>
44#include <sys/lock.h>
45#include <sys/malloc.h>
46#include <sys/module.h>
47#include <sys/mutex.h>
49#include <sys/proc.h>
50#include <sys/smp.h>
51#include <sys/sysctl.h>
52#include <machine/bus.h>
53#include <sys/rman.h>
54#ifdef NPX_DEBUG
55#include <sys/syslog.h>
56#endif
57#include <sys/signalvar.h>
58
59#include <machine/asmacros.h>
60#include <machine/cputypes.h>
61#include <machine/frame.h>
62#include <machine/md_var.h>
63#include <machine/pcb.h>
64#include <machine/psl.h>
65#include <machine/resource.h>
66#include <machine/specialreg.h>
67#include <machine/segments.h>
68#include <machine/ucontext.h>
69
70#include <machine/intr_machdep.h>
71#ifdef XEN
72#include <machine/xen/xen-os.h>
73#include <xen/hypervisor.h>
74#endif
75
76#ifdef DEV_ISA
77#include <isa/isavar.h>
78#endif
79
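/*
 * When the kernel is built for I686_CPU and CPU_DISABLE_SSE is not set,
 * CPU_ENABLE_SSE compiles in the fxsave/fxrstor (SSE) save format and
 * the MXCSR handling used throughout this file; otherwise only the
 * classic fnsave/frstor interface is available.
 */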
80#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
81#define CPU_ENABLE_SSE
82#endif
83
84/*
85 * 387 and 287 Numeric Coprocessor Extension (NPX) Driver.
86 */
87
88#if defined(__GNUCLIKE_ASM) && !defined(lint)
89
90#define	fldcw(addr)		__asm __volatile("fldcw %0" : : "m" (*(addr)))
91#define	fnclex()		__asm __volatile("fnclex")
92#define	fninit()		__asm __volatile("fninit")
93#define	fnsave(addr)		__asm __volatile("fnsave %0" : "=m" (*(addr)))
94#define	fnstcw(addr)		__asm __volatile("fnstcw %0" : "=m" (*(addr)))
95#define	fnstsw(addr)		__asm __volatile("fnstsw %0" : "=am" (*(addr)))
96#define	fp_divide_by_0()	__asm __volatile( \
97				    "fldz; fld1; fdiv %st,%st(1); fnop")
98#define	frstor(addr)		__asm __volatile("frstor %0" : : "m" (*(addr)))
99#ifdef CPU_ENABLE_SSE
100#define	fxrstor(addr)		__asm __volatile("fxrstor %0" : : "m" (*(addr)))
101#define	fxsave(addr)		__asm __volatile("fxsave %0" : "=m" (*(addr)))
102#define	ldmxcsr(r)		__asm __volatile("ldmxcsr %0" : : "m" (r))
103#endif
104#ifdef XEN
105#define	start_emulating()	(HYPERVISOR_fpu_taskswitch(1))
106#define	stop_emulating()	(HYPERVISOR_fpu_taskswitch(0))
107#else
108#define	start_emulating()	__asm __volatile( \
109				    "smsw %%ax; orb %0,%%al; lmsw %%ax" \
110				    : : "n" (CR0_TS) : "ax")
111#define	stop_emulating()	__asm __volatile("clts")
112#endif
113#else	/* !(__GNUCLIKE_ASM && !lint) */
114
115void	fldcw(caddr_t addr);
116void	fnclex(void);
117void	fninit(void);
118void	fnsave(caddr_t addr);
119void	fnstcw(caddr_t addr);
120void	fnstsw(caddr_t addr);
121void	fp_divide_by_0(void);
122void	frstor(caddr_t addr);
123#ifdef CPU_ENABLE_SSE
124void	fxsave(caddr_t addr);
125void	fxrstor(caddr_t addr);
126#endif
127void	start_emulating(void);
128void	stop_emulating(void);
129
130#endif	/* __GNUCLIKE_ASM && !lint */
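/*
 * The start_emulating()/stop_emulating() macros above toggle CR0_TS:
 * start_emulating() sets TS (via smsw/lmsw here, or an
 * HYPERVISOR_fpu_taskswitch hypercall under XEN) so that the next FPU
 * instruction raises a device-not-available (DNA) fault, serviced by
 * npxdna() below; stop_emulating() clears TS with clts so FPU
 * instructions run normally again.
 */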
131
132#ifdef CPU_ENABLE_SSE
133#define GET_FPU_CW(thread) \
134	(cpu_fxsr ? \
135		(thread)->td_pcb->pcb_save->sv_xmm.sv_env.en_cw : \
136		(thread)->td_pcb->pcb_save->sv_87.sv_env.en_cw)
137#define GET_FPU_SW(thread) \
138	(cpu_fxsr ? \
139		(thread)->td_pcb->pcb_save->sv_xmm.sv_env.en_sw : \
140		(thread)->td_pcb->pcb_save->sv_87.sv_env.en_sw)
141#define SET_FPU_CW(savefpu, value) do { \
142	if (cpu_fxsr) \
143		(savefpu)->sv_xmm.sv_env.en_cw = (value); \
144	else \
145		(savefpu)->sv_87.sv_env.en_cw = (value); \
146} while (0)
147#else /* CPU_ENABLE_SSE */
148#define GET_FPU_CW(thread) \
149	(thread->td_pcb->pcb_save->sv_87.sv_env.en_cw)
150#define GET_FPU_SW(thread) \
151	(thread->td_pcb->pcb_save->sv_87.sv_env.en_sw)
152#define SET_FPU_CW(savefpu, value) \
153	(savefpu)->sv_87.sv_env.en_cw = (value)
154#endif /* CPU_ENABLE_SSE */
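/*
 * The GET_FPU_CW()/GET_FPU_SW()/SET_FPU_CW() accessors above cope with
 * the two save-area layouts in union savefpu: fnsave/frstor use the
 * legacy 387 image (sv_87) while fxsave/fxrstor use the 512-byte XMM
 * image (sv_xmm), so the control and status words live at different
 * offsets.  cpu_fxsr selects which layout is in use at run time.
 */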
155
156typedef u_char bool_t;
157
158#ifdef CPU_ENABLE_SSE
159static	void	fpu_clean_state(void);
160#endif
161
162static	void	fpusave(union savefpu *);
163static	void	fpurstor(union savefpu *);
164static	int	npx_attach(device_t dev);
165static	void	npx_identify(driver_t *driver, device_t parent);
166static	int	npx_probe(device_t dev);
167
168int	hw_float;
169
170SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD,
171    &hw_float, 0, "Floating point instructions executed in hardware");
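/*
 * hw_float is exported read-only to userland as the hw.floatingpoint
 * sysctl; npx_probe() below sets it once FPU hardware is found.
 */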
172
173static	volatile u_int		npx_traps_while_probing;
174static	union savefpu		npx_initialstate;
175
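/*
 * Temporary IDT_MF trap handler used only while probing: it counts
 * each FPU trap in npx_traps_while_probing, clears the pending
 * exception with fnclex, and returns with iret.
 */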
176alias_for_inthand_t probetrap;
177__asm("								\n\
178	.text							\n\
179	.p2align 2,0x90						\n\
180	.type	" __XSTRING(CNAME(probetrap)) ",@function	\n\
181" __XSTRING(CNAME(probetrap)) ":				\n\
182	ss							\n\
183	incl	" __XSTRING(CNAME(npx_traps_while_probing)) "	\n\
184	fnclex							\n\
185	iret							\n\
186");
187
188/*
189 * Identify routine.  Create a connection point on our parent for probing.
190 */
191static void
192npx_identify(driver, parent)
193	driver_t *driver;
194	device_t parent;
195{
196	device_t child;
197
198	child = BUS_ADD_CHILD(parent, 0, "npx", 0);
199	if (child == NULL)
200		panic("npx_identify");
201}
202
203/*
204 * Probe routine.  Set flags to tell npxattach() what to do.  Set up an
205 * interrupt handler if npx needs to use interrupts.
206 */
207static int
208npx_probe(device_t dev)
209{
210	struct gate_descriptor save_idt_npxtrap;
211	u_short control, status;
212
213	device_set_desc(dev, "math processor");
214
215	/*
216	 * Modern CPUs all have an FPU that uses the INT16 interface
217	 * and provide a simple way to verify that, so handle the
218	 * common case right away.
219	 */
220	if (cpu_feature & CPUID_FPU) {
221		hw_float = 1;
222		device_quiet(dev);
223		return (0);
224	}
225
226	save_idt_npxtrap = idt[IDT_MF];
227	setidt(IDT_MF, probetrap, SDT_SYS386TGT, SEL_KPL,
228	    GSEL(GCODE_SEL, SEL_KPL));
229
230	/*
231	 * Don't trap while we're probing.
232	 */
233	stop_emulating();
234
235	/*
236	 * Finish resetting the coprocessor, if any.  If there is an error
237	 * pending, then we may get a bogus IRQ13, but npx_intr() will handle
238	 * it OK.  Bogus halts have never been observed, but we enabled
239	 * IRQ13 and cleared the BUSY# latch early to handle them anyway.
240	 */
241	fninit();
242
243	/*
244	 * Don't use fwait here because it might hang.
245	 * Don't use fnop here because it usually hangs if there is no FPU.
246	 */
247	DELAY(1000);		/* wait for any IRQ13 */
248#ifdef DIAGNOSTIC
249	if (npx_traps_while_probing != 0)
250		printf("fninit caused %u bogus npx trap(s)\n",
251		       npx_traps_while_probing);
252#endif
253	/*
254	 * Check for a status of mostly zero.
255	 */
256	status = 0x5a5a;
257	fnstsw(&status);
258	if ((status & 0xb8ff) == 0) {
259		/*
260		 * Good, now check for a proper control word.
261		 */
262		control = 0x5a5a;
263		fnstcw(&control);
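		/*
		 * fninit loads the default control word 0x037f (all
		 * exceptions masked, extended precision, round to
		 * nearest); 0x037f & 0x1f3f == 0x033f, so the test
		 * below ignores the reserved bits.
		 */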
264		if ((control & 0x1f3f) == 0x033f) {
265			/*
266			 * We have an npx, now divide by 0 to see if exception
267			 * 16 works.
268			 */
269			control &= ~(1 << 2);	/* enable divide by 0 trap */
270			fldcw(&control);
271#ifdef FPU_ERROR_BROKEN
272			/*
273			 * FPU error signal doesn't work on some CPU
274			 * accelerator board.
275			 */
276			hw_float = 1;
277			return (0);
278#endif
279			npx_traps_while_probing = 0;
280			fp_divide_by_0();
281			if (npx_traps_while_probing != 0) {
282				/*
283				 * Good, exception 16 works.
284				 */
285				hw_float = 1;
286				goto cleanup;
287			}
288			device_printf(dev,
289	"FPU does not use exception 16 for error reporting\n");
290			goto cleanup;
291		}
292	}
293
294	/*
295	 * Probe failed.  Floating point simply won't work.
296	 * Notify user and disable FPU/MMX/SSE instruction execution.
297	 */
298	device_printf(dev, "WARNING: no FPU!\n");
299	__asm __volatile("smsw %%ax; orb %0,%%al; lmsw %%ax" : :
300	    "n" (CR0_EM | CR0_MP) : "ax");
301
302cleanup:
303	idt[IDT_MF] = save_idt_npxtrap;
304	return (hw_float ? 0 : ENXIO);
305}
306
307/*
308 * Attach routine - announce which it is, and wire into system
309 */
310static int
311npx_attach(device_t dev)
312{
313
314	npxinit();
315	critical_enter();
316	stop_emulating();
317	fpusave(&npx_initialstate);
318	start_emulating();
319#ifdef CPU_ENABLE_SSE
320	if (cpu_fxsr) {
321		if (npx_initialstate.sv_xmm.sv_env.en_mxcsr_mask)
322			cpu_mxcsr_mask =
323			    npx_initialstate.sv_xmm.sv_env.en_mxcsr_mask;
324		else
325			cpu_mxcsr_mask = 0xFFBF;
326		bzero(npx_initialstate.sv_xmm.sv_fp,
327		    sizeof(npx_initialstate.sv_xmm.sv_fp));
328		bzero(npx_initialstate.sv_xmm.sv_xmm,
329		    sizeof(npx_initialstate.sv_xmm.sv_xmm));
330		/* XXX might need even more zeroing. */
331	} else
332#endif
333		bzero(npx_initialstate.sv_87.sv_ac,
334		    sizeof(npx_initialstate.sv_87.sv_ac));
335	critical_exit();
336
337	return (0);
338}
339
340/*
341 * Initialize floating point unit.
342 */
343void
344npxinit(void)
345{
346	static union savefpu dummy;
347	register_t savecrit;
348	u_short control;
349
350	if (!hw_float)
351		return;
352	/*
353	 * fninit has the same h/w bugs as fnsave.  Use the detoxified
354	 * fnsave to throw away any junk in the fpu.  npxsave() initializes
355	 * the fpu and sets fpcurthread = NULL as important side effects.
356	 *
357	 * It is too early for critical_enter() to work on AP.
358	 */
359	savecrit = intr_disable();
360	npxsave(&dummy);
361	stop_emulating();
362#ifdef CPU_ENABLE_SSE
363	/* XXX npxsave() doesn't actually initialize the fpu in the SSE case. */
364	if (cpu_fxsr)
365		fninit();
366#endif
367	control = __INITIAL_NPXCW__;
368	fldcw(&control);
369	start_emulating();
370	intr_restore(savecrit);
371}
372
373/*
374 * Free coprocessor (if we have it).
375 */
376void
377npxexit(td)
378	struct thread *td;
379{
380
381	critical_enter();
382	if (curthread == PCPU_GET(fpcurthread))
383		npxsave(PCPU_GET(curpcb)->pcb_save);
384	critical_exit();
385#ifdef NPX_DEBUG
386	if (hw_float) {
387		u_int	masked_exceptions;
388
389		masked_exceptions = GET_FPU_CW(td) & GET_FPU_SW(td) & 0x7f;
390		/*
391		 * Log exceptions that would have trapped with the old
392		 * control word (overflow, divide by 0, and invalid operand).
393		 */
394		if (masked_exceptions & 0x0d)
395			log(LOG_ERR,
396	"pid %d (%s) exited with masked floating point exceptions 0x%02x\n",
397			    td->td_proc->p_pid, td->td_proc->p_comm,
398			    masked_exceptions);
399	}
400#endif
401}
402
403int
404npxformat()
405{
406
407	if (!hw_float)
408		return (_MC_FPFMT_NODEV);
409#ifdef	CPU_ENABLE_SSE
410	if (cpu_fxsr)
411		return (_MC_FPFMT_XMM);
412#endif
413	return (_MC_FPFMT_387);
414}
415
416/*
417 * The following mechanism is used to ensure that the FPE_... value
418 * that is passed as a trapcode to the signal handler of the user
419 * process does not have more than one bit set.
420 *
421 * Multiple bits may be set if the user process modifies the control
422 * word while a status word bit is already set.  While this is a sign
423 * of bad coding, we have no choice but to narrow them down to one
424 * bit, since we must not send a trapcode that is not exactly one of
425 * the FPE_ macros.
426 *
427 * The mechanism has a static table with 128 entries.  Each combination
428 * of the 7 FPU status word exception bits directly translates to a
429 * position in this table, where a single FPE_... value is stored.
430 * This FPE_... value stored there is considered the "most important"
431 * of the exception bits and will be sent as the signal code.  The
432 * precedence of the bits is based upon Intel Document "Numerical
433 * Applications", Chapter "Special Computational Situations".
434 *
435 * The macro to choose one of these values does these steps: 1) Throw
436 * away status word bits that cannot be masked.  2) Throw away the bits
437 * currently masked in the control word, assuming the user isn't
438 * interested in them anymore.  3) Reinsert status word bit 7 (stack
439 * fault) if it is set, which cannot be masked but must be preserved.
440 * 4) Use the remaining bits to point into the trapcode table.
441 *
442 * The 6 maskable bits in order of their preference, as stated in the
443 * above referenced Intel manual:
444 * 1  Invalid operation (FP_X_INV)
445 * 1a   Stack underflow
446 * 1b   Stack overflow
447 * 1c   Operand of unsupported format
448 * 1d   SNaN operand.
449 * 2  QNaN operand (not an exception, irrelevant here)
450 * 3  Any other invalid-operation not mentioned above or zero divide
451 *      (FP_X_INV, FP_X_DZ)
452 * 4  Denormal operand (FP_X_DNML)
453 * 5  Numeric over/underflow (FP_X_OFL, FP_X_UFL)
454 * 6  Inexact result (FP_X_IMP)
455 */
456static char fpetable[128] = {
457	0,
458	FPE_FLTINV,	/*  1 - INV */
459	FPE_FLTUND,	/*  2 - DNML */
460	FPE_FLTINV,	/*  3 - INV | DNML */
461	FPE_FLTDIV,	/*  4 - DZ */
462	FPE_FLTINV,	/*  5 - INV | DZ */
463	FPE_FLTDIV,	/*  6 - DNML | DZ */
464	FPE_FLTINV,	/*  7 - INV | DNML | DZ */
465	FPE_FLTOVF,	/*  8 - OFL */
466	FPE_FLTINV,	/*  9 - INV | OFL */
467	FPE_FLTUND,	/*  A - DNML | OFL */
468	FPE_FLTINV,	/*  B - INV | DNML | OFL */
469	FPE_FLTDIV,	/*  C - DZ | OFL */
470	FPE_FLTINV,	/*  D - INV | DZ | OFL */
471	FPE_FLTDIV,	/*  E - DNML | DZ | OFL */
472	FPE_FLTINV,	/*  F - INV | DNML | DZ | OFL */
473	FPE_FLTUND,	/* 10 - UFL */
474	FPE_FLTINV,	/* 11 - INV | UFL */
475	FPE_FLTUND,	/* 12 - DNML | UFL */
476	FPE_FLTINV,	/* 13 - INV | DNML | UFL */
477	FPE_FLTDIV,	/* 14 - DZ | UFL */
478	FPE_FLTINV,	/* 15 - INV | DZ | UFL */
479	FPE_FLTDIV,	/* 16 - DNML | DZ | UFL */
480	FPE_FLTINV,	/* 17 - INV | DNML | DZ | UFL */
481	FPE_FLTOVF,	/* 18 - OFL | UFL */
482	FPE_FLTINV,	/* 19 - INV | OFL | UFL */
483	FPE_FLTUND,	/* 1A - DNML | OFL | UFL */
484	FPE_FLTINV,	/* 1B - INV | DNML | OFL | UFL */
485	FPE_FLTDIV,	/* 1C - DZ | OFL | UFL */
486	FPE_FLTINV,	/* 1D - INV | DZ | OFL | UFL */
487	FPE_FLTDIV,	/* 1E - DNML | DZ | OFL | UFL */
488	FPE_FLTINV,	/* 1F - INV | DNML | DZ | OFL | UFL */
489	FPE_FLTRES,	/* 20 - IMP */
490	FPE_FLTINV,	/* 21 - INV | IMP */
491	FPE_FLTUND,	/* 22 - DNML | IMP */
492	FPE_FLTINV,	/* 23 - INV | DNML | IMP */
493	FPE_FLTDIV,	/* 24 - DZ | IMP */
494	FPE_FLTINV,	/* 25 - INV | DZ | IMP */
495	FPE_FLTDIV,	/* 26 - DNML | DZ | IMP */
496	FPE_FLTINV,	/* 27 - INV | DNML | DZ | IMP */
497	FPE_FLTOVF,	/* 28 - OFL | IMP */
498	FPE_FLTINV,	/* 29 - INV | OFL | IMP */
499	FPE_FLTUND,	/* 2A - DNML | OFL | IMP */
500	FPE_FLTINV,	/* 2B - INV | DNML | OFL | IMP */
501	FPE_FLTDIV,	/* 2C - DZ | OFL | IMP */
502	FPE_FLTINV,	/* 2D - INV | DZ | OFL | IMP */
503	FPE_FLTDIV,	/* 2E - DNML | DZ | OFL | IMP */
504	FPE_FLTINV,	/* 2F - INV | DNML | DZ | OFL | IMP */
505	FPE_FLTUND,	/* 30 - UFL | IMP */
506	FPE_FLTINV,	/* 31 - INV | UFL | IMP */
507	FPE_FLTUND,	/* 32 - DNML | UFL | IMP */
508	FPE_FLTINV,	/* 33 - INV | DNML | UFL | IMP */
509	FPE_FLTDIV,	/* 34 - DZ | UFL | IMP */
510	FPE_FLTINV,	/* 35 - INV | DZ | UFL | IMP */
511	FPE_FLTDIV,	/* 36 - DNML | DZ | UFL | IMP */
512	FPE_FLTINV,	/* 37 - INV | DNML | DZ | UFL | IMP */
513	FPE_FLTOVF,	/* 38 - OFL | UFL | IMP */
514	FPE_FLTINV,	/* 39 - INV | OFL | UFL | IMP */
515	FPE_FLTUND,	/* 3A - DNML | OFL | UFL | IMP */
516	FPE_FLTINV,	/* 3B - INV | DNML | OFL | UFL | IMP */
517	FPE_FLTDIV,	/* 3C - DZ | OFL | UFL | IMP */
518	FPE_FLTINV,	/* 3D - INV | DZ | OFL | UFL | IMP */
519	FPE_FLTDIV,	/* 3E - DNML | DZ | OFL | UFL | IMP */
520	FPE_FLTINV,	/* 3F - INV | DNML | DZ | OFL | UFL | IMP */
521	FPE_FLTSUB,	/* 40 - STK */
522	FPE_FLTSUB,	/* 41 - INV | STK */
523	FPE_FLTUND,	/* 42 - DNML | STK */
524	FPE_FLTSUB,	/* 43 - INV | DNML | STK */
525	FPE_FLTDIV,	/* 44 - DZ | STK */
526	FPE_FLTSUB,	/* 45 - INV | DZ | STK */
527	FPE_FLTDIV,	/* 46 - DNML | DZ | STK */
528	FPE_FLTSUB,	/* 47 - INV | DNML | DZ | STK */
529	FPE_FLTOVF,	/* 48 - OFL | STK */
530	FPE_FLTSUB,	/* 49 - INV | OFL | STK */
531	FPE_FLTUND,	/* 4A - DNML | OFL | STK */
532	FPE_FLTSUB,	/* 4B - INV | DNML | OFL | STK */
533	FPE_FLTDIV,	/* 4C - DZ | OFL | STK */
534	FPE_FLTSUB,	/* 4D - INV | DZ | OFL | STK */
535	FPE_FLTDIV,	/* 4E - DNML | DZ | OFL | STK */
536	FPE_FLTSUB,	/* 4F - INV | DNML | DZ | OFL | STK */
537	FPE_FLTUND,	/* 50 - UFL | STK */
538	FPE_FLTSUB,	/* 51 - INV | UFL | STK */
539	FPE_FLTUND,	/* 52 - DNML | UFL | STK */
540	FPE_FLTSUB,	/* 53 - INV | DNML | UFL | STK */
541	FPE_FLTDIV,	/* 54 - DZ | UFL | STK */
542	FPE_FLTSUB,	/* 55 - INV | DZ | UFL | STK */
543	FPE_FLTDIV,	/* 56 - DNML | DZ | UFL | STK */
544	FPE_FLTSUB,	/* 57 - INV | DNML | DZ | UFL | STK */
545	FPE_FLTOVF,	/* 58 - OFL | UFL | STK */
546	FPE_FLTSUB,	/* 59 - INV | OFL | UFL | STK */
547	FPE_FLTUND,	/* 5A - DNML | OFL | UFL | STK */
548	FPE_FLTSUB,	/* 5B - INV | DNML | OFL | UFL | STK */
549	FPE_FLTDIV,	/* 5C - DZ | OFL | UFL | STK */
550	FPE_FLTSUB,	/* 5D - INV | DZ | OFL | UFL | STK */
551	FPE_FLTDIV,	/* 5E - DNML | DZ | OFL | UFL | STK */
552	FPE_FLTSUB,	/* 5F - INV | DNML | DZ | OFL | UFL | STK */
553	FPE_FLTRES,	/* 60 - IMP | STK */
554	FPE_FLTSUB,	/* 61 - INV | IMP | STK */
555	FPE_FLTUND,	/* 62 - DNML | IMP | STK */
556	FPE_FLTSUB,	/* 63 - INV | DNML | IMP | STK */
557	FPE_FLTDIV,	/* 64 - DZ | IMP | STK */
558	FPE_FLTSUB,	/* 65 - INV | DZ | IMP | STK */
559	FPE_FLTDIV,	/* 66 - DNML | DZ | IMP | STK */
560	FPE_FLTSUB,	/* 67 - INV | DNML | DZ | IMP | STK */
561	FPE_FLTOVF,	/* 68 - OFL | IMP | STK */
562	FPE_FLTSUB,	/* 69 - INV | OFL | IMP | STK */
563	FPE_FLTUND,	/* 6A - DNML | OFL | IMP | STK */
564	FPE_FLTSUB,	/* 6B - INV | DNML | OFL | IMP | STK */
565	FPE_FLTDIV,	/* 6C - DZ | OFL | IMP | STK */
566	FPE_FLTSUB,	/* 6D - INV | DZ | OFL | IMP | STK */
567	FPE_FLTDIV,	/* 6E - DNML | DZ | OFL | IMP | STK */
568	FPE_FLTSUB,	/* 6F - INV | DNML | DZ | OFL | IMP | STK */
569	FPE_FLTUND,	/* 70 - UFL | IMP | STK */
570	FPE_FLTSUB,	/* 71 - INV | UFL | IMP | STK */
571	FPE_FLTUND,	/* 72 - DNML | UFL | IMP | STK */
572	FPE_FLTSUB,	/* 73 - INV | DNML | UFL | IMP | STK */
573	FPE_FLTDIV,	/* 74 - DZ | UFL | IMP | STK */
574	FPE_FLTSUB,	/* 75 - INV | DZ | UFL | IMP | STK */
575	FPE_FLTDIV,	/* 76 - DNML | DZ | UFL | IMP | STK */
576	FPE_FLTSUB,	/* 77 - INV | DNML | DZ | UFL | IMP | STK */
577	FPE_FLTOVF,	/* 78 - OFL | UFL | IMP | STK */
578	FPE_FLTSUB,	/* 79 - INV | OFL | UFL | IMP | STK */
579	FPE_FLTUND,	/* 7A - DNML | OFL | UFL | IMP | STK */
580	FPE_FLTSUB,	/* 7B - INV | DNML | OFL | UFL | IMP | STK */
581	FPE_FLTDIV,	/* 7C - DZ | OFL | UFL | IMP | STK */
582	FPE_FLTSUB,	/* 7D - INV | DZ | OFL | UFL | IMP | STK */
583	FPE_FLTDIV,	/* 7E - DNML | DZ | OFL | UFL | IMP | STK */
584	FPE_FLTSUB,	/* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */
585};
586
587/*
588 * Preserve the FP status word, clear FP exceptions, then generate a SIGFPE.
589 *
590 * Clearing exceptions is necessary mainly to avoid IRQ13 bugs.  We now
591 * depend on longjmp() restoring a usable state.  Restoring the state
592 * or examining it might fail if we didn't clear exceptions.
593 *
594 * The error code chosen will be one of the FPE_... macros. It will be
595 * sent as the second argument to old BSD-style signal handlers and as
596 * "siginfo_t->si_code" (second argument) to SA_SIGINFO signal handlers.
597 *
598 * XXX the FP state is not preserved across signal handlers.  So signal
599 * handlers cannot afford to do FP unless they preserve the state or
600 * longjmp() out.  Both preserving the state and longjmp()ing may be
601 * destroyed by IRQ13 bugs.  Clearing FP exceptions is not an acceptable
602 * solution for signals other than SIGFPE.
603 */
604int
605npxtrap()
606{
607	u_short control, status;
608
609	if (!hw_float) {
610		printf("npxtrap: fpcurthread = %p, curthread = %p, hw_float = %d\n",
611		       PCPU_GET(fpcurthread), curthread, hw_float);
612		panic("npxtrap from nowhere");
613	}
614	critical_enter();
615
616	/*
617	 * Interrupt handling (for another interrupt) may have pushed the
618	 * state to memory.  Fetch the relevant parts of the state from
619	 * wherever they are.
620	 */
621	if (PCPU_GET(fpcurthread) != curthread) {
622		control = GET_FPU_CW(curthread);
623		status = GET_FPU_SW(curthread);
624	} else {
625		fnstcw(&control);
626		fnstsw(&status);
627	}
628
629	if (PCPU_GET(fpcurthread) == curthread)
630		fnclex();
631	critical_exit();
632	return (fpetable[status & ((~control & 0x3f) | 0x40)]);
633}
634
635/*
636 * Implement device not available (DNA) exception
637 *
638 * It would be better to switch FP context here (if curthread != fpcurthread)
639 * and not necessarily for every context switch, but it is too hard to
640 * access foreign pcb's.
641 */
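/*
 * npxdna() is reached from trap() on a T_DNA (device-not-available)
 * fault, i.e. when a thread touches the FPU while CR0_TS is set: it
 * claims the FPU for curthread and loads either the initial state or
 * the state saved in the PCB.
 */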
642
643static int err_count = 0;
644
645int
646npxdna(void)
647{
648	struct pcb *pcb;
649
650	if (!hw_float)
651		return (0);
652	critical_enter();
653	if (PCPU_GET(fpcurthread) == curthread) {
654		printf("npxdna: fpcurthread == curthread %d times\n",
655		    ++err_count);
656		stop_emulating();
657		critical_exit();
658		return (1);
659	}
660	if (PCPU_GET(fpcurthread) != NULL) {
661		printf("npxdna: fpcurthread = %p (%d), curthread = %p (%d)\n",
662		       PCPU_GET(fpcurthread),
663		       PCPU_GET(fpcurthread)->td_proc->p_pid,
664		       curthread, curthread->td_proc->p_pid);
665		panic("npxdna");
666	}
667	stop_emulating();
668	/*
669	 * Record new context early in case frstor causes an IRQ13.
670	 */
671	PCPU_SET(fpcurthread, curthread);
672	pcb = PCPU_GET(curpcb);
673
674#ifdef CPU_ENABLE_SSE
675	if (cpu_fxsr)
676		fpu_clean_state();
677#endif
678
679	if ((pcb->pcb_flags & PCB_NPXINITDONE) == 0) {
680		/*
681		 * This is the first time this thread has used the FPU or
682		 * the PCB doesn't contain a clean FPU state.  Explicitly
683		 * load an initial state.
684		 */
685		fpurstor(&npx_initialstate);
686		if (pcb->pcb_initial_npxcw != __INITIAL_NPXCW__)
687			fldcw(&pcb->pcb_initial_npxcw);
688		pcb->pcb_flags |= PCB_NPXINITDONE;
689		if (PCB_USER_FPU(pcb))
690			pcb->pcb_flags |= PCB_NPXUSERINITDONE;
691	} else {
692		/*
693		 * The following fpurstor() may cause an IRQ13 when the
694		 * state being restored has a pending error.  The error will
695		 * appear to have been triggered by the current (npx) user
696		 * instruction even when that instruction is a no-wait
697		 * instruction that should not trigger an error (e.g.,
698		 * fnclex).  On at least one 486 system all of the no-wait
699		 * instructions are broken the same as frstor, so our
700		 * treatment does not amplify the breakage.  On at least
701		 * one 386/Cyrix 387 system, fnclex works correctly while
702		 * frstor and fnsave are broken, so our treatment breaks
703		 * fnclex if it is the first FPU instruction after a context
704		 * switch.
705		 */
706		fpurstor(pcb->pcb_save);
707	}
708	critical_exit();
709
710	return (1);
711}
712
713/*
714 * Wrapper for fnsave instruction, partly to handle hardware bugs.  When npx
715 * exceptions are reported via IRQ13, spurious IRQ13's may be triggered by
716 * no-wait npx instructions.  See the Intel application note AP-578 for
717 * details.  This doesn't cause any additional complications here.  IRQ13's
718 * are inherently asynchronous unless the CPU is frozen to deliver them --
719 * one that started in userland may be delivered many instructions later,
720 * after the process has entered the kernel.  It may even be delivered after
721 * the fnsave here completes.  A spurious IRQ13 for the fnsave is handled in
722 * the same way as a very-late-arriving non-spurious IRQ13 from user mode:
723 * it is normally ignored at first because we set fpcurthread to NULL; it is
724 * normally retriggered in npxdna() after return to user mode.
725 *
726 * npxsave() must be called with interrupts disabled, so that it clears
727 * fpcurthread atomically with saving the state.  We require callers to do the
728 * disabling, since most callers need to disable interrupts anyway to call
729 * npxsave() atomically with checking fpcurthread.
730 *
731 * A previous version of npxsave() went to great lengths to execute fnsave
732 * with interrupts enabled in case executing it froze the CPU.  This case
733 * can't happen, at least for Intel CPU/NPX's.  Spurious IRQ13's don't imply
734 * spurious freezes.
735 */
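/*
 * For reference, the in-file callers bracket the fpcurthread check and
 * the save in a critical section, as in npxexit() above:
 *
 *	critical_enter();
 *	if (curthread == PCPU_GET(fpcurthread))
 *		npxsave(PCPU_GET(curpcb)->pcb_save);
 *	critical_exit();
 */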
736void
737npxsave(addr)
738	union savefpu *addr;
739{
740
741	stop_emulating();
742	fpusave(addr);
743
744	start_emulating();
745	PCPU_SET(fpcurthread, NULL);
746}
747
748void
749npxdrop()
750{
751	struct thread *td;
752
753	/*
754	 * Discard pending exceptions in the !cpu_fxsr case so that unmasked
755	 * ones don't cause a panic on the next frstor.
756	 */
757#ifdef CPU_ENABLE_SSE
758	if (!cpu_fxsr)
759#endif
760		fnclex();
761
762	td = PCPU_GET(fpcurthread);
763	KASSERT(td == curthread, ("fpudrop: fpcurthread != curthread"));
764	CRITICAL_ASSERT(td);
765	PCPU_SET(fpcurthread, NULL);
766	td->td_pcb->pcb_flags &= ~PCB_NPXINITDONE;
767	start_emulating();
768}
769
770/*
771 * Get the state of the FPU without dropping ownership (if possible).
772 * It returns the FPU ownership status.
773 */
774int
775npxgetregs(struct thread *td, union savefpu *addr)
776{
777	struct pcb *pcb;
778
779	if (!hw_float)
780		return (_MC_FPOWNED_NONE);
781
782	pcb = td->td_pcb;
783	if ((pcb->pcb_flags & PCB_NPXINITDONE) == 0) {
784		bcopy(&npx_initialstate, addr, sizeof(npx_initialstate));
785		SET_FPU_CW(addr, pcb->pcb_initial_npxcw);
786		return (_MC_FPOWNED_NONE);
787	}
788	critical_enter();
789	if (td == PCPU_GET(fpcurthread)) {
790		fpusave(addr);
791#ifdef CPU_ENABLE_SSE
792		if (!cpu_fxsr)
793#endif
794			/*
795			 * fnsave initializes the FPU and destroys whatever
796			 * context it contains.  Make sure the FPU owner
797			 * starts with a clean state next time.
798			 */
799			npxdrop();
800		critical_exit();
801		return (_MC_FPOWNED_FPU);
802	} else {
803		critical_exit();
804		bcopy(pcb->pcb_save, addr, sizeof(*addr));
805		return (_MC_FPOWNED_PCB);
806	}
807}
808
809int
810npxgetuserregs(struct thread *td, union savefpu *addr)
811{
812	struct pcb *pcb;
813
814	if (!hw_float)
815		return (_MC_FPOWNED_NONE);
816
817	pcb = td->td_pcb;
818	if ((pcb->pcb_flags & PCB_NPXUSERINITDONE) == 0) {
819		bcopy(&npx_initialstate, addr, sizeof(npx_initialstate));
820		SET_FPU_CW(addr, pcb->pcb_initial_npxcw);
821		return (_MC_FPOWNED_NONE);
822	}
823	critical_enter();
824	if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
825		fpusave(addr);
826#ifdef CPU_ENABLE_SSE
827		if (!cpu_fxsr)
828#endif
829			/*
830			 * fnsave initializes the FPU and destroys whatever
831			 * context it contains.  Make sure the FPU owner
832			 * starts with a clean state next time.
833			 */
834			npxdrop();
835		critical_exit();
836		return (_MC_FPOWNED_FPU);
837	} else {
838		critical_exit();
839		bcopy(&pcb->pcb_user_save, addr, sizeof(*addr));
840		return (_MC_FPOWNED_PCB);
841	}
842}
843
844/*
845 * Set the state of the FPU.
846 */
847void
848npxsetregs(struct thread *td, union savefpu *addr)
849{
850	struct pcb *pcb;
851
852	if (!hw_float)
853		return;
854
855	pcb = td->td_pcb;
856	critical_enter();
857	if (td == PCPU_GET(fpcurthread)) {
858#ifdef CPU_ENABLE_SSE
859		if (!cpu_fxsr)
860#endif
861			fnclex();	/* As in npxdrop(). */
862		fpurstor(addr);
863		critical_exit();
864	} else {
865		critical_exit();
866		bcopy(addr, pcb->pcb_save, sizeof(*addr));
867	}
868	if (PCB_USER_FPU(pcb))
869		pcb->pcb_flags |= PCB_NPXUSERINITDONE;
870	pcb->pcb_flags |= PCB_NPXINITDONE;
871}
872
873void
874npxsetuserregs(struct thread *td, union savefpu *addr)
875{
876	struct pcb *pcb;
877
878	if (!hw_float)
879		return;
880
881	pcb = td->td_pcb;
882	critical_enter();
883	if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
884#ifdef CPU_ENABLE_SSE
885		if (!cpu_fxsr)
886#endif
887			fnclex();	/* As in npxdrop(). */
888		fpurstor(addr);
889		critical_exit();
890		pcb->pcb_flags |= PCB_NPXUSERINITDONE | PCB_NPXINITDONE;
891	} else {
892		critical_exit();
893		bcopy(addr, &pcb->pcb_user_save, sizeof(*addr));
894		if (PCB_USER_FPU(pcb))
895			pcb->pcb_flags |= PCB_NPXINITDONE;
896		pcb->pcb_flags |= PCB_NPXUSERINITDONE;
897	}
898}
899
900static void
901fpusave(addr)
902	union savefpu *addr;
903{
904
905#ifdef CPU_ENABLE_SSE
906	if (cpu_fxsr)
907		fxsave(addr);
908	else
909#endif
910		fnsave(addr);
911}
912
913#ifdef CPU_ENABLE_SSE
914/*
915 * On AuthenticAMD processors, the fxrstor instruction does not restore
916 * the x87's stored last instruction pointer, last data pointer, and last
917 * opcode values, except in the rare case in which the exception summary
918 * (ES) bit in the x87 status word is set to 1.
919 *
920 * In order to avoid leaking this information across processes, we clean
921 * these values by performing a dummy load before executing fxrstor().
922 */
923static void
924fpu_clean_state(void)
925{
926	static float dummy_variable = 0.0;
927	u_short status;
928
929	/*
930	 * Clear the ES bit in the x87 status word if it is currently
931	 * set, in order to avoid causing a fault in the upcoming load.
932	 */
933	fnstsw(&status);
934	if (status & 0x80)
935		fnclex();
936
937	/*
938	 * Load the dummy variable into the x87 stack.  This mangles
939	 * the x87 stack, but we don't care since we're about to call
940	 * fxrstor() anyway.
941	 */
942	__asm __volatile("ffree %%st(7); flds %0" : : "m" (dummy_variable));
943}
944#endif /* CPU_ENABLE_SSE */
945
946static void
947fpurstor(addr)
948	union savefpu *addr;
949{
950
951#ifdef CPU_ENABLE_SSE
952	if (cpu_fxsr)
953		fxrstor(addr);
954	else
955#endif
956		frstor(addr);
957}
958
959static device_method_t npx_methods[] = {
960	/* Device interface */
961	DEVMETHOD(device_identify,	npx_identify),
962	DEVMETHOD(device_probe,		npx_probe),
963	DEVMETHOD(device_attach,	npx_attach),
964	DEVMETHOD(device_detach,	bus_generic_detach),
965	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
966	DEVMETHOD(device_suspend,	bus_generic_suspend),
967	DEVMETHOD(device_resume,	bus_generic_resume),
968
969	{ 0, 0 }
970};
971
972static driver_t npx_driver = {
973	"npx",
974	npx_methods,
975	1,			/* no softc */
976};
977
978static devclass_t npx_devclass;
979
980/*
981 * We prefer to attach to the root nexus so that the usual case (exception 16)
982 * doesn't describe the processor as being `on isa'.
983 */
984DRIVER_MODULE(npx, nexus, npx_driver, npx_devclass, 0, 0);
985
986#ifdef DEV_ISA
987/*
988 * This sucks up the legacy ISA support assignments from PNPBIOS/ACPI.
989 */
990static struct isa_pnp_id npxisa_ids[] = {
991	{ 0x040cd041, "Legacy ISA coprocessor support" }, /* PNP0C04 */
992	{ 0 }
993};
994
995static int
996npxisa_probe(device_t dev)
997{
998	int result;
999	if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev, npxisa_ids)) <= 0) {
1000		device_quiet(dev);
1001	}
1002	return(result);
1003}
1004
1005static int
1006npxisa_attach(device_t dev)
1007{
1008	return (0);
1009}
1010
1011static device_method_t npxisa_methods[] = {
1012	/* Device interface */
1013	DEVMETHOD(device_probe,		npxisa_probe),
1014	DEVMETHOD(device_attach,	npxisa_attach),
1015	DEVMETHOD(device_detach,	bus_generic_detach),
1016	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
1017	DEVMETHOD(device_suspend,	bus_generic_suspend),
1018	DEVMETHOD(device_resume,	bus_generic_resume),
1019
1020	{ 0, 0 }
1021};
1022
1023static driver_t npxisa_driver = {
1024	"npxisa",
1025	npxisa_methods,
1026	1,			/* no softc */
1027};
1028
1029static devclass_t npxisa_devclass;
1030
1031DRIVER_MODULE(npxisa, isa, npxisa_driver, npxisa_devclass, 0, 0);
1032#ifndef PC98
1033DRIVER_MODULE(npxisa, acpi, npxisa_driver, npxisa_devclass, 0, 0);
1034#endif
1035#endif /* DEV_ISA */
1036
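/*
 * fpu_kern_enter() lets kernel code (e.g. a crypto driver) borrow the
 * FPU: it saves any live user state via npxexit(), then redirects
 * pcb_save to the caller-supplied ctx->hwstate so kernel FPU state is
 * saved and restored there; fpu_kern_leave() undoes this.  A caller
 * might use the pair roughly as follows (sketch only; FPU_KERN_NORMAL
 * and struct fpu_kern_ctx come from the machine headers, not this
 * file):
 *
 *	struct fpu_kern_ctx ctx;
 *
 *	fpu_kern_enter(curthread, &ctx, FPU_KERN_NORMAL);
 *	... FPU/SSE work ...
 *	fpu_kern_leave(curthread, &ctx);
 */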
1037int
1038fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags)
1039{
1040	struct pcb *pcb;
1041
1042	pcb = td->td_pcb;
1043	KASSERT(!PCB_USER_FPU(pcb) || pcb->pcb_save == &pcb->pcb_user_save,
1044	    ("mangled pcb_save"));
1045	ctx->flags = 0;
1046	if ((pcb->pcb_flags & PCB_NPXINITDONE) != 0)
1047		ctx->flags |= FPU_KERN_CTX_NPXINITDONE;
1048	npxexit(td);
1049	ctx->prev = pcb->pcb_save;
1050	pcb->pcb_save = &ctx->hwstate;
1051	pcb->pcb_flags |= PCB_KERNNPX;
1052	pcb->pcb_flags &= ~PCB_NPXINITDONE;
1053	return (0);
1054}
1055
1056int
1057fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx)
1058{
1059	struct pcb *pcb;
1060
1061	pcb = td->td_pcb;
1062	critical_enter();
1063	if (curthread == PCPU_GET(fpcurthread))
1064		npxdrop();
1065	critical_exit();
1066	pcb->pcb_save = ctx->prev;
1067	if (pcb->pcb_save == &pcb->pcb_user_save) {
1068		if ((pcb->pcb_flags & PCB_NPXUSERINITDONE) != 0)
1069			pcb->pcb_flags |= PCB_NPXINITDONE;
1070		else
1071			pcb->pcb_flags &= ~PCB_NPXINITDONE;
1072		pcb->pcb_flags &= ~PCB_KERNNPX;
1073	} else {
1074		if ((ctx->flags & FPU_KERN_CTX_NPXINITDONE) != 0)
1075			pcb->pcb_flags |= PCB_NPXINITDONE;
1076		else
1077			pcb->pcb_flags &= ~PCB_NPXINITDONE;
1078		KASSERT(!PCB_USER_FPU(pcb), ("unpaired fpu_kern_leave"));
1079	}
1080	return (0);
1081}
1082
1083int
1084fpu_kern_thread(u_int flags)
1085{
1086	struct pcb *pcb;
1087
1088	pcb = PCPU_GET(curpcb);
1089	KASSERT((curthread->td_pflags & TDP_KTHREAD) != 0,
1090	    ("Only kthread may use fpu_kern_thread"));
1091	KASSERT(pcb->pcb_save == &pcb->pcb_user_save, ("mangled pcb_save"));
1092	KASSERT(PCB_USER_FPU(pcb), ("recursive call"));
1093
1094	pcb->pcb_flags |= PCB_KERNNPX;
1095	return (0);
1096}
1097
1098int
1099is_fpu_kern_thread(u_int flags)
1100{
1101
1102	if ((curthread->td_pflags & TDP_KTHREAD) == 0)
1103		return (0);
1104	return ((PCPU_GET(curpcb)->pcb_flags & PCB_KERNNPX) != 0);
1105}
1106