/* npx.c revision 208833 */
1/*- 2 * Copyright (c) 1990 William Jolitz. 3 * Copyright (c) 1991 The Regents of the University of California. 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 4. Neither the name of the University nor the names of its contributors 15 * may be used to endorse or promote products derived from this software 16 * without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 
29 * 30 * from: @(#)npx.c 7.2 (Berkeley) 5/12/91 31 */ 32 33#include <sys/cdefs.h> 34__FBSDID("$FreeBSD: head/sys/i386/isa/npx.c 208833 2010-06-05 15:59:59Z kib $"); 35 36#include "opt_cpu.h" 37#include "opt_isa.h" 38#include "opt_npx.h" 39 40#include <sys/param.h> 41#include <sys/systm.h> 42#include <sys/bus.h> 43#include <sys/kernel.h> 44#include <sys/lock.h> 45#include <sys/malloc.h> 46#include <sys/module.h> 47#include <sys/mutex.h> 48#include <sys/mutex.h> 49#include <sys/proc.h> 50#include <sys/smp.h> 51#include <sys/sysctl.h> 52#include <machine/bus.h> 53#include <sys/rman.h> 54#ifdef NPX_DEBUG 55#include <sys/syslog.h> 56#endif 57#include <sys/signalvar.h> 58 59#include <machine/asmacros.h> 60#include <machine/cputypes.h> 61#include <machine/frame.h> 62#include <machine/md_var.h> 63#include <machine/pcb.h> 64#include <machine/psl.h> 65#include <machine/resource.h> 66#include <machine/specialreg.h> 67#include <machine/segments.h> 68#include <machine/ucontext.h> 69 70#include <machine/intr_machdep.h> 71#ifdef XEN 72#include <machine/xen/xen-os.h> 73#include <xen/hypervisor.h> 74#endif 75 76#ifdef DEV_ISA 77#include <isa/isavar.h> 78#endif 79 80#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU) 81#define CPU_ENABLE_SSE 82#endif 83 84/* 85 * 387 and 287 Numeric Coprocessor Extension (NPX) Driver. 86 */ 87 88/* Configuration flags. 
*/ 89#define NPX_DISABLE_I586_OPTIMIZED_BCOPY (1 << 0) 90#define NPX_DISABLE_I586_OPTIMIZED_BZERO (1 << 1) 91#define NPX_DISABLE_I586_OPTIMIZED_COPYIO (1 << 2) 92 93#if defined(__GNUCLIKE_ASM) && !defined(lint) 94 95#define fldcw(addr) __asm("fldcw %0" : : "m" (*(addr))) 96#define fnclex() __asm("fnclex") 97#define fninit() __asm("fninit") 98#define fnsave(addr) __asm __volatile("fnsave %0" : "=m" (*(addr))) 99#define fnstcw(addr) __asm __volatile("fnstcw %0" : "=m" (*(addr))) 100#define fnstsw(addr) __asm __volatile("fnstsw %0" : "=m" (*(addr))) 101#define fp_divide_by_0() __asm("fldz; fld1; fdiv %st,%st(1); fnop") 102#define frstor(addr) __asm("frstor %0" : : "m" (*(addr))) 103#ifdef CPU_ENABLE_SSE 104#define fxrstor(addr) __asm("fxrstor %0" : : "m" (*(addr))) 105#define fxsave(addr) __asm __volatile("fxsave %0" : "=m" (*(addr))) 106#define ldmxcsr(__csr) __asm __volatile("ldmxcsr %0" : : "m" (__csr)) 107#endif 108#ifdef XEN 109#define start_emulating() (HYPERVISOR_fpu_taskswitch(1)) 110#define stop_emulating() (HYPERVISOR_fpu_taskswitch(0)) 111#else 112#define start_emulating() __asm("smsw %%ax; orb %0,%%al; lmsw %%ax" \ 113 : : "n" (CR0_TS) : "ax") 114#define stop_emulating() __asm("clts") 115#endif 116#else /* !(__GNUCLIKE_ASM && !lint) */ 117 118void fldcw(caddr_t addr); 119void fnclex(void); 120void fninit(void); 121void fnsave(caddr_t addr); 122void fnstcw(caddr_t addr); 123void fnstsw(caddr_t addr); 124void fp_divide_by_0(void); 125void frstor(caddr_t addr); 126#ifdef CPU_ENABLE_SSE 127void fxsave(caddr_t addr); 128void fxrstor(caddr_t addr); 129#endif 130void start_emulating(void); 131void stop_emulating(void); 132 133#endif /* __GNUCLIKE_ASM && !lint */ 134 135#ifdef CPU_ENABLE_SSE 136#define GET_FPU_CW(thread) \ 137 (cpu_fxsr ? \ 138 (thread)->td_pcb->pcb_save->sv_xmm.sv_env.en_cw : \ 139 (thread)->td_pcb->pcb_save->sv_87.sv_env.en_cw) 140#define GET_FPU_SW(thread) \ 141 (cpu_fxsr ? 
\ 142 (thread)->td_pcb->pcb_save->sv_xmm.sv_env.en_sw : \ 143 (thread)->td_pcb->pcb_save->sv_87.sv_env.en_sw) 144#define SET_FPU_CW(savefpu, value) do { \ 145 if (cpu_fxsr) \ 146 (savefpu)->sv_xmm.sv_env.en_cw = (value); \ 147 else \ 148 (savefpu)->sv_87.sv_env.en_cw = (value); \ 149} while (0) 150#else /* CPU_ENABLE_SSE */ 151#define GET_FPU_CW(thread) \ 152 (thread->td_pcb->pcb_save->sv_87.sv_env.en_cw) 153#define GET_FPU_SW(thread) \ 154 (thread->td_pcb->pcb_save->sv_87.sv_env.en_sw) 155#define SET_FPU_CW(savefpu, value) \ 156 (savefpu)->sv_87.sv_env.en_cw = (value) 157#endif /* CPU_ENABLE_SSE */ 158 159typedef u_char bool_t; 160 161#ifdef CPU_ENABLE_SSE 162static void fpu_clean_state(void); 163#endif 164 165static void fpusave(union savefpu *); 166static void fpurstor(union savefpu *); 167static int npx_attach(device_t dev); 168static void npx_identify(driver_t *driver, device_t parent); 169static int npx_intr(void *); 170static int npx_probe(device_t dev); 171#ifdef I586_CPU_XXX 172static long timezero(const char *funcname, 173 void (*func)(void *buf, size_t len)); 174#endif /* I586_CPU */ 175 176int hw_float; /* XXX currently just alias for npx_exists */ 177 178SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD, 179 &hw_float, 0, "Floating point instructions executed in hardware"); 180 181static volatile u_int npx_intrs_while_probing; 182static volatile u_int npx_traps_while_probing; 183 184static union savefpu npx_initialstate; 185static bool_t npx_ex16; 186static bool_t npx_exists; 187static bool_t npx_irq13; 188 189alias_for_inthand_t probetrap; 190__asm(" \n\ 191 .text \n\ 192 .p2align 2,0x90 \n\ 193 .type " __XSTRING(CNAME(probetrap)) ",@function \n\ 194" __XSTRING(CNAME(probetrap)) ": \n\ 195 ss \n\ 196 incl " __XSTRING(CNAME(npx_traps_while_probing)) " \n\ 197 fnclex \n\ 198 iret \n\ 199"); 200 201/* 202 * Identify routine. Create a connection point on our parent for probing. 
203 */ 204static void 205npx_identify(driver, parent) 206 driver_t *driver; 207 device_t parent; 208{ 209 device_t child; 210 211 child = BUS_ADD_CHILD(parent, 0, "npx", 0); 212 if (child == NULL) 213 panic("npx_identify"); 214} 215 216/* 217 * Do minimal handling of npx interrupts to convert them to traps. 218 */ 219static int 220npx_intr(dummy) 221 void *dummy; 222{ 223 struct thread *td; 224 225 npx_intrs_while_probing++; 226 227 /* 228 * The BUSY# latch must be cleared in all cases so that the next 229 * unmasked npx exception causes an interrupt. 230 */ 231 outb(IO_NPX, 0); 232 233 /* 234 * fpcurthread is normally non-null here. In that case, schedule an 235 * AST to finish the exception handling in the correct context 236 * (this interrupt may occur after the thread has entered the 237 * kernel via a syscall or an interrupt). Otherwise, the npx 238 * state of the thread that caused this interrupt must have been 239 * pushed to the thread's pcb, and clearing of the busy latch 240 * above has finished the (essentially null) handling of this 241 * interrupt. Control will eventually return to the instruction 242 * that caused it and it will repeat. We will eventually (usually 243 * soon) win the race to handle the interrupt properly. 244 */ 245 td = PCPU_GET(fpcurthread); 246 if (td != NULL) { 247 td->td_pcb->pcb_flags |= PCB_NPXTRAP; 248 thread_lock(td); 249 td->td_flags |= TDF_ASTPENDING; 250 thread_unlock(td); 251 } 252 return (FILTER_HANDLED); 253} 254 255/* 256 * Probe routine. Set flags to tell npxattach() what to do. Set up an 257 * interrupt handler if npx needs to use interrupts. 
258 */ 259static int 260npx_probe(dev) 261 device_t dev; 262{ 263 struct gate_descriptor save_idt_npxtrap; 264 struct resource *ioport_res, *irq_res; 265 void *irq_cookie; 266 int ioport_rid, irq_num, irq_rid; 267 u_short control; 268 u_short status; 269 270 device_set_desc(dev, "math processor"); 271 272 /* 273 * Modern CPUs all have an FPU that uses the INT16 interface 274 * and provide a simple way to verify that, so handle the 275 * common case right away. 276 */ 277 if (cpu_feature & CPUID_FPU) { 278 hw_float = npx_exists = 1; 279 npx_ex16 = 1; 280 device_quiet(dev); 281 return (0); 282 } 283 284 save_idt_npxtrap = idt[IDT_MF]; 285 setidt(IDT_MF, probetrap, SDT_SYS386TGT, SEL_KPL, 286 GSEL(GCODE_SEL, SEL_KPL)); 287 ioport_rid = 0; 288 ioport_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &ioport_rid, 289 IO_NPX, IO_NPX + IO_NPXSIZE - 1, IO_NPXSIZE, RF_ACTIVE); 290 if (ioport_res == NULL) 291 panic("npx: can't get ports"); 292 if (resource_int_value("npx", 0, "irq", &irq_num) != 0) 293 irq_num = IRQ_NPX; 294 irq_rid = 0; 295 irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &irq_rid, irq_num, 296 irq_num, 1, RF_ACTIVE); 297 if (irq_res != NULL) { 298 if (bus_setup_intr(dev, irq_res, INTR_TYPE_MISC, 299 npx_intr, NULL, NULL, &irq_cookie) != 0) 300 panic("npx: can't create intr"); 301 } 302 303 /* 304 * Partially reset the coprocessor, if any. Some BIOS's don't reset 305 * it after a warm boot. 306 */ 307 npx_full_reset(); 308 outb(IO_NPX, 0); 309 310 /* 311 * Don't trap while we're probing. 312 */ 313 stop_emulating(); 314 315 /* 316 * Finish resetting the coprocessor, if any. If there is an error 317 * pending, then we may get a bogus IRQ13, but npx_intr() will handle 318 * it OK. Bogus halts have never been observed, but we enabled 319 * IRQ13 and cleared the BUSY# latch early to handle them anyway. 320 */ 321 fninit(); 322 323 /* 324 * Don't use fwait here because it might hang. 325 * Don't use fnop here because it usually hangs if there is no FPU. 
326 */ 327 DELAY(1000); /* wait for any IRQ13 */ 328#ifdef DIAGNOSTIC 329 if (npx_intrs_while_probing != 0) 330 printf("fninit caused %u bogus npx interrupt(s)\n", 331 npx_intrs_while_probing); 332 if (npx_traps_while_probing != 0) 333 printf("fninit caused %u bogus npx trap(s)\n", 334 npx_traps_while_probing); 335#endif 336 /* 337 * Check for a status of mostly zero. 338 */ 339 status = 0x5a5a; 340 fnstsw(&status); 341 if ((status & 0xb8ff) == 0) { 342 /* 343 * Good, now check for a proper control word. 344 */ 345 control = 0x5a5a; 346 fnstcw(&control); 347 if ((control & 0x1f3f) == 0x033f) { 348 hw_float = npx_exists = 1; 349 /* 350 * We have an npx, now divide by 0 to see if exception 351 * 16 works. 352 */ 353 control &= ~(1 << 2); /* enable divide by 0 trap */ 354 fldcw(&control); 355#ifdef FPU_ERROR_BROKEN 356 /* 357 * FPU error signal doesn't work on some CPU 358 * accelerator board. 359 */ 360 npx_ex16 = 1; 361 return (0); 362#endif 363 npx_traps_while_probing = npx_intrs_while_probing = 0; 364 fp_divide_by_0(); 365 DELAY(1000); /* wait for any IRQ13 */ 366 if (npx_traps_while_probing != 0) { 367 /* 368 * Good, exception 16 works. 369 */ 370 npx_ex16 = 1; 371 goto no_irq13; 372 } 373 if (npx_intrs_while_probing != 0) { 374 /* 375 * Bad, we are stuck with IRQ13. 376 */ 377 npx_irq13 = 1; 378 idt[IDT_MF] = save_idt_npxtrap; 379#ifdef SMP 380 if (mp_ncpus > 1) 381 panic("npx0 cannot use IRQ 13 on an SMP system"); 382#endif 383 return (0); 384 } 385 /* 386 * Worse, even IRQ13 is broken. 387 */ 388 } 389 } 390 391 /* Probe failed. Floating point simply won't work. */ 392 device_printf(dev, "WARNING: no FPU!\n"); 393 394 /* FALLTHROUGH */ 395no_irq13: 396 idt[IDT_MF] = save_idt_npxtrap; 397 if (irq_res != NULL) { 398 bus_teardown_intr(dev, irq_res, irq_cookie); 399 bus_release_resource(dev, SYS_RES_IRQ, irq_rid, irq_res); 400 } 401 bus_release_resource(dev, SYS_RES_IOPORT, ioport_rid, ioport_res); 402 return (npx_exists ? 
0 : ENXIO); 403} 404 405/* 406 * Attach routine - announce which it is, and wire into system 407 */ 408static int 409npx_attach(dev) 410 device_t dev; 411{ 412 int flags; 413 register_t s; 414 415 flags = device_get_flags(dev); 416 417 if (npx_irq13) 418 device_printf(dev, "IRQ 13 interface\n"); 419 else if (!device_is_quiet(dev) || bootverbose) 420 device_printf(dev, "INT 16 interface\n"); 421 422 npxinit(); 423 424 s = intr_disable(); 425 stop_emulating(); 426 fpusave(&npx_initialstate); 427 start_emulating(); 428#ifdef CPU_ENABLE_SSE 429 if (cpu_fxsr) { 430 if (npx_initialstate.sv_xmm.sv_env.en_mxcsr_mask) 431 cpu_mxcsr_mask = 432 npx_initialstate.sv_xmm.sv_env.en_mxcsr_mask; 433 else 434 cpu_mxcsr_mask = 0xFFBF; 435 bzero(npx_initialstate.sv_xmm.sv_fp, 436 sizeof(npx_initialstate.sv_xmm.sv_fp)); 437 bzero(npx_initialstate.sv_xmm.sv_xmm, 438 sizeof(npx_initialstate.sv_xmm.sv_xmm)); 439 /* XXX might need even more zeroing. */ 440 } else 441#endif 442 bzero(npx_initialstate.sv_87.sv_ac, 443 sizeof(npx_initialstate.sv_87.sv_ac)); 444 intr_restore(s); 445#ifdef I586_CPU_XXX 446 if (cpu_class == CPUCLASS_586 && npx_ex16 && 447 timezero("i586_bzero()", i586_bzero) < 448 timezero("bzero()", bzero) * 4 / 5) { 449 if (!(flags & NPX_DISABLE_I586_OPTIMIZED_BCOPY)) 450 bcopy_vector = i586_bcopy; 451 if (!(flags & NPX_DISABLE_I586_OPTIMIZED_BZERO)) 452 bzero_vector = i586_bzero; 453 if (!(flags & NPX_DISABLE_I586_OPTIMIZED_COPYIO)) { 454 copyin_vector = i586_copyin; 455 copyout_vector = i586_copyout; 456 } 457 } 458#endif 459 460 return (0); /* XXX unused */ 461} 462 463/* 464 * Initialize floating point unit. 465 */ 466void 467npxinit(void) 468{ 469 static union savefpu dummy; 470 register_t savecrit; 471 u_short control; 472 473 if (!npx_exists) 474 return; 475 /* 476 * fninit has the same h/w bugs as fnsave. Use the detoxified 477 * fnsave to throw away any junk in the fpu. npxsave() initializes 478 * the fpu and sets fpcurthread = NULL as important side effects. 
479 */ 480 savecrit = intr_disable(); 481 npxsave(&dummy); 482 stop_emulating(); 483#ifdef CPU_ENABLE_SSE 484 /* XXX npxsave() doesn't actually initialize the fpu in the SSE case. */ 485 if (cpu_fxsr) 486 fninit(); 487#endif 488 control = __INITIAL_NPXCW__; 489 fldcw(&control); 490 start_emulating(); 491 intr_restore(savecrit); 492} 493 494/* 495 * Free coprocessor (if we have it). 496 */ 497void 498npxexit(td) 499 struct thread *td; 500{ 501 register_t savecrit; 502 503 savecrit = intr_disable(); 504 if (curthread == PCPU_GET(fpcurthread)) 505 npxsave(PCPU_GET(curpcb)->pcb_save); 506 intr_restore(savecrit); 507#ifdef NPX_DEBUG 508 if (npx_exists) { 509 u_int masked_exceptions; 510 511 masked_exceptions = GET_FPU_CW(td) & GET_FPU_SW(td) & 0x7f; 512 /* 513 * Log exceptions that would have trapped with the old 514 * control word (overflow, divide by 0, and invalid operand). 515 */ 516 if (masked_exceptions & 0x0d) 517 log(LOG_ERR, 518 "pid %d (%s) exited with masked floating point exceptions 0x%02x\n", 519 td->td_proc->p_pid, td->td_proc->p_comm, 520 masked_exceptions); 521 } 522#endif 523} 524 525int 526npxformat() 527{ 528 529 if (!npx_exists) 530 return (_MC_FPFMT_NODEV); 531#ifdef CPU_ENABLE_SSE 532 if (cpu_fxsr) 533 return (_MC_FPFMT_XMM); 534#endif 535 return (_MC_FPFMT_387); 536} 537 538/* 539 * The following mechanism is used to ensure that the FPE_... value 540 * that is passed as a trapcode to the signal handler of the user 541 * process does not have more than one bit set. 542 * 543 * Multiple bits may be set if the user process modifies the control 544 * word while a status word bit is already set. While this is a sign 545 * of bad coding, we have no choise than to narrow them down to one 546 * bit, since we must not send a trapcode that is not exactly one of 547 * the FPE_ macros. 548 * 549 * The mechanism has a static table with 127 entries. 
 Each combination
 * of the 7 FPU status word exception bits directly translates to a
 * position in this table, where a single FPE_... value is stored.
 * This FPE_... value stored there is considered the "most important"
 * of the exception bits and will be sent as the signal code.  The
 * precedence of the bits is based upon Intel Document "Numerical
 * Applications", Chapter "Special Computational Situations".
 *
 * The macro to choose one of these values does these steps: 1) Throw
 * away status word bits that cannot be masked.  2) Throw away the bits
 * currently masked in the control word, assuming the user isn't
 * interested in them anymore.  3) Reinsert status word bit 7 (stack
 * fault) if it is set, which cannot be masked but must be preserved.
 * 4) Use the remaining bits to point into the trapcode table.
 *
 * The 6 maskable bits in order of their preference, as stated in the
 * above referenced Intel manual:
 * 1  Invalid operation (FP_X_INV)
 * 1a   Stack underflow
 * 1b   Stack overflow
 * 1c   Operand of unsupported format
 * 1d   SNaN operand.
571 * 2 QNaN operand (not an exception, irrelavant here) 572 * 3 Any other invalid-operation not mentioned above or zero divide 573 * (FP_X_INV, FP_X_DZ) 574 * 4 Denormal operand (FP_X_DNML) 575 * 5 Numeric over/underflow (FP_X_OFL, FP_X_UFL) 576 * 6 Inexact result (FP_X_IMP) 577 */ 578static char fpetable[128] = { 579 0, 580 FPE_FLTINV, /* 1 - INV */ 581 FPE_FLTUND, /* 2 - DNML */ 582 FPE_FLTINV, /* 3 - INV | DNML */ 583 FPE_FLTDIV, /* 4 - DZ */ 584 FPE_FLTINV, /* 5 - INV | DZ */ 585 FPE_FLTDIV, /* 6 - DNML | DZ */ 586 FPE_FLTINV, /* 7 - INV | DNML | DZ */ 587 FPE_FLTOVF, /* 8 - OFL */ 588 FPE_FLTINV, /* 9 - INV | OFL */ 589 FPE_FLTUND, /* A - DNML | OFL */ 590 FPE_FLTINV, /* B - INV | DNML | OFL */ 591 FPE_FLTDIV, /* C - DZ | OFL */ 592 FPE_FLTINV, /* D - INV | DZ | OFL */ 593 FPE_FLTDIV, /* E - DNML | DZ | OFL */ 594 FPE_FLTINV, /* F - INV | DNML | DZ | OFL */ 595 FPE_FLTUND, /* 10 - UFL */ 596 FPE_FLTINV, /* 11 - INV | UFL */ 597 FPE_FLTUND, /* 12 - DNML | UFL */ 598 FPE_FLTINV, /* 13 - INV | DNML | UFL */ 599 FPE_FLTDIV, /* 14 - DZ | UFL */ 600 FPE_FLTINV, /* 15 - INV | DZ | UFL */ 601 FPE_FLTDIV, /* 16 - DNML | DZ | UFL */ 602 FPE_FLTINV, /* 17 - INV | DNML | DZ | UFL */ 603 FPE_FLTOVF, /* 18 - OFL | UFL */ 604 FPE_FLTINV, /* 19 - INV | OFL | UFL */ 605 FPE_FLTUND, /* 1A - DNML | OFL | UFL */ 606 FPE_FLTINV, /* 1B - INV | DNML | OFL | UFL */ 607 FPE_FLTDIV, /* 1C - DZ | OFL | UFL */ 608 FPE_FLTINV, /* 1D - INV | DZ | OFL | UFL */ 609 FPE_FLTDIV, /* 1E - DNML | DZ | OFL | UFL */ 610 FPE_FLTINV, /* 1F - INV | DNML | DZ | OFL | UFL */ 611 FPE_FLTRES, /* 20 - IMP */ 612 FPE_FLTINV, /* 21 - INV | IMP */ 613 FPE_FLTUND, /* 22 - DNML | IMP */ 614 FPE_FLTINV, /* 23 - INV | DNML | IMP */ 615 FPE_FLTDIV, /* 24 - DZ | IMP */ 616 FPE_FLTINV, /* 25 - INV | DZ | IMP */ 617 FPE_FLTDIV, /* 26 - DNML | DZ | IMP */ 618 FPE_FLTINV, /* 27 - INV | DNML | DZ | IMP */ 619 FPE_FLTOVF, /* 28 - OFL | IMP */ 620 FPE_FLTINV, /* 29 - INV | OFL | IMP */ 621 FPE_FLTUND, /* 2A - DNML | OFL 
| IMP */ 622 FPE_FLTINV, /* 2B - INV | DNML | OFL | IMP */ 623 FPE_FLTDIV, /* 2C - DZ | OFL | IMP */ 624 FPE_FLTINV, /* 2D - INV | DZ | OFL | IMP */ 625 FPE_FLTDIV, /* 2E - DNML | DZ | OFL | IMP */ 626 FPE_FLTINV, /* 2F - INV | DNML | DZ | OFL | IMP */ 627 FPE_FLTUND, /* 30 - UFL | IMP */ 628 FPE_FLTINV, /* 31 - INV | UFL | IMP */ 629 FPE_FLTUND, /* 32 - DNML | UFL | IMP */ 630 FPE_FLTINV, /* 33 - INV | DNML | UFL | IMP */ 631 FPE_FLTDIV, /* 34 - DZ | UFL | IMP */ 632 FPE_FLTINV, /* 35 - INV | DZ | UFL | IMP */ 633 FPE_FLTDIV, /* 36 - DNML | DZ | UFL | IMP */ 634 FPE_FLTINV, /* 37 - INV | DNML | DZ | UFL | IMP */ 635 FPE_FLTOVF, /* 38 - OFL | UFL | IMP */ 636 FPE_FLTINV, /* 39 - INV | OFL | UFL | IMP */ 637 FPE_FLTUND, /* 3A - DNML | OFL | UFL | IMP */ 638 FPE_FLTINV, /* 3B - INV | DNML | OFL | UFL | IMP */ 639 FPE_FLTDIV, /* 3C - DZ | OFL | UFL | IMP */ 640 FPE_FLTINV, /* 3D - INV | DZ | OFL | UFL | IMP */ 641 FPE_FLTDIV, /* 3E - DNML | DZ | OFL | UFL | IMP */ 642 FPE_FLTINV, /* 3F - INV | DNML | DZ | OFL | UFL | IMP */ 643 FPE_FLTSUB, /* 40 - STK */ 644 FPE_FLTSUB, /* 41 - INV | STK */ 645 FPE_FLTUND, /* 42 - DNML | STK */ 646 FPE_FLTSUB, /* 43 - INV | DNML | STK */ 647 FPE_FLTDIV, /* 44 - DZ | STK */ 648 FPE_FLTSUB, /* 45 - INV | DZ | STK */ 649 FPE_FLTDIV, /* 46 - DNML | DZ | STK */ 650 FPE_FLTSUB, /* 47 - INV | DNML | DZ | STK */ 651 FPE_FLTOVF, /* 48 - OFL | STK */ 652 FPE_FLTSUB, /* 49 - INV | OFL | STK */ 653 FPE_FLTUND, /* 4A - DNML | OFL | STK */ 654 FPE_FLTSUB, /* 4B - INV | DNML | OFL | STK */ 655 FPE_FLTDIV, /* 4C - DZ | OFL | STK */ 656 FPE_FLTSUB, /* 4D - INV | DZ | OFL | STK */ 657 FPE_FLTDIV, /* 4E - DNML | DZ | OFL | STK */ 658 FPE_FLTSUB, /* 4F - INV | DNML | DZ | OFL | STK */ 659 FPE_FLTUND, /* 50 - UFL | STK */ 660 FPE_FLTSUB, /* 51 - INV | UFL | STK */ 661 FPE_FLTUND, /* 52 - DNML | UFL | STK */ 662 FPE_FLTSUB, /* 53 - INV | DNML | UFL | STK */ 663 FPE_FLTDIV, /* 54 - DZ | UFL | STK */ 664 FPE_FLTSUB, /* 55 - INV | DZ | UFL | STK */ 665 
FPE_FLTDIV, /* 56 - DNML | DZ | UFL | STK */ 666 FPE_FLTSUB, /* 57 - INV | DNML | DZ | UFL | STK */ 667 FPE_FLTOVF, /* 58 - OFL | UFL | STK */ 668 FPE_FLTSUB, /* 59 - INV | OFL | UFL | STK */ 669 FPE_FLTUND, /* 5A - DNML | OFL | UFL | STK */ 670 FPE_FLTSUB, /* 5B - INV | DNML | OFL | UFL | STK */ 671 FPE_FLTDIV, /* 5C - DZ | OFL | UFL | STK */ 672 FPE_FLTSUB, /* 5D - INV | DZ | OFL | UFL | STK */ 673 FPE_FLTDIV, /* 5E - DNML | DZ | OFL | UFL | STK */ 674 FPE_FLTSUB, /* 5F - INV | DNML | DZ | OFL | UFL | STK */ 675 FPE_FLTRES, /* 60 - IMP | STK */ 676 FPE_FLTSUB, /* 61 - INV | IMP | STK */ 677 FPE_FLTUND, /* 62 - DNML | IMP | STK */ 678 FPE_FLTSUB, /* 63 - INV | DNML | IMP | STK */ 679 FPE_FLTDIV, /* 64 - DZ | IMP | STK */ 680 FPE_FLTSUB, /* 65 - INV | DZ | IMP | STK */ 681 FPE_FLTDIV, /* 66 - DNML | DZ | IMP | STK */ 682 FPE_FLTSUB, /* 67 - INV | DNML | DZ | IMP | STK */ 683 FPE_FLTOVF, /* 68 - OFL | IMP | STK */ 684 FPE_FLTSUB, /* 69 - INV | OFL | IMP | STK */ 685 FPE_FLTUND, /* 6A - DNML | OFL | IMP | STK */ 686 FPE_FLTSUB, /* 6B - INV | DNML | OFL | IMP | STK */ 687 FPE_FLTDIV, /* 6C - DZ | OFL | IMP | STK */ 688 FPE_FLTSUB, /* 6D - INV | DZ | OFL | IMP | STK */ 689 FPE_FLTDIV, /* 6E - DNML | DZ | OFL | IMP | STK */ 690 FPE_FLTSUB, /* 6F - INV | DNML | DZ | OFL | IMP | STK */ 691 FPE_FLTUND, /* 70 - UFL | IMP | STK */ 692 FPE_FLTSUB, /* 71 - INV | UFL | IMP | STK */ 693 FPE_FLTUND, /* 72 - DNML | UFL | IMP | STK */ 694 FPE_FLTSUB, /* 73 - INV | DNML | UFL | IMP | STK */ 695 FPE_FLTDIV, /* 74 - DZ | UFL | IMP | STK */ 696 FPE_FLTSUB, /* 75 - INV | DZ | UFL | IMP | STK */ 697 FPE_FLTDIV, /* 76 - DNML | DZ | UFL | IMP | STK */ 698 FPE_FLTSUB, /* 77 - INV | DNML | DZ | UFL | IMP | STK */ 699 FPE_FLTOVF, /* 78 - OFL | UFL | IMP | STK */ 700 FPE_FLTSUB, /* 79 - INV | OFL | UFL | IMP | STK */ 701 FPE_FLTUND, /* 7A - DNML | OFL | UFL | IMP | STK */ 702 FPE_FLTSUB, /* 7B - INV | DNML | OFL | UFL | IMP | STK */ 703 FPE_FLTDIV, /* 7C - DZ | OFL | UFL | IMP | STK */ 704 
FPE_FLTSUB, /* 7D - INV | DZ | OFL | UFL | IMP | STK */ 705 FPE_FLTDIV, /* 7E - DNML | DZ | OFL | UFL | IMP | STK */ 706 FPE_FLTSUB, /* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */ 707}; 708 709/* 710 * Preserve the FP status word, clear FP exceptions, then generate a SIGFPE. 711 * 712 * Clearing exceptions is necessary mainly to avoid IRQ13 bugs. We now 713 * depend on longjmp() restoring a usable state. Restoring the state 714 * or examining it might fail if we didn't clear exceptions. 715 * 716 * The error code chosen will be one of the FPE_... macros. It will be 717 * sent as the second argument to old BSD-style signal handlers and as 718 * "siginfo_t->si_code" (second argument) to SA_SIGINFO signal handlers. 719 * 720 * XXX the FP state is not preserved across signal handlers. So signal 721 * handlers cannot afford to do FP unless they preserve the state or 722 * longjmp() out. Both preserving the state and longjmp()ing may be 723 * destroyed by IRQ13 bugs. Clearing FP exceptions is not an acceptable 724 * solution for signals other than SIGFPE. 725 */ 726int 727npxtrap() 728{ 729 register_t savecrit; 730 u_short control, status; 731 732 if (!npx_exists) { 733 printf("npxtrap: fpcurthread = %p, curthread = %p, npx_exists = %d\n", 734 PCPU_GET(fpcurthread), curthread, npx_exists); 735 panic("npxtrap from nowhere"); 736 } 737 savecrit = intr_disable(); 738 739 /* 740 * Interrupt handling (for another interrupt) may have pushed the 741 * state to memory. Fetch the relevant parts of the state from 742 * wherever they are. 
743 */ 744 if (PCPU_GET(fpcurthread) != curthread) { 745 control = GET_FPU_CW(curthread); 746 status = GET_FPU_SW(curthread); 747 } else { 748 fnstcw(&control); 749 fnstsw(&status); 750 } 751 752 if (PCPU_GET(fpcurthread) == curthread) 753 fnclex(); 754 intr_restore(savecrit); 755 return (fpetable[status & ((~control & 0x3f) | 0x40)]); 756} 757 758/* 759 * Implement device not available (DNA) exception 760 * 761 * It would be better to switch FP context here (if curthread != fpcurthread) 762 * and not necessarily for every context switch, but it is too hard to 763 * access foreign pcb's. 764 */ 765 766static int err_count = 0; 767 768int 769npxdna(void) 770{ 771 struct pcb *pcb; 772 register_t s; 773 774 if (!npx_exists) 775 return (0); 776 if (PCPU_GET(fpcurthread) == curthread) { 777 printf("npxdna: fpcurthread == curthread %d times\n", 778 ++err_count); 779 stop_emulating(); 780 return (1); 781 } 782 if (PCPU_GET(fpcurthread) != NULL) { 783 printf("npxdna: fpcurthread = %p (%d), curthread = %p (%d)\n", 784 PCPU_GET(fpcurthread), 785 PCPU_GET(fpcurthread)->td_proc->p_pid, 786 curthread, curthread->td_proc->p_pid); 787 panic("npxdna"); 788 } 789 s = intr_disable(); 790 stop_emulating(); 791 /* 792 * Record new context early in case frstor causes an IRQ13. 793 */ 794 PCPU_SET(fpcurthread, curthread); 795 pcb = PCPU_GET(curpcb); 796 797#ifdef CPU_ENABLE_SSE 798 if (cpu_fxsr) 799 fpu_clean_state(); 800#endif 801 802 if ((pcb->pcb_flags & PCB_NPXINITDONE) == 0) { 803 /* 804 * This is the first time this thread has used the FPU or 805 * the PCB doesn't contain a clean FPU state. Explicitly 806 * load an initial state. 
807 */ 808 fpurstor(&npx_initialstate); 809 if (pcb->pcb_initial_npxcw != __INITIAL_NPXCW__) 810 fldcw(&pcb->pcb_initial_npxcw); 811 pcb->pcb_flags |= PCB_NPXINITDONE; 812 if (PCB_USER_FPU(pcb)) 813 pcb->pcb_flags |= PCB_NPXUSERINITDONE; 814 } else { 815 /* 816 * The following fpurstor() may cause an IRQ13 when the 817 * state being restored has a pending error. The error will 818 * appear to have been triggered by the current (npx) user 819 * instruction even when that instruction is a no-wait 820 * instruction that should not trigger an error (e.g., 821 * fnclex). On at least one 486 system all of the no-wait 822 * instructions are broken the same as frstor, so our 823 * treatment does not amplify the breakage. On at least 824 * one 386/Cyrix 387 system, fnclex works correctly while 825 * frstor and fnsave are broken, so our treatment breaks 826 * fnclex if it is the first FPU instruction after a context 827 * switch. 828 */ 829 fpurstor(pcb->pcb_save); 830 } 831 intr_restore(s); 832 833 return (1); 834} 835 836/* 837 * Wrapper for fnsave instruction, partly to handle hardware bugs. When npx 838 * exceptions are reported via IRQ13, spurious IRQ13's may be triggered by 839 * no-wait npx instructions. See the Intel application note AP-578 for 840 * details. This doesn't cause any additional complications here. IRQ13's 841 * are inherently asynchronous unless the CPU is frozen to deliver them -- 842 * one that started in userland may be delivered many instructions later, 843 * after the process has entered the kernel. It may even be delivered after 844 * the fnsave here completes. A spurious IRQ13 for the fnsave is handled in 845 * the same way as a very-late-arriving non-spurious IRQ13 from user mode: 846 * it is normally ignored at first because we set fpcurthread to NULL; it is 847 * normally retriggered in npxdna() after return to user mode. 
 *
 * npxsave() must be called with interrupts disabled, so that it clears
 * fpcurthread atomically with saving the state.  We require callers to do the
 * disabling, since most callers need to disable interrupts anyway to call
 * npxsave() atomically with checking fpcurthread.
 *
 * A previous version of npxsave() went to great lengths to execute fnsave
 * with interrupts enabled in case executing it froze the CPU.  This case
 * can't happen, at least for Intel CPU/NPX's.  Spurious IRQ13's don't imply
 * spurious freezes.
 */
void
npxsave(addr)
	union savefpu *addr;
{

	stop_emulating();
	fpusave(addr);

	/*
	 * Re-enable emulation and give up ownership; the next FPU use by
	 * any thread will trap and reload a context.
	 */
	start_emulating();
	PCPU_SET(fpcurthread, NULL);
}

/*
 * Release ownership of the FPU without saving its context anywhere.
 * This should be called with interrupts disabled and only when the owning
 * FPU thread is non-null.
 */
void
npxdrop()
{
	struct thread *td;

	/*
	 * Discard pending exceptions in the !cpu_fxsr case so that unmasked
	 * ones don't cause a panic on the next frstor.
	 */
#ifdef CPU_ENABLE_SSE
	if (!cpu_fxsr)
#endif
		fnclex();

	td = PCPU_GET(fpcurthread);
	PCPU_SET(fpcurthread, NULL);
	/* The dropped hardware context is stale; force a reload on next use. */
	td->td_pcb->pcb_flags &= ~PCB_NPXINITDONE;
	start_emulating();
}

/*
 * Get the state of the FPU without dropping ownership (if possible).
 * It returns the FPU ownership status.
 */
int
npxgetregs(struct thread *td, union savefpu *addr)
{
	struct pcb *pcb;
	register_t s;

	if (!npx_exists)
		return (_MC_FPOWNED_NONE);

	pcb = td->td_pcb;
	if ((pcb->pcb_flags & PCB_NPXINITDONE) == 0) {
		/*
		 * The thread has never used the FPU: synthesize the pristine
		 * state, applying the per-process initial control word.
		 */
		bcopy(&npx_initialstate, addr, sizeof(npx_initialstate));
		SET_FPU_CW(addr, pcb->pcb_initial_npxcw);
		return (_MC_FPOWNED_NONE);
	}
	s = intr_disable();
	if (td == PCPU_GET(fpcurthread)) {
		fpusave(addr);
#ifdef CPU_ENABLE_SSE
		if (!cpu_fxsr)
#endif
			/*
			 * fnsave initializes the FPU and destroys whatever
			 * context it contains.  Make sure the FPU owner
			 * starts with a clean state next time.
			 */
			npxdrop();
		intr_restore(s);
		return (_MC_FPOWNED_FPU);
	} else {
		intr_restore(s);
		bcopy(pcb->pcb_save, addr, sizeof(*addr));
		return (_MC_FPOWNED_PCB);
	}
}

/*
 * Get the user-mode state of the FPU for td without dropping ownership
 * (if possible).  Like npxgetregs(), but consults PCB_NPXUSERINITDONE and
 * pcb_user_save, so it reports the user context even while the thread is
 * inside an fpu_kern_enter() section.  Returns the FPU ownership status.
 */
int
npxgetuserregs(struct thread *td, union savefpu *addr)
{
	struct pcb *pcb;
	register_t s;

	if (!npx_exists)
		return (_MC_FPOWNED_NONE);

	pcb = td->td_pcb;
	if ((pcb->pcb_flags & PCB_NPXUSERINITDONE) == 0) {
		/* Never used by userland: report the pristine state. */
		bcopy(&npx_initialstate, addr, sizeof(npx_initialstate));
		SET_FPU_CW(addr, pcb->pcb_initial_npxcw);
		return (_MC_FPOWNED_NONE);
	}
	s = intr_disable();
	if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
		fpusave(addr);
#ifdef CPU_ENABLE_SSE
		if (!cpu_fxsr)
#endif
			/*
			 * fnsave initializes the FPU and destroys whatever
			 * context it contains.  Make sure the FPU owner
			 * starts with a clean state next time.
			 */
			npxdrop();
		intr_restore(s);
		return (_MC_FPOWNED_FPU);
	} else {
		intr_restore(s);
		bcopy(&pcb->pcb_user_save, addr, sizeof(*addr));
		return (_MC_FPOWNED_PCB);
	}
}

/*
 * Set the state of the FPU.
 */
void
npxsetregs(struct thread *td, union savefpu *addr)
{
	struct pcb *pcb;
	register_t s;

	if (!npx_exists)
		return;

	pcb = td->td_pcb;
	s = intr_disable();
	if (td == PCPU_GET(fpcurthread)) {
		/*
		 * The thread owns the hardware: load the new context
		 * directly into the FPU.
		 */
#ifdef CPU_ENABLE_SSE
		if (!cpu_fxsr)
#endif
			fnclex();	/* As in npxdrop(). */
		fpurstor(addr);
		intr_restore(s);
	} else {
		/* Not owner: update the saved copy for the next reload. */
		intr_restore(s);
		bcopy(addr, pcb->pcb_save, sizeof(*addr));
	}
	if (PCB_USER_FPU(pcb))
		pcb->pcb_flags |= PCB_NPXUSERINITDONE;
	pcb->pcb_flags |= PCB_NPXINITDONE;
}

/*
 * Set the user-mode state of the FPU for td.  Like npxsetregs(), but
 * targets pcb_user_save, so the update lands in the user context even
 * while the thread is inside an fpu_kern_enter() section.
 */
void
npxsetuserregs(struct thread *td, union savefpu *addr)
{
	struct pcb *pcb;
	register_t s;

	if (!npx_exists)
		return;

	pcb = td->td_pcb;
	s = intr_disable();
	if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
#ifdef CPU_ENABLE_SSE
		if (!cpu_fxsr)
#endif
			fnclex();	/* As in npxdrop(). */
		fpurstor(addr);
		intr_restore(s);
		pcb->pcb_flags |= PCB_NPXUSERINITDONE | PCB_NPXINITDONE;
	} else {
		intr_restore(s);
		bcopy(addr, &pcb->pcb_user_save, sizeof(*addr));
		/*
		 * Only mark the live context initialized when it is the
		 * user context we just wrote; a kernel context keeps its
		 * own PCB_NPXINITDONE state.
		 */
		if (PCB_USER_FPU(pcb))
			pcb->pcb_flags |= PCB_NPXINITDONE;
		pcb->pcb_flags |= PCB_NPXUSERINITDONE;
	}
}

/*
 * Save the hardware FPU context into *addr, using fxsave when the CPU
 * supports it and fnsave otherwise.  Note that fnsave also reinitializes
 * the FPU as a side effect; callers that keep ownership must account
 * for that (see npxgetregs()).
 */
static void
fpusave(addr)
	union savefpu *addr;
{

#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr)
		fxsave(addr);
	else
#endif
		fnsave(addr);
}

#ifdef CPU_ENABLE_SSE
/*
 * On AuthenticAMD processors, the fxrstor instruction does not restore
 * the x87's stored last instruction pointer, last data pointer, and last
 * opcode values, except in the rare case in which the exception summary
 * (ES) bit in the x87 status word is set to 1.
 *
 * In order to avoid leaking this information across processes, we clean
 * these values by performing a dummy load before executing fxrstor().
 */
static void
fpu_clean_state(void)
{
	static float dummy_variable = 0.0;
	u_short status;

	/*
	 * Clear the ES bit in the x87 status word if it is currently
	 * set, in order to avoid causing a fault in the upcoming load.
	 */
	fnstsw(&status);
	if (status & 0x80)
		fnclex();

	/*
	 * Load the dummy variable into the x87 stack.  This mangles
	 * the x87 stack, but we don't care since we're about to call
	 * fxrstor() anyway.  The ffree frees st(7) first so the fld
	 * cannot fault on a full register stack.
	 */
	__asm __volatile("ffree %%st(7); fld %0" : : "m" (dummy_variable));
}
#endif /* CPU_ENABLE_SSE */

/*
 * Load the hardware FPU context from *addr, using fxrstor when the CPU
 * supports it and frstor otherwise.  Counterpart of fpusave().
 */
static void
fpurstor(addr)
	union savefpu *addr;
{

#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr)
		fxrstor(addr);
	else
#endif
		frstor(addr);
}

#ifdef I586_CPU_XXX
/*
 * Time one pass of the given bulk-memory routine over a 1MB scratch
 * buffer and return the elapsed time in microseconds (clamped to a
 * minimum of 1).  On allocation failure it returns BUFSIZE, a large
 * value that makes the routine look slow to the caller.
 * Presumably used to benchmark candidate i586 bzero/bcopy
 * implementations -- callers are not visible in this section.
 */
static long
timezero(funcname, func)
	const char *funcname;
	void (*func)(void *buf, size_t len);

{
	void *buf;
#define	BUFSIZE		1048576
	long usec;
	struct timeval finish, start;

	buf = malloc(BUFSIZE, M_TEMP, M_NOWAIT);
	if (buf == NULL)
		return (BUFSIZE);
	microtime(&start);
	(*func)(buf, BUFSIZE);
	microtime(&finish);
	usec = 1000000 * (finish.tv_sec - start.tv_sec) +
	    finish.tv_usec - start.tv_usec;
	if (usec <= 0)
		usec = 1;	/* avoid division by zero below */
	if (bootverbose)
		printf("%s bandwidth = %u kBps\n", funcname,
		    (u_int32_t)(((BUFSIZE >> 10) * 1000000) / usec));
	free(buf, M_TEMP);
	return (usec);
}
#endif /* I586_CPU_XXX */

static device_method_t npx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	npx_identify),
	DEVMETHOD(device_probe,		npx_probe),
	DEVMETHOD(device_attach,	npx_attach),
	DEVMETHOD(device_detach,	bus_generic_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),

	{ 0, 0 }
};

static driver_t npx_driver = {
	"npx",
	npx_methods,
	1,			/* no softc */
};

static devclass_t npx_devclass;

/*
 * We prefer to attach to the root nexus so that the usual case (exception 16)
 * doesn't describe the processor as being `on isa'.
 */
DRIVER_MODULE(npx, nexus, npx_driver, npx_devclass, 0, 0);

#ifdef DEV_ISA
/*
 * This sucks up the legacy ISA support assignments from PNPBIOS/ACPI.
 */
static struct isa_pnp_id npxisa_ids[] = {
	{ 0x040cd041, "Legacy ISA coprocessor support" }, /* PNP0C04 */
	{ 0 }
};

/*
 * Match the PNP0C04 coprocessor node.  On a match (result <= 0) the
 * device is quieted so the placeholder attach below prints nothing.
 */
static int
npxisa_probe(device_t dev)
{
	int result;
	if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev, npxisa_ids)) <= 0) {
		device_quiet(dev);
	}
	return(result);
}

static int
npxisa_attach(device_t dev)
{
	/* Nothing to do; this driver exists only to claim the PNP node. */
	return (0);
}

static device_method_t npxisa_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		npxisa_probe),
	DEVMETHOD(device_attach,	npxisa_attach),
	DEVMETHOD(device_detach,	bus_generic_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),

	{ 0, 0 }
};

static driver_t npxisa_driver = {
	"npxisa",
	npxisa_methods,
	1,			/* no softc */
};

static devclass_t npxisa_devclass;

DRIVER_MODULE(npxisa, isa, npxisa_driver, npxisa_devclass, 0, 0);
#ifndef PC98
DRIVER_MODULE(npxisa, acpi, npxisa_driver, npxisa_devclass, 0, 0);
#endif
#endif /* DEV_ISA */

/*
 * Enter a kernel FPU section for td: redirect pcb_save to the caller's
 * ctx->hwstate so kernel code may use the FPU without clobbering the
 * saved user state.  Must be paired with fpu_kern_leave() on the same
 * ctx.  'flags' is currently unused.  Always returns 0.
 */
int
fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	KASSERT(!PCB_USER_FPU(pcb) || pcb->pcb_save == &pcb->pcb_user_save,
	    ("mangled pcb_save"));
	ctx->flags = 0;
	/*
	 * Record whether the outgoing context was initialized so that
	 * fpu_kern_leave() can restore PCB_NPXINITDONE faithfully.
	 */
	if ((pcb->pcb_flags & PCB_NPXINITDONE) != 0)
		ctx->flags |= FPU_KERN_CTX_NPXINITDONE;
	/*
	 * npxexit() (defined earlier in this file) presumably flushes
	 * the live hardware state for td -- confirm against its
	 * definition.
	 */
	npxexit(td);
	ctx->prev = pcb->pcb_save;
	pcb->pcb_save = &ctx->hwstate;
	pcb->pcb_flags |= PCB_KERNNPX;
	pcb->pcb_flags &= ~PCB_NPXINITDONE;
	return (0);
}

/*
 * Leave the kernel FPU section entered with ctx: drop any live hardware
 * state and point pcb_save back at the previously saved context,
 * restoring the PCB_NPXINITDONE flag to match that context.  Always
 * returns 0.
 */
int
fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx)
{
	struct pcb *pcb;
	register_t savecrit;

	pcb = td->td_pcb;
	savecrit = intr_disable();
	if (curthread == PCPU_GET(fpcurthread))
		npxdrop();
	intr_restore(savecrit);
	pcb->pcb_save = ctx->prev;
	if (pcb->pcb_save == &pcb->pcb_user_save) {
		/* Outermost leave: back to the user's FPU context. */
		if ((pcb->pcb_flags & PCB_NPXUSERINITDONE) != 0)
			pcb->pcb_flags |= PCB_NPXINITDONE;
		else
			pcb->pcb_flags &= ~PCB_NPXINITDONE;
		pcb->pcb_flags &= ~PCB_KERNNPX;
	} else {
		/*
		 * Nested sections: restore the flag captured by the
		 * enclosing fpu_kern_enter().
		 */
		if ((ctx->flags & FPU_KERN_CTX_NPXINITDONE) != 0)
			pcb->pcb_flags |= PCB_NPXINITDONE;
		else
			pcb->pcb_flags &= ~PCB_NPXINITDONE;
		KASSERT(!PCB_USER_FPU(pcb), ("unpaired fpu_kern_leave"));
	}
	return (0);
}

/*
 * Permanently mark the calling kernel thread as an FPU user; unlike
 * fpu_kern_enter() there is no matching leave.  Only valid for
 * TDP_KTHREAD threads that are not already inside a kernel FPU section.
 * 'flags' is currently unused.  Always returns 0.
 */
int
fpu_kern_thread(u_int flags)
{
	struct pcb *pcb;

	pcb = PCPU_GET(curpcb);
	KASSERT((curthread->td_pflags & TDP_KTHREAD) != 0,
	    ("Only kthread may use fpu_kern_thread"));
	KASSERT(pcb->pcb_save == &pcb->pcb_user_save, ("mangled pcb_save"));
	KASSERT(PCB_USER_FPU(pcb), ("recursive call"));

	pcb->pcb_flags |= PCB_KERNNPX;
	return (0);
}

/*
 * Return non-zero iff the calling thread is a kernel thread that has
 * been marked as an FPU user by fpu_kern_thread().  'flags' is
 * currently unused.
 */
int
is_fpu_kern_thread(u_int flags)
{

	if ((curthread->td_pflags & TDP_KTHREAD) == 0)
		return (0);
	return ((PCPU_GET(curpcb)->pcb_flags & PCB_KERNNPX) != 0);
}