/* npx.c, FreeBSD revision 209460 */
1/*- 2 * Copyright (c) 1990 William Jolitz. 3 * Copyright (c) 1991 The Regents of the University of California. 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 4. Neither the name of the University nor the names of its contributors 15 * may be used to endorse or promote products derived from this software 16 * without specific prior written permission. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 
 *
 * from: @(#)npx.c	7.2 (Berkeley) 5/12/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/i386/isa/npx.c 209460 2010-06-23 10:40:28Z kib $");

#include "opt_cpu.h"
#include "opt_isa.h"
#include "opt_npx.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <machine/bus.h>
#include <sys/rman.h>
#ifdef NPX_DEBUG
#include <sys/syslog.h>
#endif
#include <sys/signalvar.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/resource.h>
#include <machine/specialreg.h>
#include <machine/segments.h>
#include <machine/ucontext.h>

#include <machine/intr_machdep.h>
#ifdef XEN
#include <machine/xen/xen-os.h>
#include <xen/hypervisor.h>
#endif

#ifdef DEV_ISA
#include <isa/isavar.h>
#endif

#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif

/*
 * 387 and 287 Numeric Coprocessor Extension (NPX) Driver.
 */

#if defined(__GNUCLIKE_ASM) && !defined(lint)

/* Inline-asm wrappers for the raw x87 control instructions. */
#define	fldcw(addr)		__asm("fldcw %0" : : "m" (*(addr)))
#define	fnclex()		__asm("fnclex")
#define	fninit()		__asm("fninit")
#define	fnsave(addr)		__asm __volatile("fnsave %0" : "=m" (*(addr)))
#define	fnstcw(addr)		__asm __volatile("fnstcw %0" : "=m" (*(addr)))
#define	fnstsw(addr)		__asm __volatile("fnstsw %0" : "=m" (*(addr)))
#define	fp_divide_by_0()	__asm("fldz; fld1; fdiv %st,%st(1); fnop")
#define	frstor(addr)		__asm("frstor %0" : : "m" (*(addr)))
#ifdef CPU_ENABLE_SSE
#define	fxrstor(addr)		__asm("fxrstor %0" : : "m" (*(addr)))
#define	fxsave(addr)		__asm __volatile("fxsave %0" : "=m" (*(addr)))
#define	ldmxcsr(__csr)		__asm __volatile("ldmxcsr %0" : : "m" (__csr))
#endif
#ifdef XEN
#define	start_emulating()	(HYPERVISOR_fpu_taskswitch(1))
#define	stop_emulating()	(HYPERVISOR_fpu_taskswitch(0))
#else
/*
 * Set/clear CR0_TS so that the next FPU instruction either traps (device
 * not available, handled by npxdna()) or executes normally.
 */
#define	start_emulating()	__asm("smsw %%ax; orb %0,%%al; lmsw %%ax" \
				    : : "n" (CR0_TS) : "ax")
#define	stop_emulating()	__asm("clts")
#endif
#else	/* !(__GNUCLIKE_ASM && !lint) */

void	fldcw(caddr_t addr);
void	fnclex(void);
void	fninit(void);
void	fnsave(caddr_t addr);
void	fnstcw(caddr_t addr);
void	fnstsw(caddr_t addr);
void	fp_divide_by_0(void);
void	frstor(caddr_t addr);
#ifdef CPU_ENABLE_SSE
void	fxsave(caddr_t addr);
void	fxrstor(caddr_t addr);
#endif
void	start_emulating(void);
void	stop_emulating(void);

#endif	/* __GNUCLIKE_ASM && !lint */

/*
 * Accessors for the FPU control/status words in a thread's saved state;
 * the layout differs between the fxsave (sv_xmm) and fnsave (sv_87) forms.
 */
#ifdef CPU_ENABLE_SSE
#define GET_FPU_CW(thread) \
	(cpu_fxsr ? \
		(thread)->td_pcb->pcb_save->sv_xmm.sv_env.en_cw : \
		(thread)->td_pcb->pcb_save->sv_87.sv_env.en_cw)
#define GET_FPU_SW(thread) \
	(cpu_fxsr ? \
		(thread)->td_pcb->pcb_save->sv_xmm.sv_env.en_sw : \
		(thread)->td_pcb->pcb_save->sv_87.sv_env.en_sw)
#define SET_FPU_CW(savefpu, value) do { \
	if (cpu_fxsr) \
		(savefpu)->sv_xmm.sv_env.en_cw = (value); \
	else \
		(savefpu)->sv_87.sv_env.en_cw = (value); \
} while (0)
#else /* CPU_ENABLE_SSE */
#define GET_FPU_CW(thread) \
	(thread->td_pcb->pcb_save->sv_87.sv_env.en_cw)
#define GET_FPU_SW(thread) \
	(thread->td_pcb->pcb_save->sv_87.sv_env.en_sw)
#define SET_FPU_CW(savefpu, value) \
	(savefpu)->sv_87.sv_env.en_cw = (value)
#endif /* CPU_ENABLE_SSE */

typedef u_char bool_t;

#ifdef CPU_ENABLE_SSE
static	void	fpu_clean_state(void);
#endif

static	void	fpusave(union savefpu *);
static	void	fpurstor(union savefpu *);
static	int	npx_attach(device_t dev);
static	void	npx_identify(driver_t *driver, device_t parent);
static	int	npx_intr(void *);
static	int	npx_probe(device_t dev);

int	hw_float;		/* XXX currently just alias for npx_exists */

SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD,
    &hw_float, 0, "Floating point instructions executed in hardware");

/* Bumped by npx_intr()/probetrap during npx_probe() to classify the FPU. */
static	volatile u_int	npx_intrs_while_probing;
static	volatile u_int	npx_traps_while_probing;

static	union savefpu	npx_initialstate;	/* pristine state for new threads */
static	bool_t		npx_ex16;		/* exceptions via INT 16 */
static	bool_t		npx_exists;
static	bool_t		npx_irq13;		/* exceptions via IRQ 13 */

/*
 * Probe-time #MF trap handler: count the trap, clear the exception so it
 * does not retrigger, and return.  Installed in the IDT only during probe.
 */
alias_for_inthand_t probetrap;
__asm("								\n\
	.text							\n\
	.p2align 2,0x90						\n\
	.type	" __XSTRING(CNAME(probetrap)) ",@function	\n\
" __XSTRING(CNAME(probetrap)) ":				\n\
	ss							\n\
	incl	" __XSTRING(CNAME(npx_traps_while_probing)) "	\n\
	fnclex							\n\
	iret							\n\
");

/*
 * Identify routine.  Create a connection point on our parent for probing.
 */
static void
npx_identify(driver, parent)
	driver_t *driver;
	device_t parent;
{
	device_t child;

	/* Add a single "npx" child on the parent bus; probing happens later. */
	child = BUS_ADD_CHILD(parent, 0, "npx", 0);
	if (child == NULL)
		panic("npx_identify");
}

/*
 * Do minimal handling of npx interrupts to convert them to traps.
 */
static int
npx_intr(dummy)
	void *dummy;
{
	struct thread *td;

	/* Only meaningful while npx_probe() runs; harmless afterwards. */
	npx_intrs_while_probing++;

	/*
	 * The BUSY# latch must be cleared in all cases so that the next
	 * unmasked npx exception causes an interrupt.
	 */
	outb(IO_NPX, 0);

	/*
	 * fpcurthread is normally non-null here.  In that case, schedule an
	 * AST to finish the exception handling in the correct context
	 * (this interrupt may occur after the thread has entered the
	 * kernel via a syscall or an interrupt).  Otherwise, the npx
	 * state of the thread that caused this interrupt must have been
	 * pushed to the thread's pcb, and clearing of the busy latch
	 * above has finished the (essentially null) handling of this
	 * interrupt.  Control will eventually return to the instruction
	 * that caused it and it will repeat.  We will eventually (usually
	 * soon) win the race to handle the interrupt properly.
	 */
	td = PCPU_GET(fpcurthread);
	if (td != NULL) {
		td->td_pcb->pcb_flags |= PCB_NPXTRAP;
		thread_lock(td);
		td->td_flags |= TDF_ASTPENDING;
		thread_unlock(td);
	}
	return (FILTER_HANDLED);
}

/*
 * Probe routine.  Set flags to tell npxattach() what to do.  Set up an
 * interrupt handler if npx needs to use interrupts.
249 */ 250static int 251npx_probe(dev) 252 device_t dev; 253{ 254 struct gate_descriptor save_idt_npxtrap; 255 struct resource *ioport_res, *irq_res; 256 void *irq_cookie; 257 int ioport_rid, irq_num, irq_rid; 258 u_short control; 259 u_short status; 260 261 device_set_desc(dev, "math processor"); 262 263 /* 264 * Modern CPUs all have an FPU that uses the INT16 interface 265 * and provide a simple way to verify that, so handle the 266 * common case right away. 267 */ 268 if (cpu_feature & CPUID_FPU) { 269 hw_float = npx_exists = 1; 270 npx_ex16 = 1; 271 device_quiet(dev); 272 return (0); 273 } 274 275 save_idt_npxtrap = idt[IDT_MF]; 276 setidt(IDT_MF, probetrap, SDT_SYS386TGT, SEL_KPL, 277 GSEL(GCODE_SEL, SEL_KPL)); 278 ioport_rid = 0; 279 ioport_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &ioport_rid, 280 IO_NPX, IO_NPX + IO_NPXSIZE - 1, IO_NPXSIZE, RF_ACTIVE); 281 if (ioport_res == NULL) 282 panic("npx: can't get ports"); 283 if (resource_int_value("npx", 0, "irq", &irq_num) != 0) 284 irq_num = IRQ_NPX; 285 irq_rid = 0; 286 irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &irq_rid, irq_num, 287 irq_num, 1, RF_ACTIVE); 288 if (irq_res != NULL) { 289 if (bus_setup_intr(dev, irq_res, INTR_TYPE_MISC, 290 npx_intr, NULL, NULL, &irq_cookie) != 0) 291 panic("npx: can't create intr"); 292 } 293 294 /* 295 * Partially reset the coprocessor, if any. Some BIOS's don't reset 296 * it after a warm boot. 297 */ 298 npx_full_reset(); 299 outb(IO_NPX, 0); 300 301 /* 302 * Don't trap while we're probing. 303 */ 304 stop_emulating(); 305 306 /* 307 * Finish resetting the coprocessor, if any. If there is an error 308 * pending, then we may get a bogus IRQ13, but npx_intr() will handle 309 * it OK. Bogus halts have never been observed, but we enabled 310 * IRQ13 and cleared the BUSY# latch early to handle them anyway. 311 */ 312 fninit(); 313 314 /* 315 * Don't use fwait here because it might hang. 316 * Don't use fnop here because it usually hangs if there is no FPU. 
317 */ 318 DELAY(1000); /* wait for any IRQ13 */ 319#ifdef DIAGNOSTIC 320 if (npx_intrs_while_probing != 0) 321 printf("fninit caused %u bogus npx interrupt(s)\n", 322 npx_intrs_while_probing); 323 if (npx_traps_while_probing != 0) 324 printf("fninit caused %u bogus npx trap(s)\n", 325 npx_traps_while_probing); 326#endif 327 /* 328 * Check for a status of mostly zero. 329 */ 330 status = 0x5a5a; 331 fnstsw(&status); 332 if ((status & 0xb8ff) == 0) { 333 /* 334 * Good, now check for a proper control word. 335 */ 336 control = 0x5a5a; 337 fnstcw(&control); 338 if ((control & 0x1f3f) == 0x033f) { 339 hw_float = npx_exists = 1; 340 /* 341 * We have an npx, now divide by 0 to see if exception 342 * 16 works. 343 */ 344 control &= ~(1 << 2); /* enable divide by 0 trap */ 345 fldcw(&control); 346#ifdef FPU_ERROR_BROKEN 347 /* 348 * FPU error signal doesn't work on some CPU 349 * accelerator board. 350 */ 351 npx_ex16 = 1; 352 return (0); 353#endif 354 npx_traps_while_probing = npx_intrs_while_probing = 0; 355 fp_divide_by_0(); 356 DELAY(1000); /* wait for any IRQ13 */ 357 if (npx_traps_while_probing != 0) { 358 /* 359 * Good, exception 16 works. 360 */ 361 npx_ex16 = 1; 362 goto no_irq13; 363 } 364 if (npx_intrs_while_probing != 0) { 365 /* 366 * Bad, we are stuck with IRQ13. 367 */ 368 npx_irq13 = 1; 369 idt[IDT_MF] = save_idt_npxtrap; 370#ifdef SMP 371 if (mp_ncpus > 1) 372 panic("npx0 cannot use IRQ 13 on an SMP system"); 373#endif 374 return (0); 375 } 376 /* 377 * Worse, even IRQ13 is broken. 378 */ 379 } 380 } 381 382 /* Probe failed. Floating point simply won't work. */ 383 device_printf(dev, "WARNING: no FPU!\n"); 384 385 /* FALLTHROUGH */ 386no_irq13: 387 idt[IDT_MF] = save_idt_npxtrap; 388 if (irq_res != NULL) { 389 bus_teardown_intr(dev, irq_res, irq_cookie); 390 bus_release_resource(dev, SYS_RES_IRQ, irq_rid, irq_res); 391 } 392 bus_release_resource(dev, SYS_RES_IOPORT, ioport_rid, ioport_res); 393 return (npx_exists ? 
0 : ENXIO); 394} 395 396/* 397 * Attach routine - announce which it is, and wire into system 398 */ 399static int 400npx_attach(dev) 401 device_t dev; 402{ 403 int flags; 404 register_t s; 405 406 flags = device_get_flags(dev); 407 408 if (npx_irq13) 409 device_printf(dev, "IRQ 13 interface\n"); 410 else if (!device_is_quiet(dev) || bootverbose) 411 device_printf(dev, "INT 16 interface\n"); 412 413 npxinit(); 414 415 s = intr_disable(); 416 stop_emulating(); 417 fpusave(&npx_initialstate); 418 start_emulating(); 419#ifdef CPU_ENABLE_SSE 420 if (cpu_fxsr) { 421 if (npx_initialstate.sv_xmm.sv_env.en_mxcsr_mask) 422 cpu_mxcsr_mask = 423 npx_initialstate.sv_xmm.sv_env.en_mxcsr_mask; 424 else 425 cpu_mxcsr_mask = 0xFFBF; 426 bzero(npx_initialstate.sv_xmm.sv_fp, 427 sizeof(npx_initialstate.sv_xmm.sv_fp)); 428 bzero(npx_initialstate.sv_xmm.sv_xmm, 429 sizeof(npx_initialstate.sv_xmm.sv_xmm)); 430 /* XXX might need even more zeroing. */ 431 } else 432#endif 433 bzero(npx_initialstate.sv_87.sv_ac, 434 sizeof(npx_initialstate.sv_87.sv_ac)); 435 intr_restore(s); 436 437 return (0); 438} 439 440/* 441 * Initialize floating point unit. 442 */ 443void 444npxinit(void) 445{ 446 static union savefpu dummy; 447 register_t savecrit; 448 u_short control; 449 450 if (!npx_exists) 451 return; 452 /* 453 * fninit has the same h/w bugs as fnsave. Use the detoxified 454 * fnsave to throw away any junk in the fpu. npxsave() initializes 455 * the fpu and sets fpcurthread = NULL as important side effects. 456 */ 457 savecrit = intr_disable(); 458 npxsave(&dummy); 459 stop_emulating(); 460#ifdef CPU_ENABLE_SSE 461 /* XXX npxsave() doesn't actually initialize the fpu in the SSE case. */ 462 if (cpu_fxsr) 463 fninit(); 464#endif 465 control = __INITIAL_NPXCW__; 466 fldcw(&control); 467 start_emulating(); 468 intr_restore(savecrit); 469} 470 471/* 472 * Free coprocessor (if we have it). 
 */
void
npxexit(td)
	struct thread *td;
{
	register_t savecrit;

	/* If this CPU still owns the thread's FPU state, flush it to the pcb. */
	savecrit = intr_disable();
	if (curthread == PCPU_GET(fpcurthread))
		npxsave(PCPU_GET(curpcb)->pcb_save);
	intr_restore(savecrit);
#ifdef NPX_DEBUG
	if (npx_exists) {
		u_int masked_exceptions;

		masked_exceptions = GET_FPU_CW(td) & GET_FPU_SW(td) & 0x7f;
		/*
		 * Log exceptions that would have trapped with the old
		 * control word (overflow, divide by 0, and invalid operand).
		 */
		if (masked_exceptions & 0x0d)
			log(LOG_ERR,
	"pid %d (%s) exited with masked floating point exceptions 0x%02x\n",
			    td->td_proc->p_pid, td->td_proc->p_comm,
			    masked_exceptions);
	}
#endif
}

/*
 * Report the mcontext FPU state format: none, fxsave/XMM, or plain 387.
 */
int
npxformat()
{

	if (!npx_exists)
		return (_MC_FPFMT_NODEV);
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr)
		return (_MC_FPFMT_XMM);
#endif
	return (_MC_FPFMT_387);
}

/*
 * The following mechanism is used to ensure that the FPE_... value
 * that is passed as a trapcode to the signal handler of the user
 * process does not have more than one bit set.
 *
 * Multiple bits may be set if the user process modifies the control
 * word while a status word bit is already set.  While this is a sign
 * of bad coding, we have no choise than to narrow them down to one
 * bit, since we must not send a trapcode that is not exactly one of
 * the FPE_ macros.
 *
 * The mechanism has a static table with 127 entries.  Each combination
 * of the 7 FPU status word exception bits directly translates to a
 * position in this table, where a single FPE_... value is stored.
 * This FPE_... value stored there is considered the "most important"
 * of the exception bits and will be sent as the signal code.  The
 * precedence of the bits is based upon Intel Document "Numerical
 * Applications", Chapter "Special Computational Situations".
 *
 * The macro to choose one of these values does these steps: 1) Throw
 * away status word bits that cannot be masked.  2) Throw away the bits
 * currently masked in the control word, assuming the user isn't
 * interested in them anymore.  3) Reinsert status word bit 7 (stack
 * fault) if it is set, which cannot be masked but must be preserved.
 * 4) Use the remaining bits to point into the trapcode table.
 *
 * The 6 maskable bits in order of their preference, as stated in the
 * above referenced Intel manual:
 * 1  Invalid operation (FP_X_INV)
 * 1a   Stack underflow
 * 1b   Stack overflow
 * 1c   Operand of unsupported format
 * 1d   SNaN operand.
 * 2  QNaN operand (not an exception, irrelevant here)
 * 3  Any other invalid-operation not mentioned above or zero divide
 *      (FP_X_INV, FP_X_DZ)
 * 4  Denormal operand (FP_X_DNML)
 * 5  Numeric over/underflow (FP_X_OFL, FP_X_UFL)
 * 6  Inexact result (FP_X_IMP)
 */
static char fpetable[128] = {
	0,
	FPE_FLTINV,	/*  1 - INV */
	FPE_FLTUND,	/*  2 - DNML */
	FPE_FLTINV,	/*  3 - INV | DNML */
	FPE_FLTDIV,	/*  4 - DZ */
	FPE_FLTINV,	/*  5 - INV | DZ */
	FPE_FLTDIV,	/*  6 - DNML | DZ */
	FPE_FLTINV,	/*  7 - INV | DNML | DZ */
	FPE_FLTOVF,	/*  8 - OFL */
	FPE_FLTINV,	/*  9 - INV | OFL */
	FPE_FLTUND,	/*  A - DNML | OFL */
	FPE_FLTINV,	/*  B - INV | DNML | OFL */
	FPE_FLTDIV,	/*  C - DZ | OFL */
	FPE_FLTINV,	/*  D - INV | DZ | OFL */
	FPE_FLTDIV,	/*  E - DNML | DZ | OFL */
	FPE_FLTINV,	/*  F - INV | DNML | DZ | OFL */
	FPE_FLTUND,	/* 10 - UFL */
	FPE_FLTINV,	/* 11 - INV | UFL */
	FPE_FLTUND,	/* 12 - DNML | UFL */
	FPE_FLTINV,	/* 13 - INV | DNML | UFL */
	FPE_FLTDIV,	/* 14 - DZ | UFL */
	FPE_FLTINV,	/* 15 - INV | DZ | UFL */
	FPE_FLTDIV,	/* 16 - DNML | DZ | UFL */
	FPE_FLTINV,	/* 17 - INV | DNML | DZ | UFL */
	FPE_FLTOVF,	/* 18 - OFL | UFL */
	FPE_FLTINV,	/* 19 - INV | OFL | UFL */
	FPE_FLTUND,	/* 1A - DNML | OFL | UFL */
	FPE_FLTINV,	/* 1B - INV | DNML | OFL | UFL */
	FPE_FLTDIV,	/* 1C - DZ | OFL | UFL */
	FPE_FLTINV,	/* 1D - INV | DZ | OFL | UFL */
	FPE_FLTDIV,	/* 1E - DNML | DZ | OFL | UFL */
	FPE_FLTINV,	/* 1F - INV | DNML | DZ | OFL | UFL */
	FPE_FLTRES,	/* 20 - IMP */
	FPE_FLTINV,	/* 21 - INV | IMP */
	FPE_FLTUND,	/* 22 - DNML | IMP */
	FPE_FLTINV,	/* 23 - INV | DNML | IMP */
	FPE_FLTDIV,	/* 24 - DZ | IMP */
	FPE_FLTINV,	/* 25 - INV | DZ | IMP */
	FPE_FLTDIV,	/* 26 - DNML | DZ | IMP */
	FPE_FLTINV,	/* 27 - INV | DNML | DZ | IMP */
	FPE_FLTOVF,	/* 28 - OFL | IMP */
	FPE_FLTINV,	/* 29 - INV | OFL | IMP */
	FPE_FLTUND,	/* 2A - DNML | OFL | IMP */
	FPE_FLTINV,	/* 2B - INV | DNML | OFL | IMP */
	FPE_FLTDIV,	/* 2C - DZ | OFL | IMP */
	FPE_FLTINV,	/* 2D - INV | DZ | OFL | IMP */
	FPE_FLTDIV,	/* 2E - DNML | DZ | OFL | IMP */
	FPE_FLTINV,	/* 2F - INV | DNML | DZ | OFL | IMP */
	FPE_FLTUND,	/* 30 - UFL | IMP */
	FPE_FLTINV,	/* 31 - INV | UFL | IMP */
	FPE_FLTUND,	/* 32 - DNML | UFL | IMP */
	FPE_FLTINV,	/* 33 - INV | DNML | UFL | IMP */
	FPE_FLTDIV,	/* 34 - DZ | UFL | IMP */
	FPE_FLTINV,	/* 35 - INV | DZ | UFL | IMP */
	FPE_FLTDIV,	/* 36 - DNML | DZ | UFL | IMP */
	FPE_FLTINV,	/* 37 - INV | DNML | DZ | UFL | IMP */
	FPE_FLTOVF,	/* 38 - OFL | UFL | IMP */
	FPE_FLTINV,	/* 39 - INV | OFL | UFL | IMP */
	FPE_FLTUND,	/* 3A - DNML | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3B - INV | DNML | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3C - DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3D - INV | DZ | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3E - DNML | DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3F - INV | DNML | DZ | OFL | UFL | IMP */
	FPE_FLTSUB,	/* 40 - STK */
	FPE_FLTSUB,	/* 41 - INV | STK */
	FPE_FLTUND,	/* 42 - DNML | STK */
	FPE_FLTSUB,	/* 43 - INV | DNML | STK */
	FPE_FLTDIV,	/* 44 - DZ | STK */
	FPE_FLTSUB,	/* 45 - INV | DZ | STK */
	FPE_FLTDIV,	/* 46 - DNML | DZ | STK */
	FPE_FLTSUB,	/* 47 - INV | DNML | DZ | STK */
	FPE_FLTOVF,	/* 48 - OFL | STK */
	FPE_FLTSUB,	/* 49 - INV | OFL | STK */
	FPE_FLTUND,	/* 4A - DNML | OFL | STK */
	FPE_FLTSUB,	/* 4B - INV | DNML | OFL | STK */
	FPE_FLTDIV,	/* 4C - DZ | OFL | STK */
	FPE_FLTSUB,	/* 4D - INV | DZ | OFL | STK */
	FPE_FLTDIV,	/* 4E - DNML | DZ | OFL | STK */
	FPE_FLTSUB,	/* 4F - INV | DNML | DZ | OFL | STK */
	FPE_FLTUND,	/* 50 - UFL | STK */
	FPE_FLTSUB,	/* 51 - INV | UFL | STK */
	FPE_FLTUND,	/* 52 - DNML | UFL | STK */
	FPE_FLTSUB,	/* 53 - INV | DNML | UFL | STK */
	FPE_FLTDIV,	/* 54 - DZ | UFL | STK */
	FPE_FLTSUB,	/* 55 - INV | DZ | UFL | STK */
	FPE_FLTDIV,	/* 56 - DNML | DZ | UFL | STK */
	FPE_FLTSUB,	/* 57 - INV | DNML | DZ | UFL | STK */
	FPE_FLTOVF,	/* 58 - OFL | UFL | STK */
	FPE_FLTSUB,	/* 59 - INV | OFL | UFL | STK */
	FPE_FLTUND,	/* 5A - DNML | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5B - INV | DNML | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5C - DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5D - INV | DZ | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5E - DNML | DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5F - INV | DNML | DZ | OFL | UFL | STK */
	FPE_FLTRES,	/* 60 - IMP | STK */
	FPE_FLTSUB,	/* 61 - INV | IMP | STK */
	FPE_FLTUND,	/* 62 - DNML | IMP | STK */
	FPE_FLTSUB,	/* 63 - INV | DNML | IMP | STK */
	FPE_FLTDIV,	/* 64 - DZ | IMP | STK */
	FPE_FLTSUB,	/* 65 - INV | DZ | IMP | STK */
	FPE_FLTDIV,	/* 66 - DNML | DZ | IMP | STK */
	FPE_FLTSUB,	/* 67 - INV | DNML | DZ | IMP | STK */
	FPE_FLTOVF,	/* 68 - OFL | IMP | STK */
	FPE_FLTSUB,	/* 69 - INV | OFL | IMP | STK */
	FPE_FLTUND,	/* 6A - DNML | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6B - INV | DNML | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6C - DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6D - INV | DZ | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6E - DNML | DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6F - INV | DNML | DZ | OFL | IMP | STK */
	FPE_FLTUND,	/* 70 - UFL | IMP | STK */
	FPE_FLTSUB,	/* 71 - INV | UFL | IMP | STK */
	FPE_FLTUND,	/* 72 - DNML | UFL | IMP | STK */
	FPE_FLTSUB,	/* 73 - INV | DNML | UFL | IMP | STK */
	FPE_FLTDIV,	/* 74 - DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 75 - INV | DZ | UFL | IMP | STK */
	FPE_FLTDIV,	/* 76 - DNML | DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 77 - INV | DNML | DZ | UFL | IMP | STK */
	FPE_FLTOVF,	/* 78 - OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 79 - INV | OFL | UFL | IMP | STK */
	FPE_FLTUND,	/* 7A - DNML | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7B - INV | DNML | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7C - DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7D - INV | DZ | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7E - DNML | DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */
};

/*
 * Preserve the FP status word, clear FP exceptions, then generate a SIGFPE.
 *
 * Clearing exceptions is necessary mainly to avoid IRQ13 bugs.  We now
 * depend on longjmp() restoring a usable state.  Restoring the state
 * or examining it might fail if we didn't clear exceptions.
 *
 * The error code chosen will be one of the FPE_... macros.  It will be
 * sent as the second argument to old BSD-style signal handlers and as
 * "siginfo_t->si_code" (second argument) to SA_SIGINFO signal handlers.
 *
 * XXX the FP state is not preserved across signal handlers.  So signal
 * handlers cannot afford to do FP unless they preserve the state or
 * longjmp() out.  Both preserving the state and longjmp()ing may be
 * destroyed by IRQ13 bugs.  Clearing FP exceptions is not an acceptable
 * solution for signals other than SIGFPE.
 */
int
npxtrap()
{
	register_t savecrit;
	u_short control, status;

	if (!npx_exists) {
		printf("npxtrap: fpcurthread = %p, curthread = %p, npx_exists = %d\n",
		       PCPU_GET(fpcurthread), curthread, npx_exists);
		panic("npxtrap from nowhere");
	}
	savecrit = intr_disable();

	/*
	 * Interrupt handling (for another interrupt) may have pushed the
	 * state to memory.  Fetch the relevant parts of the state from
	 * wherever they are.
	 */
	if (PCPU_GET(fpcurthread) != curthread) {
		control = GET_FPU_CW(curthread);
		status = GET_FPU_SW(curthread);
	} else {
		fnstcw(&control);
		fnstsw(&status);
	}

	/* Clear pending exceptions only if we still own the live FPU state. */
	if (PCPU_GET(fpcurthread) == curthread)
		fnclex();
	intr_restore(savecrit);
	/*
	 * Index fpetable with the unmasked exception bits plus the stack
	 * fault bit (0x40), which cannot be masked (see table comment).
	 */
	return (fpetable[status & ((~control & 0x3f) | 0x40)]);
}

/*
 * Implement device not available (DNA) exception
 *
 * It would be better to switch FP context here (if curthread != fpcurthread)
 * and not necessarily for every context switch, but it is too hard to
 * access foreign pcb's.
 */

static int err_count = 0;

int
npxdna(void)
{
	struct pcb *pcb;
	register_t s;

	if (!npx_exists)
		return (0);
	if (PCPU_GET(fpcurthread) == curthread) {
		printf("npxdna: fpcurthread == curthread %d times\n",
		    ++err_count);
		stop_emulating();
		return (1);
	}
	if (PCPU_GET(fpcurthread) != NULL) {
		printf("npxdna: fpcurthread = %p (%d), curthread = %p (%d)\n",
		       PCPU_GET(fpcurthread),
		       PCPU_GET(fpcurthread)->td_proc->p_pid,
		       curthread, curthread->td_proc->p_pid);
		panic("npxdna");
	}
	s = intr_disable();
	stop_emulating();
	/*
	 * Record new context early in case frstor causes an IRQ13.
	 */
	PCPU_SET(fpcurthread, curthread);
	pcb = PCPU_GET(curpcb);

#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr)
		fpu_clean_state();
#endif

	if ((pcb->pcb_flags & PCB_NPXINITDONE) == 0) {
		/*
		 * This is the first time this thread has used the FPU or
		 * the PCB doesn't contain a clean FPU state.  Explicitly
		 * load an initial state.
		 */
		fpurstor(&npx_initialstate);
		if (pcb->pcb_initial_npxcw != __INITIAL_NPXCW__)
			fldcw(&pcb->pcb_initial_npxcw);
		pcb->pcb_flags |= PCB_NPXINITDONE;
		if (PCB_USER_FPU(pcb))
			pcb->pcb_flags |= PCB_NPXUSERINITDONE;
	} else {
		/*
		 * The following fpurstor() may cause an IRQ13 when the
		 * state being restored has a pending error.  The error will
		 * appear to have been triggered by the current (npx) user
		 * instruction even when that instruction is a no-wait
		 * instruction that should not trigger an error (e.g.,
		 * fnclex).  On at least one 486 system all of the no-wait
		 * instructions are broken the same as frstor, so our
		 * treatment does not amplify the breakage.  On at least
		 * one 386/Cyrix 387 system, fnclex works correctly while
		 * frstor and fnsave are broken, so our treatment breaks
		 * fnclex if it is the first FPU instruction after a context
		 * switch.
		 */
		fpurstor(pcb->pcb_save);
	}
	intr_restore(s);

	return (1);
}

/*
 * Wrapper for fnsave instruction, partly to handle hardware bugs.  When npx
 * exceptions are reported via IRQ13, spurious IRQ13's may be triggered by
 * no-wait npx instructions.  See the Intel application note AP-578 for
 * details.  This doesn't cause any additional complications here.  IRQ13's
 * are inherently asynchronous unless the CPU is frozen to deliver them --
 * one that started in userland may be delivered many instructions later,
 * after the process has entered the kernel.  It may even be delivered after
 * the fnsave here completes.
 * A spurious IRQ13 for the fnsave is handled in
 * the same way as a very-late-arriving non-spurious IRQ13 from user mode:
 * it is normally ignored at first because we set fpcurthread to NULL; it is
 * normally retriggered in npxdna() after return to user mode.
 *
 * npxsave() must be called with interrupts disabled, so that it clears
 * fpcurthread atomically with saving the state.  We require callers to do the
 * disabling, since most callers need to disable interrupts anyway to call
 * npxsave() atomically with checking fpcurthread.
 *
 * A previous version of npxsave() went to great lengths to execute fnsave
 * with interrupts enabled in case executing it froze the CPU.  This case
 * can't happen, at least for Intel CPU/NPX's.  Spurious IRQ13's don't imply
 * spurious freezes.
 */
void
npxsave(addr)
	union savefpu *addr;
{

	stop_emulating();
	fpusave(addr);

	start_emulating();
	PCPU_SET(fpcurthread, NULL);
}

/*
 * This should be called with interrupts disabled and only when the owning
 * FPU thread is non-null.
 */
void
npxdrop()
{
	struct thread *td;

	/*
	 * Discard pending exceptions in the !cpu_fxsr case so that unmasked
	 * ones don't cause a panic on the next frstor.
	 */
#ifdef CPU_ENABLE_SSE
	if (!cpu_fxsr)
#endif
		fnclex();

	td = PCPU_GET(fpcurthread);
	PCPU_SET(fpcurthread, NULL);
	td->td_pcb->pcb_flags &= ~PCB_NPXINITDONE;
	start_emulating();
}

/*
 * Get the state of the FPU without dropping ownership (if possible).
 * It returns the FPU ownership status.
 */
int
npxgetregs(struct thread *td, union savefpu *addr)
{
	struct pcb *pcb;
	register_t s;

	if (!npx_exists)
		return (_MC_FPOWNED_NONE);

	pcb = td->td_pcb;
	/* Thread never touched the FPU: hand out the pristine initial state. */
	if ((pcb->pcb_flags & PCB_NPXINITDONE) == 0) {
		bcopy(&npx_initialstate, addr, sizeof(npx_initialstate));
		SET_FPU_CW(addr, pcb->pcb_initial_npxcw);
		return (_MC_FPOWNED_NONE);
	}
	s = intr_disable();
	if (td == PCPU_GET(fpcurthread)) {
		fpusave(addr);
#ifdef CPU_ENABLE_SSE
		if (!cpu_fxsr)
#endif
			/*
			 * fnsave initializes the FPU and destroys whatever
			 * context it contains.  Make sure the FPU owner
			 * starts with a clean state next time.
			 */
			npxdrop();
		intr_restore(s);
		return (_MC_FPOWNED_FPU);
	} else {
		intr_restore(s);
		bcopy(pcb->pcb_save, addr, sizeof(*addr));
		return (_MC_FPOWNED_PCB);
	}
}

/*
 * Like npxgetregs(), but for the user-mode (non-kernel-FPU-context) state
 * kept in pcb_user_save.
 */
int
npxgetuserregs(struct thread *td, union savefpu *addr)
{
	struct pcb *pcb;
	register_t s;

	if (!npx_exists)
		return (_MC_FPOWNED_NONE);

	pcb = td->td_pcb;
	if ((pcb->pcb_flags & PCB_NPXUSERINITDONE) == 0) {
		bcopy(&npx_initialstate, addr, sizeof(npx_initialstate));
		SET_FPU_CW(addr, pcb->pcb_initial_npxcw);
		return (_MC_FPOWNED_NONE);
	}
	s = intr_disable();
	if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
		fpusave(addr);
#ifdef CPU_ENABLE_SSE
		if (!cpu_fxsr)
#endif
			/*
			 * fnsave initializes the FPU and destroys whatever
			 * context it contains.  Make sure the FPU owner
			 * starts with a clean state next time.
			 */
			npxdrop();
		intr_restore(s);
		return (_MC_FPOWNED_FPU);
	} else {
		intr_restore(s);
		bcopy(&pcb->pcb_user_save, addr, sizeof(*addr));
		return (_MC_FPOWNED_PCB);
	}
}

/*
 * Set the state of the FPU.
 */
void
npxsetregs(struct thread *td, union savefpu *addr)
{
	struct pcb *pcb;
	register_t s;

	if (!npx_exists)
		return;

	pcb = td->td_pcb;
	s = intr_disable();
	if (td == PCPU_GET(fpcurthread)) {
#ifdef CPU_ENABLE_SSE
		if (!cpu_fxsr)
#endif
			fnclex();	/* As in npxdrop(). */
		fpurstor(addr);
		intr_restore(s);
	} else {
		intr_restore(s);
		bcopy(addr, pcb->pcb_save, sizeof(*addr));
	}
	if (PCB_USER_FPU(pcb))
		pcb->pcb_flags |= PCB_NPXUSERINITDONE;
	pcb->pcb_flags |= PCB_NPXINITDONE;
}

/*
 * Like npxsetregs(), but targets the user-mode state (pcb_user_save) when
 * the thread is inside a kernel FPU context.
 */
void
npxsetuserregs(struct thread *td, union savefpu *addr)
{
	struct pcb *pcb;
	register_t s;

	if (!npx_exists)
		return;

	pcb = td->td_pcb;
	s = intr_disable();
	if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
#ifdef CPU_ENABLE_SSE
		if (!cpu_fxsr)
#endif
			fnclex();	/* As in npxdrop(). */
		fpurstor(addr);
		intr_restore(s);
		pcb->pcb_flags |= PCB_NPXUSERINITDONE | PCB_NPXINITDONE;
	} else {
		intr_restore(s);
		bcopy(addr, &pcb->pcb_user_save, sizeof(*addr));
		if (PCB_USER_FPU(pcb))
			pcb->pcb_flags |= PCB_NPXINITDONE;
		pcb->pcb_flags |= PCB_NPXUSERINITDONE;
	}
}

/*
 * Save the FPU state using whichever save instruction the CPU supports.
 */
static void
fpusave(addr)
	union savefpu *addr;
{

#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr)
		fxsave(addr);
	else
#endif
		fnsave(addr);
}

#ifdef CPU_ENABLE_SSE
/*
 * On AuthenticAMD processors, the fxrstor instruction does not restore
 * the x87's stored last instruction pointer, last data pointer, and last
 * opcode values, except in the rare case in which the exception summary
 * (ES) bit in the x87 status word is set to 1.
 *
 * In order to avoid leaking this information across processes, we clean
 * these values by performing a dummy load before executing fxrstor().
 */
static void
fpu_clean_state(void)
{
	static float dummy_variable = 0.0;
	u_short status;

	/*
	 * Clear the ES bit in the x87 status word if it is currently
	 * set, in order to avoid causing a fault in the upcoming load.
	 */
	fnstsw(&status);
	if (status & 0x80)
		fnclex();

	/*
	 * Load the dummy variable into the x87 stack.  This mangles
	 * the x87 stack, but we don't care since we're about to call
	 * fxrstor() anyway.  (ffree frees st(7) first so the push
	 * cannot overflow the register stack.)
	 */
	__asm __volatile("ffree %%st(7); fld %0" : : "m" (dummy_variable));
}
#endif /* CPU_ENABLE_SSE */

/*
 * Restore the FPU context from *addr: fxrstor on SSE-capable CPUs,
 * frstor otherwise.  Counterpart of fpusave().
 */
static void
fpurstor(addr)
	union savefpu *addr;
{

#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr)
		fxrstor(addr);
	else
#endif
		frstor(addr);
}

/* newbus glue: method table for the npx pseudo-device. */
static device_method_t npx_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	npx_identify),
	DEVMETHOD(device_probe,		npx_probe),
	DEVMETHOD(device_attach,	npx_attach),
	DEVMETHOD(device_detach,	bus_generic_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),

	{ 0, 0 }
};

static driver_t npx_driver = {
	"npx",
	npx_methods,
	1,			/* no softc */
};

static devclass_t npx_devclass;

/*
 * We prefer to attach to the root nexus so that the usual case (exception 16)
 * doesn't describe the processor as being `on isa'.
 */
DRIVER_MODULE(npx, nexus, npx_driver, npx_devclass, 0, 0);

#ifdef DEV_ISA
/*
 * This sucks up the legacy ISA support assignments from PNPBIOS/ACPI.
 */
/* PnP IDs claimed by the stub ISA coprocessor driver. */
static struct isa_pnp_id npxisa_ids[] = {
	{ 0x040cd041, "Legacy ISA coprocessor support" }, /* PNP0C04 */
	{ 0 }
};

/*
 * Claim the PNP0C04 node so no other driver grabs it; stay quiet on
 * failure to avoid probe noise.
 */
static int
npxisa_probe(device_t dev)
{
	int result;
	if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev, npxisa_ids)) <= 0) {
		device_quiet(dev);
	}
	return(result);
}

/* Nothing to do: the real work is done by the nexus-attached npx. */
static int
npxisa_attach(device_t dev)
{
	return (0);
}

static device_method_t npxisa_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		npxisa_probe),
	DEVMETHOD(device_attach,	npxisa_attach),
	DEVMETHOD(device_detach,	bus_generic_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),

	{ 0, 0 }
};

static driver_t npxisa_driver = {
	"npxisa",
	npxisa_methods,
	1,			/* no softc */
};

static devclass_t npxisa_devclass;

DRIVER_MODULE(npxisa, isa, npxisa_driver, npxisa_devclass, 0, 0);
#ifndef PC98
DRIVER_MODULE(npxisa, acpi, npxisa_driver, npxisa_devclass, 0, 0);
#endif
#endif /* DEV_ISA */

/*
 * Begin an in-kernel FPU usage section for thread 'td'.  The previous
 * save-area pointer and init flag are stashed in *ctx, and pcb_save is
 * redirected to ctx->hwstate so that subsequent context switches save
 * the kernel FPU state there.  Must be paired with fpu_kern_leave().
 * 'flags' is currently unused.  Always returns 0.
 */
int
fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags)
{
	struct pcb *pcb;

	pcb = td->td_pcb;
	KASSERT(!PCB_USER_FPU(pcb) || pcb->pcb_save == &pcb->pcb_user_save,
	    ("mangled pcb_save"));
	ctx->flags = 0;
	/* Remember whether the outer context had been initialized. */
	if ((pcb->pcb_flags & PCB_NPXINITDONE) != 0)
		ctx->flags |= FPU_KERN_CTX_NPXINITDONE;
	npxexit(td);			/* flush live state to pcb_save */
	ctx->prev = pcb->pcb_save;
	pcb->pcb_save = &ctx->hwstate;
	pcb->pcb_flags |= PCB_KERNNPX;
	pcb->pcb_flags &= ~PCB_NPXINITDONE;
	return (0);
}

/*
 * End the in-kernel FPU section started by fpu_kern_enter() with the
 * same *ctx: drop any live hardware state, restore the saved pcb_save
 * pointer, and recompute the init flags for the re-exposed context.
 * Always returns 0.
 */
int
fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx)
{
	struct pcb *pcb;
	register_t savecrit;

	pcb = td->td_pcb;
	savecrit = intr_disable();
	/* Discard the kernel FPU state still live in the hardware. */
	if (curthread == PCPU_GET(fpcurthread))
		npxdrop();
	intr_restore(savecrit);
	pcb->pcb_save = ctx->prev;
	if (pcb->pcb_save == &pcb->pcb_user_save) {
		/* Outermost leave: back to the user context. */
		if ((pcb->pcb_flags & PCB_NPXUSERINITDONE) != 0)
			pcb->pcb_flags |= PCB_NPXINITDONE;
		else
			pcb->pcb_flags &= ~PCB_NPXINITDONE;
		pcb->pcb_flags &= ~PCB_KERNNPX;
	} else {
		/* Nested use: restore the enclosing kernel context's flag. */
		if ((ctx->flags & FPU_KERN_CTX_NPXINITDONE) != 0)
			pcb->pcb_flags |= PCB_NPXINITDONE;
		else
			pcb->pcb_flags &= ~PCB_NPXINITDONE;
		KASSERT(!PCB_USER_FPU(pcb), ("unpaired fpu_kern_leave"));
	}
	return (0);
}

/*
 * Permanently mark the calling kernel thread as an FPU-using kernel
 * thread, allowing it to use the FPU without enter/leave bracketing.
 * 'flags' is currently unused.  Always returns 0.
 */
int
fpu_kern_thread(u_int flags)
{
	struct pcb *pcb;

	pcb = PCPU_GET(curpcb);
	KASSERT((curthread->td_pflags & TDP_KTHREAD) != 0,
	    ("Only kthread may use fpu_kern_thread"));
	KASSERT(pcb->pcb_save == &pcb->pcb_user_save, ("mangled pcb_save"));
	KASSERT(PCB_USER_FPU(pcb), ("recursive call"));

	pcb->pcb_flags |= PCB_KERNNPX;
	return (0);
}

/*
 * Return non-zero iff the current thread is a kernel thread that has
 * called fpu_kern_thread().  'flags' is currently unused.
 */
int
is_fpu_kern_thread(u_int flags)
{

	if ((curthread->td_pflags & TDP_KTHREAD) == 0)
		return (0);
	return ((PCPU_GET(curpcb)->pcb_flags & PCB_KERNNPX) != 0);
}