/*-
 * Copyright (c) 1990 William Jolitz.
 * Copyright (c) 1991 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)npx.c	7.2 (Berkeley) 5/12/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/10/sys/i386/isa/npx.c 337245 2018-08-03 14:12:37Z kib $");

#include "opt_cpu.h"
#include "opt_isa.h"
#include "opt_npx.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <machine/bus.h>
#include <sys/rman.h>
#ifdef NPX_DEBUG
#include <sys/syslog.h>
#endif
#include <sys/signalvar.h>
#include <vm/uma.h>

#include <machine/asmacros.h>
#include <machine/cputypes.h>
#include <machine/frame.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#include <machine/psl.h>
#include <machine/resource.h>
#include <machine/specialreg.h>
#include <machine/segments.h>
#include <machine/ucontext.h>

#include <machine/intr_machdep.h>
#ifdef XEN
#include <xen/xen-os.h>
#include <xen/hypervisor.h>
#endif

#ifdef DEV_ISA
#include <isa/isavar.h>
#endif

#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif

/*
 * 387 and 287 Numeric Coprocessor Extension (NPX) Driver.
 */

#if defined(__GNUCLIKE_ASM) && !defined(lint)

#define	fldcw(cw)		__asm __volatile("fldcw %0" : : "m" (cw))
#define	fnclex()		__asm __volatile("fnclex")
#define	fninit()		__asm __volatile("fninit")
#define	fnsave(addr)		__asm __volatile("fnsave %0" : "=m" (*(addr)))
#define	fnstcw(addr)		__asm __volatile("fnstcw %0" : "=m" (*(addr)))
#define	fnstsw(addr)		__asm __volatile("fnstsw %0" : "=am" (*(addr)))
#define	fp_divide_by_0()	__asm __volatile( \
				    "fldz; fld1; fdiv %st,%st(1); fnop")
#define	frstor(addr)		__asm __volatile("frstor %0" : : "m" (*(addr)))
#ifdef CPU_ENABLE_SSE
#define	fxrstor(addr)		__asm __volatile("fxrstor %0" : : "m" (*(addr)))
#define	fxsave(addr)		__asm __volatile("fxsave %0" : "=m" (*(addr)))
#define	ldmxcsr(csr)		__asm __volatile("ldmxcsr %0" : : "m" (csr))
#define	stmxcsr(addr)		__asm __volatile("stmxcsr %0" : : "m" (*(addr)))

static __inline void
xrstor(char *addr, uint64_t mask)
{
	uint32_t low, hi;

	low = mask;
	hi = mask >> 32;
	__asm __volatile("xrstor %0" : : "m" (*addr), "a" (low), "d" (hi));
}

static __inline void
xsave(char *addr, uint64_t mask)
{
	uint32_t low, hi;

	low = mask;
	hi = mask >> 32;
	__asm __volatile("xsave %0" : "=m" (*addr) : "a" (low), "d" (hi) :
	    "memory");
}

static __inline void
xsaveopt(char *addr, uint64_t mask)
{
	uint32_t low, hi;

	low = mask;
	hi = mask >> 32;
	__asm __volatile("xsaveopt %0" : "=m" (*addr) : "a" (low), "d" (hi) :
	    "memory");
}
#endif
#else	/* !(__GNUCLIKE_ASM && !lint) */

void	fldcw(u_short cw);
void	fnclex(void);
void	fninit(void);
void	fnsave(caddr_t addr);
void	fnstcw(caddr_t addr);
void	fnstsw(caddr_t addr);
void	fp_divide_by_0(void);
void	frstor(caddr_t addr);
#ifdef CPU_ENABLE_SSE
void	fxsave(caddr_t addr);
void	fxrstor(caddr_t addr);
void	ldmxcsr(u_int csr);
void	stmxcsr(u_int *csr);
void	xrstor(char *addr, uint64_t mask);
void	xsave(char *addr, uint64_t mask);
void	xsaveopt(char *addr, uint64_t mask);
#endif

#endif	/* __GNUCLIKE_ASM && !lint */

#ifdef XEN
#define	start_emulating()	(HYPERVISOR_fpu_taskswitch(1))
#define	stop_emulating()	(HYPERVISOR_fpu_taskswitch(0))
#else
#define	start_emulating()	load_cr0(rcr0() | CR0_TS)
#define	stop_emulating()	clts()
#endif

#ifdef CPU_ENABLE_SSE
#define GET_FPU_CW(thread) \
	(cpu_fxsr ? \
		(thread)->td_pcb->pcb_save->sv_xmm.sv_env.en_cw : \
		(thread)->td_pcb->pcb_save->sv_87.sv_env.en_cw)
#define GET_FPU_SW(thread) \
	(cpu_fxsr ? \
		(thread)->td_pcb->pcb_save->sv_xmm.sv_env.en_sw : \
		(thread)->td_pcb->pcb_save->sv_87.sv_env.en_sw)
#define SET_FPU_CW(savefpu, value) do { \
	if (cpu_fxsr) \
		(savefpu)->sv_xmm.sv_env.en_cw = (value); \
	else \
		(savefpu)->sv_87.sv_env.en_cw = (value); \
} while (0)
#else /* CPU_ENABLE_SSE */
#define GET_FPU_CW(thread) \
	(thread->td_pcb->pcb_save->sv_87.sv_env.en_cw)
#define GET_FPU_SW(thread) \
	(thread->td_pcb->pcb_save->sv_87.sv_env.en_sw)
#define SET_FPU_CW(savefpu, value) \
	(savefpu)->sv_87.sv_env.en_cw = (value)
#endif /* CPU_ENABLE_SSE */

#ifdef CPU_ENABLE_SSE
CTASSERT(sizeof(union savefpu) == 512);
CTASSERT(sizeof(struct xstate_hdr) == 64);
CTASSERT(sizeof(struct savefpu_ymm) == 832);

/*
 * This requirement is to make it easier for asm code to calculate
 * the offset of the FPU save area from the PCB address.
 * The FPU save area must be 64-byte aligned.
 */
CTASSERT(sizeof(struct pcb) % XSAVE_AREA_ALIGN == 0);

/*
 * Ensure the copy of XCR0 saved in a core is contained in the padding
 * area.
 */
CTASSERT(X86_XSTATE_XCR0_OFFSET >= offsetof(struct savexmm, sv_pad) &&
    X86_XSTATE_XCR0_OFFSET + sizeof(uint64_t) <= sizeof(struct savexmm));

static	void	fpu_clean_state(void);
#endif

static	void	fpusave(union savefpu *);
static	void	fpurstor(union savefpu *);

int	hw_float;

SYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD,
    &hw_float, 0, "Floating point instructions executed in hardware");

int lazy_fpu_switch = 0;
SYSCTL_INT(_hw, OID_AUTO, lazy_fpu_switch, CTLFLAG_RWTUN | CTLFLAG_NOFETCH,
    &lazy_fpu_switch, 0,
    "Lazily load FPU context after context switch");

#ifdef CPU_ENABLE_SSE
int use_xsave;
uint64_t xsave_mask;
#endif
static	uma_zone_t fpu_save_area_zone;
static	union savefpu *npx_initialstate;

#ifdef CPU_ENABLE_SSE
struct xsave_area_elm_descr {
	u_int	offset;
	u_int	size;
} *xsave_area_desc;

static int use_xsaveopt;
#endif

static volatile u_int npx_traps_while_probing;

alias_for_inthand_t probetrap;
__asm("							\n\
	.text						\n\
	.p2align 2,0x90					\n\
	.type " __XSTRING(CNAME(probetrap)) ",@function	\n\
" __XSTRING(CNAME(probetrap)) ":			\n\
	ss						\n\
	incl " __XSTRING(CNAME(npx_traps_while_probing)) "	\n\
	fnclex						\n\
	iret						\n\
");

/*
 * Determine if an FPU is present and how to use it.
 */
static int
npx_probe(void)
{
	struct gate_descriptor save_idt_npxtrap;
	u_short control, status;

	/*
	 * Modern CPUs all have an FPU that uses the INT16 interface
	 * and provide a simple way to verify that, so handle the
	 * common case right away.
	 */
	if (cpu_feature & CPUID_FPU) {
		hw_float = 1;
		return (1);
	}

	save_idt_npxtrap = idt[IDT_MF];
	setidt(IDT_MF, probetrap, SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));

	/*
	 * Don't trap while we're probing.
	 */
	stop_emulating();

	/*
	 * Finish resetting the coprocessor, if any.  If there is an error
	 * pending, then we may get a bogus IRQ13, but npx_intr() will handle
	 * it OK.  Bogus halts have never been observed, but we enabled
	 * IRQ13 and cleared the BUSY# latch early to handle them anyway.
	 */
	fninit();

	/*
	 * Don't use fwait here because it might hang.
	 * Don't use fnop here because it usually hangs if there is no FPU.
	 */
	DELAY(1000);		/* wait for any IRQ13 */
#ifdef DIAGNOSTIC
	if (npx_traps_while_probing != 0)
		printf("fninit caused %u bogus npx trap(s)\n",
		    npx_traps_while_probing);
#endif
	/*
	 * Check for a status of mostly zero.
	 */
	status = 0x5a5a;
	fnstsw(&status);
	if ((status & 0xb8ff) == 0) {
		/*
		 * Good, now check for a proper control word.
		 */
		control = 0x5a5a;
		fnstcw(&control);
		if ((control & 0x1f3f) == 0x033f) {
			/*
			 * We have an npx, now divide by 0 to see if exception
			 * 16 works.
			 */
			control &= ~(1 << 2);	/* enable divide by 0 trap */
			fldcw(control);
#ifdef FPU_ERROR_BROKEN
			/*
			 * FPU error signal doesn't work on some CPU
			 * accelerator board.
			 */
			hw_float = 1;
			return (1);
#endif
			npx_traps_while_probing = 0;
			fp_divide_by_0();
			if (npx_traps_while_probing != 0) {
				/*
				 * Good, exception 16 works.
				 */
				hw_float = 1;
				goto cleanup;
			}
			printf(
	"FPU does not use exception 16 for error reporting\n");
			goto cleanup;
		}
	}

	/*
	 * Probe failed.  Floating point simply won't work.
	 * Notify user and disable FPU/MMX/SSE instruction execution.
	 */
	printf("WARNING: no FPU!\n");
	__asm __volatile("smsw %%ax; orb %0,%%al; lmsw %%ax" : :
	    "n" (CR0_EM | CR0_MP) : "ax");

cleanup:
	idt[IDT_MF] = save_idt_npxtrap;
	return (hw_float);
}

#ifdef CPU_ENABLE_SSE
/*
 * Enable XSAVE if supported and allowed by user.
 * Calculate the xsave_mask.
 */
static void
npxinit_bsp1(void)
{
	u_int cp[4];
	uint64_t xsave_mask_user;

	TUNABLE_INT_FETCH("hw.lazy_fpu_switch", &lazy_fpu_switch);
	if (cpu_fxsr && (cpu_feature2 & CPUID2_XSAVE) != 0) {
		use_xsave = 1;
		TUNABLE_INT_FETCH("hw.use_xsave", &use_xsave);
	}
	if (!use_xsave)
		return;

	cpuid_count(0xd, 0x0, cp);
	xsave_mask = XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
	if ((cp[0] & xsave_mask) != xsave_mask)
		panic("CPU0 does not support X87 or SSE: %x", cp[0]);
	xsave_mask = ((uint64_t)cp[3] << 32) | cp[0];
	xsave_mask_user = xsave_mask;
	TUNABLE_QUAD_FETCH("hw.xsave_mask", &xsave_mask_user);
	xsave_mask_user |= XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE;
	xsave_mask &= xsave_mask_user;
	if ((xsave_mask & XFEATURE_AVX512) != XFEATURE_AVX512)
		xsave_mask &= ~XFEATURE_AVX512;
	if ((xsave_mask & XFEATURE_MPX) != XFEATURE_MPX)
		xsave_mask &= ~XFEATURE_MPX;

	cpuid_count(0xd, 0x1, cp);
	if ((cp[0] & CPUID_EXTSTATE_XSAVEOPT) != 0)
		use_xsaveopt = 1;
}
#endif

/*
 * Calculate the fpu save area size.
 */
static void
npxinit_bsp2(void)
{
#ifdef CPU_ENABLE_SSE
	u_int cp[4];

	if (use_xsave) {
		cpuid_count(0xd, 0x0, cp);
		cpu_max_ext_state_size = cp[1];

		/*
		 * Reload the cpu_feature2, since we enabled OSXSAVE.
		 */
		do_cpuid(1, cp);
		cpu_feature2 = cp[2];
	} else
#endif
		cpu_max_ext_state_size = sizeof(union savefpu);
}

/*
 * Initialize the floating point unit.
 */
void
npxinit(bool bsp)
{
	static union savefpu dummy;
	register_t saveintr;
#ifdef CPU_ENABLE_SSE
	u_int mxcsr;
#endif
	u_short control;

	if (bsp) {
		if (!npx_probe())
			return;
#ifdef CPU_ENABLE_SSE
		npxinit_bsp1();
#endif
	}

#ifdef CPU_ENABLE_SSE
	if (use_xsave) {
		load_cr4(rcr4() | CR4_XSAVE);
		load_xcr(XCR0, xsave_mask);
	}
#endif

	/*
	 * XCR0 shall be set up before CPU can report the save area size.
	 */
	if (bsp)
		npxinit_bsp2();

	/*
	 * fninit has the same h/w bugs as fnsave.  Use the detoxified
	 * fnsave to throw away any junk in the fpu.  fpusave() initializes
	 * the fpu.
	 *
	 * It is too early for critical_enter() to work on AP.
	 */
	saveintr = intr_disable();
	stop_emulating();
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr)
		fninit();
	else
#endif
		fnsave(&dummy);
	control = __INITIAL_NPXCW__;
	fldcw(control);
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr) {
		mxcsr = __INITIAL_MXCSR__;
		ldmxcsr(mxcsr);
	}
#endif
	start_emulating();
	intr_restore(saveintr);
}
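
/*
 * Illustrative example (hypothetical values, not taken from a real
 * machine) of the xsave_mask computation performed in npxinit_bsp1()
 * above: if CPUID leaf 0xd reports x87, SSE and AVX state (mask 0x7)
 * and the loader tunable hw.xsave_mask is set to 0x3, the AVX bit is
 * dropped and only x87/SSE state is saved.  The x87 and SSE bits can
 * never be excluded, since they are ORed back into the user-supplied
 * mask before it is applied.
 */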

/*
 * On the boot CPU we generate a clean state that is used to
 * initialize the floating point unit when it is first used by a
 * process.
 */
static void
npxinitstate(void *arg __unused)
{
	register_t saveintr;
#ifdef CPU_ENABLE_SSE
	int cp[4], i, max_ext_n;
#endif

	if (!hw_float)
		return;

	npx_initialstate = malloc(cpu_max_ext_state_size, M_DEVBUF,
	    M_WAITOK | M_ZERO);
	saveintr = intr_disable();
	stop_emulating();

	fpusave(npx_initialstate);
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr) {
		if (npx_initialstate->sv_xmm.sv_env.en_mxcsr_mask)
			cpu_mxcsr_mask =
			    npx_initialstate->sv_xmm.sv_env.en_mxcsr_mask;
		else
			cpu_mxcsr_mask = 0xFFBF;

		/*
		 * The fninit instruction does not modify XMM
		 * registers or x87 registers (MM/ST).  The fpusave
		 * call dumped the garbage contained in the registers
		 * after reset to the initial state saved.  Clear XMM
		 * and x87 registers file image to make the startup
		 * program state and signal handler XMM/x87 register
		 * content predictable.
		 */
		bzero(npx_initialstate->sv_xmm.sv_fp,
		    sizeof(npx_initialstate->sv_xmm.sv_fp));
		bzero(npx_initialstate->sv_xmm.sv_xmm,
		    sizeof(npx_initialstate->sv_xmm.sv_xmm));
	} else
#endif
		bzero(npx_initialstate->sv_87.sv_ac,
		    sizeof(npx_initialstate->sv_87.sv_ac));

#ifdef CPU_ENABLE_SSE
	/*
	 * Create a table describing the layout of the CPU Extended
	 * Save Area.
	 */
	if (use_xsave) {
		if (xsave_mask >> 32 != 0)
			max_ext_n = fls(xsave_mask >> 32) + 32;
		else
			max_ext_n = fls(xsave_mask);
		xsave_area_desc = malloc(max_ext_n * sizeof(struct
		    xsave_area_elm_descr), M_DEVBUF, M_WAITOK | M_ZERO);
		/* x87 state */
		xsave_area_desc[0].offset = 0;
		xsave_area_desc[0].size = 160;
		/* XMM */
		xsave_area_desc[1].offset = 160;
		xsave_area_desc[1].size = 288 - 160;

		for (i = 2; i < max_ext_n; i++) {
			cpuid_count(0xd, i, cp);
			xsave_area_desc[i].offset = cp[1];
			xsave_area_desc[i].size = cp[0];
		}
	}
#endif

	fpu_save_area_zone = uma_zcreate("FPU_save_area",
	    cpu_max_ext_state_size, NULL, NULL, NULL, NULL,
	    XSAVE_AREA_ALIGN - 1, 0);

	start_emulating();
	intr_restore(saveintr);
}
SYSINIT(npxinitstate, SI_SUB_DRIVERS, SI_ORDER_ANY, npxinitstate, NULL);

/*
 * Free coprocessor (if we have it).
 */
void
npxexit(struct thread *td)
{

	critical_enter();
	if (curthread == PCPU_GET(fpcurthread)) {
		stop_emulating();
		fpusave(curpcb->pcb_save);
		start_emulating();
		PCPU_SET(fpcurthread, NULL);
	}
	critical_exit();
#ifdef NPX_DEBUG
	if (hw_float) {
		u_int masked_exceptions;

		masked_exceptions = GET_FPU_CW(td) & GET_FPU_SW(td) & 0x7f;
		/*
		 * Log exceptions that would have trapped with the old
		 * control word (overflow, divide by 0, and invalid operand).
		 */
		if (masked_exceptions & 0x0d)
			log(LOG_ERR,
	"pid %d (%s) exited with masked floating point exceptions 0x%02x\n",
			    td->td_proc->p_pid, td->td_proc->p_comm,
			    masked_exceptions);
	}
#endif
}

int
npxformat(void)
{

	if (!hw_float)
		return (_MC_FPFMT_NODEV);
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr)
		return (_MC_FPFMT_XMM);
#endif
	return (_MC_FPFMT_387);
}

/*
 * The following mechanism is used to ensure that the FPE_... value
 * that is passed as a trapcode to the signal handler of the user
 * process does not have more than one bit set.
 *
 * Multiple bits may be set if the user process modifies the control
 * word while a status word bit is already set.  While this is a sign
 * of bad coding, we have no choice but to narrow them down to one
 * bit, since we must not send a trapcode that is not exactly one of
 * the FPE_ macros.
 *
 * The mechanism has a static table with 127 entries.  Each combination
 * of the 7 FPU status word exception bits directly translates to a
 * position in this table, where a single FPE_... value is stored.
 * This FPE_... value stored there is considered the "most important"
 * of the exception bits and will be sent as the signal code.  The
 * precedence of the bits is based upon Intel Document "Numerical
 * Applications", Chapter "Special Computational Situations".
 *
 * The macro to choose one of these values does these steps: 1) Throw
 * away status word bits that cannot be masked.  2) Throw away the bits
 * currently masked in the control word, assuming the user isn't
 * interested in them anymore.  3) Reinsert status word bit 6 (stack
 * fault) if it is set, which cannot be masked but must be preserved.
 * 4) Use the remaining bits to point into the trapcode table.
 *
 * The 6 maskable bits in order of their preference, as stated in the
 * above referenced Intel manual:
 *   1  Invalid operation (FP_X_INV)
 *   1a   Stack underflow
 *   1b   Stack overflow
 *   1c   Operand of unsupported format
 *   1d   SNaN operand.
 *   2  QNaN operand (not an exception, irrelevant here)
 *   3  Any other invalid-operation not mentioned above or zero divide
 *        (FP_X_INV, FP_X_DZ)
 *   4  Denormal operand (FP_X_DNML)
 *   5  Numeric over/underflow (FP_X_OFL, FP_X_UFL)
 *   6  Inexact result (FP_X_IMP)
 */
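
/*
 * Worked example (hypothetical register values): if the user unmasks
 * only the divide-by-zero exception (control word bit 2 clear, all
 * other mask bits set) and the status word reports DZ and IMP (0x24),
 * the index computed in npxtrap_x87() below is
 *	0x24 & ((~control & 0x3f) | 0x40) == 0x04,
 * and fpetable[0x04] yields FPE_FLTDIV.
 */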
static char fpetable[128] = {
	0,
	FPE_FLTINV,	/*  1 - INV */
	FPE_FLTUND,	/*  2 - DNML */
	FPE_FLTINV,	/*  3 - INV | DNML */
	FPE_FLTDIV,	/*  4 - DZ */
	FPE_FLTINV,	/*  5 - INV | DZ */
	FPE_FLTDIV,	/*  6 - DNML | DZ */
	FPE_FLTINV,	/*  7 - INV | DNML | DZ */
	FPE_FLTOVF,	/*  8 - OFL */
	FPE_FLTINV,	/*  9 - INV | OFL */
	FPE_FLTUND,	/*  A - DNML | OFL */
	FPE_FLTINV,	/*  B - INV | DNML | OFL */
	FPE_FLTDIV,	/*  C - DZ | OFL */
	FPE_FLTINV,	/*  D - INV | DZ | OFL */
	FPE_FLTDIV,	/*  E - DNML | DZ | OFL */
	FPE_FLTINV,	/*  F - INV | DNML | DZ | OFL */
	FPE_FLTUND,	/* 10 - UFL */
	FPE_FLTINV,	/* 11 - INV | UFL */
	FPE_FLTUND,	/* 12 - DNML | UFL */
	FPE_FLTINV,	/* 13 - INV | DNML | UFL */
	FPE_FLTDIV,	/* 14 - DZ | UFL */
	FPE_FLTINV,	/* 15 - INV | DZ | UFL */
	FPE_FLTDIV,	/* 16 - DNML | DZ | UFL */
	FPE_FLTINV,	/* 17 - INV | DNML | DZ | UFL */
	FPE_FLTOVF,	/* 18 - OFL | UFL */
	FPE_FLTINV,	/* 19 - INV | OFL | UFL */
	FPE_FLTUND,	/* 1A - DNML | OFL | UFL */
	FPE_FLTINV,	/* 1B - INV | DNML | OFL | UFL */
	FPE_FLTDIV,	/* 1C - DZ | OFL | UFL */
	FPE_FLTINV,	/* 1D - INV | DZ | OFL | UFL */
	FPE_FLTDIV,	/* 1E - DNML | DZ | OFL | UFL */
	FPE_FLTINV,	/* 1F - INV | DNML | DZ | OFL | UFL */
	FPE_FLTRES,	/* 20 - IMP */
	FPE_FLTINV,	/* 21 - INV | IMP */
	FPE_FLTUND,	/* 22 - DNML | IMP */
	FPE_FLTINV,	/* 23 - INV | DNML | IMP */
	FPE_FLTDIV,	/* 24 - DZ | IMP */
	FPE_FLTINV,	/* 25 - INV | DZ | IMP */
	FPE_FLTDIV,	/* 26 - DNML | DZ | IMP */
	FPE_FLTINV,	/* 27 - INV | DNML | DZ | IMP */
	FPE_FLTOVF,	/* 28 - OFL | IMP */
	FPE_FLTINV,	/* 29 - INV | OFL | IMP */
	FPE_FLTUND,	/* 2A - DNML | OFL | IMP */
	FPE_FLTINV,	/* 2B - INV | DNML | OFL | IMP */
	FPE_FLTDIV,	/* 2C - DZ | OFL | IMP */
	FPE_FLTINV,	/* 2D - INV | DZ | OFL | IMP */
	FPE_FLTDIV,	/* 2E - DNML | DZ | OFL | IMP */
	FPE_FLTINV,	/* 2F - INV | DNML | DZ | OFL | IMP */
	FPE_FLTUND,	/* 30 - UFL | IMP */
	FPE_FLTINV,	/* 31 - INV | UFL | IMP */
	FPE_FLTUND,	/* 32 - DNML | UFL | IMP */
	FPE_FLTINV,	/* 33 - INV | DNML | UFL | IMP */
	FPE_FLTDIV,	/* 34 - DZ | UFL | IMP */
	FPE_FLTINV,	/* 35 - INV | DZ | UFL | IMP */
	FPE_FLTDIV,	/* 36 - DNML | DZ | UFL | IMP */
	FPE_FLTINV,	/* 37 - INV | DNML | DZ | UFL | IMP */
	FPE_FLTOVF,	/* 38 - OFL | UFL | IMP */
	FPE_FLTINV,	/* 39 - INV | OFL | UFL | IMP */
	FPE_FLTUND,	/* 3A - DNML | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3B - INV | DNML | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3C - DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3D - INV | DZ | OFL | UFL | IMP */
	FPE_FLTDIV,	/* 3E - DNML | DZ | OFL | UFL | IMP */
	FPE_FLTINV,	/* 3F - INV | DNML | DZ | OFL | UFL | IMP */
	FPE_FLTSUB,	/* 40 - STK */
	FPE_FLTSUB,	/* 41 - INV | STK */
	FPE_FLTUND,	/* 42 - DNML | STK */
	FPE_FLTSUB,	/* 43 - INV | DNML | STK */
	FPE_FLTDIV,	/* 44 - DZ | STK */
	FPE_FLTSUB,	/* 45 - INV | DZ | STK */
	FPE_FLTDIV,	/* 46 - DNML | DZ | STK */
	FPE_FLTSUB,	/* 47 - INV | DNML | DZ | STK */
	FPE_FLTOVF,	/* 48 - OFL | STK */
	FPE_FLTSUB,	/* 49 - INV | OFL | STK */
	FPE_FLTUND,	/* 4A - DNML | OFL | STK */
	FPE_FLTSUB,	/* 4B - INV | DNML | OFL | STK */
	FPE_FLTDIV,	/* 4C - DZ | OFL | STK */
	FPE_FLTSUB,	/* 4D - INV | DZ | OFL | STK */
	FPE_FLTDIV,	/* 4E - DNML | DZ | OFL | STK */
	FPE_FLTSUB,	/* 4F - INV | DNML | DZ | OFL | STK */
	FPE_FLTUND,	/* 50 - UFL | STK */
	FPE_FLTSUB,	/* 51 - INV | UFL | STK */
	FPE_FLTUND,	/* 52 - DNML | UFL | STK */
	FPE_FLTSUB,	/* 53 - INV | DNML | UFL | STK */
	FPE_FLTDIV,	/* 54 - DZ | UFL | STK */
	FPE_FLTSUB,	/* 55 - INV | DZ | UFL | STK */
	FPE_FLTDIV,	/* 56 - DNML | DZ | UFL | STK */
	FPE_FLTSUB,	/* 57 - INV | DNML | DZ | UFL | STK */
	FPE_FLTOVF,	/* 58 - OFL | UFL | STK */
	FPE_FLTSUB,	/* 59 - INV | OFL | UFL | STK */
	FPE_FLTUND,	/* 5A - DNML | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5B - INV | DNML | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5C - DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5D - INV | DZ | OFL | UFL | STK */
	FPE_FLTDIV,	/* 5E - DNML | DZ | OFL | UFL | STK */
	FPE_FLTSUB,	/* 5F - INV | DNML | DZ | OFL | UFL | STK */
	FPE_FLTRES,	/* 60 - IMP | STK */
	FPE_FLTSUB,	/* 61 - INV | IMP | STK */
	FPE_FLTUND,	/* 62 - DNML | IMP | STK */
	FPE_FLTSUB,	/* 63 - INV | DNML | IMP | STK */
	FPE_FLTDIV,	/* 64 - DZ | IMP | STK */
	FPE_FLTSUB,	/* 65 - INV | DZ | IMP | STK */
	FPE_FLTDIV,	/* 66 - DNML | DZ | IMP | STK */
	FPE_FLTSUB,	/* 67 - INV | DNML | DZ | IMP | STK */
	FPE_FLTOVF,	/* 68 - OFL | IMP | STK */
	FPE_FLTSUB,	/* 69 - INV | OFL | IMP | STK */
	FPE_FLTUND,	/* 6A - DNML | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6B - INV | DNML | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6C - DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6D - INV | DZ | OFL | IMP | STK */
	FPE_FLTDIV,	/* 6E - DNML | DZ | OFL | IMP | STK */
	FPE_FLTSUB,	/* 6F - INV | DNML | DZ | OFL | IMP | STK */
	FPE_FLTUND,	/* 70 - UFL | IMP | STK */
	FPE_FLTSUB,	/* 71 - INV | UFL | IMP | STK */
	FPE_FLTUND,	/* 72 - DNML | UFL | IMP | STK */
	FPE_FLTSUB,	/* 73 - INV | DNML | UFL | IMP | STK */
	FPE_FLTDIV,	/* 74 - DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 75 - INV | DZ | UFL | IMP | STK */
	FPE_FLTDIV,	/* 76 - DNML | DZ | UFL | IMP | STK */
	FPE_FLTSUB,	/* 77 - INV | DNML | DZ | UFL | IMP | STK */
	FPE_FLTOVF,	/* 78 - OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 79 - INV | OFL | UFL | IMP | STK */
	FPE_FLTUND,	/* 7A - DNML | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7B - INV | DNML | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7C - DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7D - INV | DZ | OFL | UFL | IMP | STK */
	FPE_FLTDIV,	/* 7E - DNML | DZ | OFL | UFL | IMP | STK */
	FPE_FLTSUB,	/* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */
};

/*
 * Read the FP status and control words, then generate si_code value
 * for SIGFPE.  The error code chosen will be one of the
 * FPE_... macros.  It will be sent as the second argument to old
 * BSD-style signal handlers and as "siginfo_t->si_code" (second
 * argument) to SA_SIGINFO signal handlers.
 *
 * Some time ago, we cleared the x87 exceptions with FNCLEX here.
 * Clearing exceptions was necessary mainly to avoid IRQ13 bugs.  The
 * usermode code which understands the FPU hardware well enough to
 * enable the exceptions can also handle clearing the exception state
 * in the handler.  The only consequence of not clearing the exception
 * is the rethrow of the SIGFPE on return from the signal handler and
 * reexecution of the corresponding instruction.
 *
 * For XMM traps, the exceptions were never cleared.
 */
int
npxtrap_x87(void)
{
	u_short control, status;

	if (!hw_float) {
		printf(
	"npxtrap_x87: fpcurthread = %p, curthread = %p, hw_float = %d\n",
		    PCPU_GET(fpcurthread), curthread, hw_float);
		panic("npxtrap from nowhere");
	}
	critical_enter();

	/*
	 * Interrupt handling (for another interrupt) may have pushed the
	 * state to memory.  Fetch the relevant parts of the state from
	 * wherever they are.
	 */
	if (PCPU_GET(fpcurthread) != curthread) {
		control = GET_FPU_CW(curthread);
		status = GET_FPU_SW(curthread);
	} else {
		fnstcw(&control);
		fnstsw(&status);
	}
	critical_exit();
	return (fpetable[status & ((~control & 0x3f) | 0x40)]);
}

#ifdef CPU_ENABLE_SSE
int
npxtrap_sse(void)
{
	u_int mxcsr;

	if (!hw_float) {
		printf(
	"npxtrap_sse: fpcurthread = %p, curthread = %p, hw_float = %d\n",
		    PCPU_GET(fpcurthread), curthread, hw_float);
		panic("npxtrap from nowhere");
	}
	critical_enter();
	if (PCPU_GET(fpcurthread) != curthread)
		mxcsr = curthread->td_pcb->pcb_save->sv_xmm.sv_env.en_mxcsr;
	else
		stmxcsr(&mxcsr);
	critical_exit();
	return (fpetable[(mxcsr & (~mxcsr >> 7)) & 0x3f]);
}
#endif

static void
restore_npx_curthread(struct thread *td, struct pcb *pcb)
{

	/*
	 * Record new context early in case frstor causes a trap.
	 */
	PCPU_SET(fpcurthread, td);

	stop_emulating();
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr)
		fpu_clean_state();
#endif

	if ((pcb->pcb_flags & PCB_NPXINITDONE) == 0) {
		/*
		 * This is the first time this thread has used the FPU or
		 * the PCB doesn't contain a clean FPU state.  Explicitly
		 * load an initial state.
		 *
		 * We prefer to restore the state from the actual save
		 * area in the PCB instead of directly loading from
		 * npx_initialstate, to ignite the XSAVEOPT
		 * tracking engine.
		 */
		bcopy(npx_initialstate, pcb->pcb_save, cpu_max_ext_state_size);
		fpurstor(pcb->pcb_save);
		if (pcb->pcb_initial_npxcw != __INITIAL_NPXCW__)
			fldcw(pcb->pcb_initial_npxcw);
		pcb->pcb_flags |= PCB_NPXINITDONE;
		if (PCB_USER_FPU(pcb))
			pcb->pcb_flags |= PCB_NPXUSERINITDONE;
	} else {
		fpurstor(pcb->pcb_save);
	}
}

/*
 * Implement device not available (DNA) exception.
 *
 * It would be better to switch FP context here (if curthread != fpcurthread)
 * and not necessarily for every context switch, but it is too hard to
 * access foreign pcb's.
 */
int
npxdna(void)
{
	struct thread *td;

	if (!hw_float)
		return (0);
	td = curthread;
	critical_enter();
	if (__predict_false(PCPU_GET(fpcurthread) == td)) {
		/*
		 * Some virtual machines seem to set %cr0.TS at
		 * arbitrary moments.  Silently clear the TS bit
		 * regardless of the eager/lazy FPU context switch
		 * mode.
		 */
		stop_emulating();
	} else {
		if (__predict_false(PCPU_GET(fpcurthread) != NULL)) {
			printf(
		    "npxdna: fpcurthread = %p (%d), curthread = %p (%d)\n",
			    PCPU_GET(fpcurthread),
			    PCPU_GET(fpcurthread)->td_proc->p_pid,
			    td, td->td_proc->p_pid);
			panic("npxdna");
		}
		restore_npx_curthread(td, td->td_pcb);
	}
	critical_exit();
	return (1);
}

/*
 * Wrapper for fpusave() called from context switch routines.
 *
 * npxsave() must be called with interrupts disabled, so that it clears
 * fpcurthread atomically with saving the state.  We require callers to do the
 * disabling, since most callers need to disable interrupts anyway to call
 * npxsave() atomically with checking fpcurthread.
 */
void
npxsave(union savefpu *addr)
{

	stop_emulating();
#ifdef CPU_ENABLE_SSE
	if (use_xsaveopt)
		xsaveopt((char *)addr, xsave_mask);
	else
#endif
		fpusave(addr);
}

void npxswitch(struct thread *td, struct pcb *pcb);
void
npxswitch(struct thread *td, struct pcb *pcb)
{

	if (lazy_fpu_switch || (td->td_pflags & TDP_KTHREAD) != 0 ||
	    !PCB_USER_FPU(pcb)) {
		start_emulating();
		PCPU_SET(fpcurthread, NULL);
	} else if (PCPU_GET(fpcurthread) != td) {
		restore_npx_curthread(td, pcb);
	}
}

/*
 * Unconditionally save the current co-processor state across suspend and
 * resume.
 */
void
npxsuspend(union savefpu *addr)
{
	register_t cr0;

	if (!hw_float)
		return;
	if (PCPU_GET(fpcurthread) == NULL) {
		bcopy(npx_initialstate, addr, cpu_max_ext_state_size);
		return;
	}
	cr0 = rcr0();
	stop_emulating();
	fpusave(addr);
	load_cr0(cr0);
}

void
npxresume(union savefpu *addr)
{
	register_t cr0;

	if (!hw_float)
		return;

	cr0 = rcr0();
	npxinit(false);
	stop_emulating();
	fpurstor(addr);
	load_cr0(cr0);
}

void
npxdrop(void)
{
	struct thread *td;

	/*
	 * Discard pending exceptions in the !cpu_fxsr case so that unmasked
	 * ones don't cause a panic on the next frstor.
	 */
#ifdef CPU_ENABLE_SSE
	if (!cpu_fxsr)
#endif
		fnclex();

	td = PCPU_GET(fpcurthread);
	KASSERT(td == curthread, ("fpudrop: fpcurthread != curthread"));
	CRITICAL_ASSERT(td);
	PCPU_SET(fpcurthread, NULL);
	td->td_pcb->pcb_flags &= ~PCB_NPXINITDONE;
	start_emulating();
}

/*
 * Get the user state of the FPU into pcb->pcb_user_save without
 * dropping ownership (if possible).  It returns the FPU ownership
 * status.
 */
int
npxgetregs(struct thread *td)
{
	struct pcb *pcb;
#ifdef CPU_ENABLE_SSE
	uint64_t *xstate_bv, bit;
	char *sa;
	int max_ext_n, i;
#endif
	int owned;

	if (!hw_float)
		return (_MC_FPOWNED_NONE);

	pcb = td->td_pcb;
	critical_enter();
	if ((pcb->pcb_flags & PCB_NPXINITDONE) == 0) {
		bcopy(npx_initialstate, get_pcb_user_save_pcb(pcb),
		    cpu_max_ext_state_size);
		SET_FPU_CW(get_pcb_user_save_pcb(pcb), pcb->pcb_initial_npxcw);
		npxuserinited(td);
		critical_exit();
		return (_MC_FPOWNED_PCB);
	}
	if (td == PCPU_GET(fpcurthread)) {
		fpusave(get_pcb_user_save_pcb(pcb));
#ifdef CPU_ENABLE_SSE
		if (!cpu_fxsr)
#endif
			/*
			 * fnsave initializes the FPU and destroys whatever
			 * context it contains.  Make sure the FPU owner
			 * starts with a clean state next time.
			 */
			npxdrop();
		owned = _MC_FPOWNED_FPU;
	} else {
		owned = _MC_FPOWNED_PCB;
	}
#ifdef CPU_ENABLE_SSE
	if (use_xsave) {
		/*
		 * Handle partially saved state.
		 */
		sa = (char *)get_pcb_user_save_pcb(pcb);
		xstate_bv = (uint64_t *)(sa + sizeof(union savefpu) +
		    offsetof(struct xstate_hdr, xstate_bv));
		if (xsave_mask >> 32 != 0)
			max_ext_n = fls(xsave_mask >> 32) + 32;
		else
			max_ext_n = fls(xsave_mask);
		for (i = 0; i < max_ext_n; i++) {
			bit = 1ULL << i;
			if ((xsave_mask & bit) == 0 || (*xstate_bv & bit) != 0)
				continue;
			bcopy((char *)npx_initialstate +
			    xsave_area_desc[i].offset,
			    sa + xsave_area_desc[i].offset,
			    xsave_area_desc[i].size);
			*xstate_bv |= bit;
		}
	}
#endif
	critical_exit();
	return (owned);
}

void
npxuserinited(struct thread *td)
{
	struct pcb *pcb;

	CRITICAL_ASSERT(td);
	pcb = td->td_pcb;
	if (PCB_USER_FPU(pcb))
		pcb->pcb_flags |= PCB_NPXINITDONE;
	pcb->pcb_flags |= PCB_NPXUSERINITDONE;
}

#ifdef CPU_ENABLE_SSE
int
npxsetxstate(struct thread *td, char *xfpustate, size_t xfpustate_size)
{
	struct xstate_hdr *hdr, *ehdr;
	size_t len, max_len;
	uint64_t bv;

	/* XXXKIB should we clear all extended state in xstate_bv instead ? */
	if (xfpustate == NULL)
		return (0);
	if (!use_xsave)
		return (EOPNOTSUPP);

	len = xfpustate_size;
	if (len < sizeof(struct xstate_hdr))
		return (EINVAL);
	max_len = cpu_max_ext_state_size - sizeof(union savefpu);
	if (len > max_len)
		return (EINVAL);

	ehdr = (struct xstate_hdr *)xfpustate;
	bv = ehdr->xstate_bv;

	/*
	 * Avoid #gp.
	 */
	if (bv & ~xsave_mask)
		return (EINVAL);

	hdr = (struct xstate_hdr *)(get_pcb_user_save_td(td) + 1);

	hdr->xstate_bv = bv;
	bcopy(xfpustate + sizeof(struct xstate_hdr),
	    (char *)(hdr + 1), len - sizeof(struct xstate_hdr));

	return (0);
}
#endif

int
npxsetregs(struct thread *td, union savefpu *addr, char *xfpustate,
	size_t xfpustate_size)
{
	struct pcb *pcb;
	int error;

	if (!hw_float)
		return (ENXIO);

#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr)
		addr->sv_xmm.sv_env.en_mxcsr &= cpu_mxcsr_mask;
#endif
	pcb = td->td_pcb;
	error = 0;
	critical_enter();
	if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) {
#ifdef CPU_ENABLE_SSE
		error = npxsetxstate(td, xfpustate, xfpustate_size);
#endif
		if (error == 0) {
#ifdef CPU_ENABLE_SSE
			if (!cpu_fxsr)
#endif
				fnclex();	/* As in npxdrop(). */
			bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
			fpurstor(get_pcb_user_save_td(td));
			pcb->pcb_flags |= PCB_NPXUSERINITDONE | PCB_NPXINITDONE;
		}
	} else {
#ifdef CPU_ENABLE_SSE
		error = npxsetxstate(td, xfpustate, xfpustate_size);
#endif
		if (error == 0) {
			bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr));
			npxuserinited(td);
		}
	}
	critical_exit();
	return (error);
}

static void
fpusave(union savefpu *addr)
{

#ifdef CPU_ENABLE_SSE
	if (use_xsave)
		xsave((char *)addr, xsave_mask);
	else if (cpu_fxsr)
		fxsave(addr);
	else
#endif
		fnsave(addr);
}

#ifdef CPU_ENABLE_SSE
/*
 * On AuthenticAMD processors, the fxrstor instruction does not restore
 * the x87's stored last instruction pointer, last data pointer, and last
 * opcode values, except in the rare case in which the exception summary
 * (ES) bit in the x87 status word is set to 1.
 *
 * In order to avoid leaking this information across processes, we clean
 * these values by performing a dummy load before executing fxrstor().
 */
static void
fpu_clean_state(void)
{
	static float dummy_variable = 0.0;
	u_short status;

	/*
	 * Clear the ES bit in the x87 status word if it is currently
	 * set, in order to avoid causing a fault in the upcoming load.
	 */
	fnstsw(&status);
	if (status & 0x80)
		fnclex();

	/*
	 * Load the dummy variable into the x87 stack.  This mangles
	 * the x87 stack, but we don't care since we're about to call
	 * fxrstor() anyway.
	 */
	__asm __volatile("ffree %%st(7); flds %0" : : "m" (dummy_variable));
}
#endif /* CPU_ENABLE_SSE */

static void
fpurstor(union savefpu *addr)
{

#ifdef CPU_ENABLE_SSE
	if (use_xsave)
		xrstor((char *)addr, xsave_mask);
	else if (cpu_fxsr)
		fxrstor(addr);
	else
#endif
		frstor(addr);
}

#ifdef DEV_ISA
/*
 * This sucks up the legacy ISA support assignments from PNPBIOS/ACPI.
 */
static struct isa_pnp_id npxisa_ids[] = {
	{ 0x040cd041, "Legacy ISA coprocessor support" }, /* PNP0C04 */
	{ 0 }
};

static int
npxisa_probe(device_t dev)
{
	int result;

	if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev,
	    npxisa_ids)) <= 0) {
		device_quiet(dev);
	}
	return (result);
}

static int
npxisa_attach(device_t dev)
{

	return (0);
}

static device_method_t npxisa_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		npxisa_probe),
	DEVMETHOD(device_attach,	npxisa_attach),
	DEVMETHOD(device_detach,	bus_generic_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	bus_generic_suspend),
	DEVMETHOD(device_resume,	bus_generic_resume),

	{ 0, 0 }
};

static driver_t npxisa_driver = {
	"npxisa",
	npxisa_methods,
	1,			/* no softc */
};

static devclass_t npxisa_devclass;

DRIVER_MODULE(npxisa, isa, npxisa_driver, npxisa_devclass, 0, 0);
#ifndef PC98
DRIVER_MODULE(npxisa, acpi, npxisa_driver, npxisa_devclass, 0, 0);
#endif
#endif /* DEV_ISA */

static MALLOC_DEFINE(M_FPUKERN_CTX, "fpukern_ctx",
    "Kernel contexts for FPU state");

#define	FPU_KERN_CTX_NPXINITDONE	0x01
#define	FPU_KERN_CTX_DUMMY		0x02

struct fpu_kern_ctx {
	union savefpu *prev;
	uint32_t flags;
	char hwstate1[];
};

struct fpu_kern_ctx *
fpu_kern_alloc_ctx(u_int flags)
{
	struct fpu_kern_ctx *res;
	size_t sz;

	sz = sizeof(struct fpu_kern_ctx) + XSAVE_AREA_ALIGN +
	    cpu_max_ext_state_size;
	res = malloc(sz, M_FPUKERN_CTX, ((flags & FPU_KERN_NOWAIT) ?
	    M_NOWAIT : M_WAITOK) | M_ZERO);
	return (res);
}

void
fpu_kern_free_ctx(struct fpu_kern_ctx *ctx)
{

	/* XXXKIB clear the memory ? */
	free(ctx, M_FPUKERN_CTX);
}
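
/*
 * Sketch of how a kernel consumer typically uses the fpu_kern_*()
 * interface below (not taken from this file; the consumer, its context
 * variable and the chosen flags are hypothetical):
 *
 *	struct fpu_kern_ctx *ctx;
 *
 *	ctx = fpu_kern_alloc_ctx(0);
 *	fpu_kern_enter(curthread, ctx, FPU_KERN_KTHR);
 *	... use FPU/SSE instructions ...
 *	fpu_kern_leave(curthread, ctx);
 *	fpu_kern_free_ctx(ctx);
 */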

static union savefpu *
fpu_kern_ctx_savefpu(struct fpu_kern_ctx *ctx)
{
	vm_offset_t p;

	p = (vm_offset_t)&ctx->hwstate1;
	p = roundup2(p, XSAVE_AREA_ALIGN);
	return ((union savefpu *)p);
}

int
fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags)
{
	struct pcb *pcb;

	if ((flags & FPU_KERN_KTHR) != 0 && is_fpu_kern_thread(0)) {
		ctx->flags = FPU_KERN_CTX_DUMMY;
		return (0);
	}
	pcb = td->td_pcb;
	critical_enter();
	KASSERT(!PCB_USER_FPU(pcb) || pcb->pcb_save ==
	    get_pcb_user_save_pcb(pcb), ("mangled pcb_save"));
	ctx->flags = 0;
	if ((pcb->pcb_flags & PCB_NPXINITDONE) != 0)
		ctx->flags |= FPU_KERN_CTX_NPXINITDONE;
	npxexit(td);
	ctx->prev = pcb->pcb_save;
	pcb->pcb_save = fpu_kern_ctx_savefpu(ctx);
	pcb->pcb_flags |= PCB_KERNNPX;
	pcb->pcb_flags &= ~PCB_NPXINITDONE;
	critical_exit();
	return (0);
}

int
fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx)
{
	struct pcb *pcb;

	if (is_fpu_kern_thread(0) && (ctx->flags & FPU_KERN_CTX_DUMMY) != 0)
		return (0);
	pcb = td->td_pcb;
	critical_enter();
	if (curthread == PCPU_GET(fpcurthread))
		npxdrop();
	pcb->pcb_save = ctx->prev;
	if (pcb->pcb_save == get_pcb_user_save_pcb(pcb)) {
		if ((pcb->pcb_flags & PCB_NPXUSERINITDONE) != 0)
			pcb->pcb_flags |= PCB_NPXINITDONE;
		else
			pcb->pcb_flags &= ~PCB_NPXINITDONE;
		pcb->pcb_flags &= ~PCB_KERNNPX;
	} else {
		if ((ctx->flags & FPU_KERN_CTX_NPXINITDONE) != 0)
			pcb->pcb_flags |= PCB_NPXINITDONE;
		else
			pcb->pcb_flags &= ~PCB_NPXINITDONE;
		KASSERT(!PCB_USER_FPU(pcb), ("unpaired fpu_kern_leave"));
	}
	critical_exit();
	return (0);
}

int
fpu_kern_thread(u_int flags)
{

	KASSERT((curthread->td_pflags & TDP_KTHREAD) != 0,
	    ("Only kthread may use fpu_kern_thread"));
	KASSERT(curpcb->pcb_save == get_pcb_user_save_pcb(curpcb),
	    ("mangled pcb_save"));
	KASSERT(PCB_USER_FPU(curpcb), ("recursive call"));

	curpcb->pcb_flags |= PCB_KERNNPX;
	return (0);
}

int
is_fpu_kern_thread(u_int flags)
{

	if ((curthread->td_pflags & TDP_KTHREAD) == 0)
		return (0);
	return ((curpcb->pcb_flags & PCB_KERNNPX) != 0);
}

/*
 * FPU save area alloc/free/init utility routines
 */
union savefpu *
fpu_save_area_alloc(void)
{

	return (uma_zalloc(fpu_save_area_zone, 0));
}

void
fpu_save_area_free(union savefpu *fsa)
{

	uma_zfree(fpu_save_area_zone, fsa);
}

void
fpu_save_area_reset(union savefpu *fsa)
{

	bcopy(npx_initialstate, fsa, cpu_max_ext_state_size);
}