npx.c revision 285290
1105816Sphk/*- 2121921Smarcel * Copyright (c) 1990 William Jolitz. 3121921Smarcel * Copyright (c) 1991 The Regents of the University of California. 4121921Smarcel * All rights reserved. 5121921Smarcel * 6121921Smarcel * Redistribution and use in source and binary forms, with or without 7121921Smarcel * modification, are permitted provided that the following conditions 8121921Smarcel * are met: 9121921Smarcel * 1. Redistributions of source code must retain the above copyright 10121921Smarcel * notice, this list of conditions and the following disclaimer. 11121921Smarcel * 2. Redistributions in binary form must reproduce the above copyright 12121921Smarcel * notice, this list of conditions and the following disclaimer in the 13121921Smarcel * documentation and/or other materials provided with the distribution. 14121921Smarcel * 4. Neither the name of the University nor the names of its contributors 15121921Smarcel * may be used to endorse or promote products derived from this software 16121921Smarcel * without specific prior written permission. 17121921Smarcel * 18121921Smarcel * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 19121921Smarcel * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20121921Smarcel * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21121921Smarcel * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 22121921Smarcel * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23121921Smarcel * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24121921Smarcel * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25121921Smarcel * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26121921Smarcel * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27105816Sphk * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28105816Sphk * SUCH DAMAGE. 29105816Sphk * 30105816Sphk * from: @(#)npx.c 7.2 (Berkeley) 5/12/91 31105816Sphk */ 32105816Sphk 33105816Sphk#include <sys/cdefs.h> 34105816Sphk__FBSDID("$FreeBSD: head/sys/i386/isa/npx.c 285290 2015-07-08 19:26:36Z jmg $"); 35121921Smarcel 36121921Smarcel#include "opt_cpu.h" 37121921Smarcel#include "opt_isa.h" 38121921Smarcel#include "opt_npx.h" 39121921Smarcel 40105816Sphk#include <sys/param.h> 41121921Smarcel#include <sys/systm.h> 42121921Smarcel#include <sys/bus.h> 43121921Smarcel#include <sys/kernel.h> 44121921Smarcel#include <sys/lock.h> 45121921Smarcel#include <sys/malloc.h> 46121921Smarcel#include <sys/module.h> 47121921Smarcel#include <sys/mutex.h> 48105816Sphk#include <sys/mutex.h> 49105816Sphk#include <sys/proc.h> 50121921Smarcel#include <sys/smp.h> 51121921Smarcel#include <sys/sysctl.h> 52121921Smarcel#include <machine/bus.h> 53121921Smarcel#include <sys/rman.h> 54121921Smarcel#ifdef NPX_DEBUG 55121921Smarcel#include <sys/syslog.h> 56121921Smarcel#endif 57121921Smarcel#include <sys/signalvar.h> 58121921Smarcel#include <vm/uma.h> 59121921Smarcel 60121921Smarcel#include <machine/asmacros.h> 61121921Smarcel#include <machine/cputypes.h> 62121921Smarcel#include <machine/frame.h> 63121921Smarcel#include <machine/md_var.h> 64121921Smarcel#include <machine/pcb.h> 65121921Smarcel#include <machine/psl.h> 66121921Smarcel#include <machine/resource.h> 67121921Smarcel#include <machine/specialreg.h> 68121921Smarcel#include <machine/segments.h> 69121921Smarcel#include <machine/ucontext.h> 70121921Smarcel 71121921Smarcel#include <machine/intr_machdep.h> 72121921Smarcel 73121921Smarcel#ifdef DEV_ISA 74121921Smarcel#include <isa/isavar.h> 75121921Smarcel#endif 76121921Smarcel 77121921Smarcel#if !defined(CPU_DISABLE_SSE) && defined(I686_CPU) 78121921Smarcel#define CPU_ENABLE_SSE 79121921Smarcel#endif 80121921Smarcel 81121921Smarcel/* 82121921Smarcel * 387 and 287 Numeric Coprocessor Extension (NPX) Driver. 83121921Smarcel */ 84121921Smarcel 85121921Smarcel#if defined(__GNUCLIKE_ASM) && !defined(lint) 86121921Smarcel 87121921Smarcel#define fldcw(cw) __asm __volatile("fldcw %0" : : "m" (cw)) 88121921Smarcel#define fnclex() __asm __volatile("fnclex") 89121921Smarcel#define fninit() __asm __volatile("fninit") 90121921Smarcel#define fnsave(addr) __asm __volatile("fnsave %0" : "=m" (*(addr))) 91121921Smarcel#define fnstcw(addr) __asm __volatile("fnstcw %0" : "=m" (*(addr))) 92121921Smarcel#define fnstsw(addr) __asm __volatile("fnstsw %0" : "=am" (*(addr))) 93121921Smarcel#define fp_divide_by_0() __asm __volatile( \ 94121921Smarcel "fldz; fld1; fdiv %st,%st(1); fnop") 95121921Smarcel#define frstor(addr) __asm __volatile("frstor %0" : : "m" (*(addr))) 96121921Smarcel#ifdef CPU_ENABLE_SSE 97121921Smarcel#define fxrstor(addr) __asm __volatile("fxrstor %0" : : "m" (*(addr))) 98121921Smarcel#define fxsave(addr) __asm __volatile("fxsave %0" : "=m" (*(addr))) 99121921Smarcel#define ldmxcsr(csr) __asm __volatile("ldmxcsr %0" : : "m" (csr)) 100121921Smarcel#define stmxcsr(addr) __asm __volatile("stmxcsr %0" : : "m" (*(addr))) 101121921Smarcel 102121921Smarcelstatic __inline void 103121921Smarcelxrstor(char *addr, uint64_t mask) 104121921Smarcel{ 105121921Smarcel uint32_t low, hi; 106121921Smarcel 107121921Smarcel low = mask; 108121921Smarcel hi = mask >> 32; 109121921Smarcel __asm __volatile("xrstor %0" : : "m" (*addr), "a" (low), "d" (hi)); 110121921Smarcel} 111121921Smarcel 112121921Smarcelstatic __inline void 113121921Smarcelxsave(char *addr, uint64_t mask) 114121921Smarcel{ 115121921Smarcel uint32_t low, hi; 116121921Smarcel 117121921Smarcel low = mask; 118121921Smarcel hi = mask >> 32; 119121921Smarcel __asm __volatile("xsave %0" : "=m" (*addr) : "a" (low), "d" (hi) : 120121921Smarcel "memory"); 121121921Smarcel} 122121921Smarcel 123121921Smarcelstatic __inline void 124121921Smarcelxsaveopt(char *addr, uint64_t mask) 125121921Smarcel{ 126121921Smarcel uint32_t low, hi; 127121921Smarcel 128121921Smarcel low = mask; 129121921Smarcel hi = mask >> 32; 130121921Smarcel __asm __volatile("xsaveopt %0" : "=m" (*addr) : "a" (low), "d" (hi) : 131121921Smarcel "memory"); 132121921Smarcel} 133121921Smarcel#endif 134121921Smarcel#else /* !(__GNUCLIKE_ASM && !lint) */ 135121921Smarcel 136121921Smarcelvoid fldcw(u_short cw); 137121921Smarcelvoid fnclex(void); 138121921Smarcelvoid fninit(void); 139121921Smarcelvoid fnsave(caddr_t addr); 140121921Smarcelvoid fnstcw(caddr_t addr); 141121921Smarcelvoid fnstsw(caddr_t addr); 142121921Smarcelvoid fp_divide_by_0(void); 143121921Smarcelvoid frstor(caddr_t addr); 144121921Smarcel#ifdef CPU_ENABLE_SSE 145121921Smarcelvoid fxsave(caddr_t addr); 146121921Smarcelvoid fxrstor(caddr_t addr); 147121921Smarcelvoid ldmxcsr(u_int csr); 148121921Smarcelvoid stmxcsr(u_int *csr); 149121921Smarcelvoid xrstor(char *addr, uint64_t mask); 150121921Smarcelvoid xsave(char *addr, uint64_t mask); 151121921Smarcelvoid xsaveopt(char *addr, uint64_t mask); 152121921Smarcel#endif 153121921Smarcel 154121921Smarcel#endif /* __GNUCLIKE_ASM && !lint */ 155121921Smarcel 156121921Smarcel#define start_emulating() load_cr0(rcr0() | CR0_TS) 157121921Smarcel#define stop_emulating() clts() 158121921Smarcel 159121921Smarcel#ifdef CPU_ENABLE_SSE 160121921Smarcel#define GET_FPU_CW(thread) \ 161121921Smarcel (cpu_fxsr ? \ 162121921Smarcel (thread)->td_pcb->pcb_save->sv_xmm.sv_env.en_cw : \ 163121921Smarcel (thread)->td_pcb->pcb_save->sv_87.sv_env.en_cw) 164121921Smarcel#define GET_FPU_SW(thread) \ 165121921Smarcel (cpu_fxsr ? \ 166121921Smarcel (thread)->td_pcb->pcb_save->sv_xmm.sv_env.en_sw : \ 167121921Smarcel (thread)->td_pcb->pcb_save->sv_87.sv_env.en_sw) 168121921Smarcel#define SET_FPU_CW(savefpu, value) do { \ 169121921Smarcel if (cpu_fxsr) \ 170121921Smarcel (savefpu)->sv_xmm.sv_env.en_cw = (value); \ 171121921Smarcel else \ 172121921Smarcel (savefpu)->sv_87.sv_env.en_cw = (value); \ 173121921Smarcel} while (0) 174121921Smarcel#else /* CPU_ENABLE_SSE */ 175121921Smarcel#define GET_FPU_CW(thread) \ 176121921Smarcel (thread->td_pcb->pcb_save->sv_87.sv_env.en_cw) 177121921Smarcel#define GET_FPU_SW(thread) \ 178121921Smarcel (thread->td_pcb->pcb_save->sv_87.sv_env.en_sw) 179121921Smarcel#define SET_FPU_CW(savefpu, value) \ 180121921Smarcel (savefpu)->sv_87.sv_env.en_cw = (value) 181121921Smarcel#endif /* CPU_ENABLE_SSE */ 182121921Smarcel 183121921Smarcel#ifdef CPU_ENABLE_SSE 184121921SmarcelCTASSERT(sizeof(union savefpu) == 512); 185121921SmarcelCTASSERT(sizeof(struct xstate_hdr) == 64); 186121921SmarcelCTASSERT(sizeof(struct savefpu_ymm) == 832); 187121921Smarcel 188121921Smarcel/* 189121921Smarcel * This requirement is to make it easier for asm code to calculate 190121921Smarcel * offset of the fpu save area from the pcb address. FPU save area 191121921Smarcel * must be 64-byte aligned. 192121921Smarcel */ 193121921SmarcelCTASSERT(sizeof(struct pcb) % XSAVE_AREA_ALIGN == 0); 194121921Smarcel 195121921Smarcel/* 196121921Smarcel * Ensure the copy of XCR0 saved in a core is contained in the padding 197121921Smarcel * area. 198121921Smarcel */ 199121921SmarcelCTASSERT(X86_XSTATE_XCR0_OFFSET >= offsetof(struct savexmm, sv_pad) && 200121921Smarcel X86_XSTATE_XCR0_OFFSET + sizeof(uint64_t) <= sizeof(struct savexmm)); 201121921Smarcel 202121921Smarcelstatic void fpu_clean_state(void); 203121921Smarcel#endif 204121921Smarcel 205121921Smarcelstatic void fpusave(union savefpu *); 206121921Smarcelstatic void fpurstor(union savefpu *); 207121921Smarcel 208121921Smarcelint hw_float; 209121921Smarcel 210121921SmarcelSYSCTL_INT(_hw, HW_FLOATINGPT, floatingpoint, CTLFLAG_RD, 211121921Smarcel &hw_float, 0, "Floating point instructions executed in hardware"); 212121921Smarcel 213121921Smarcel#ifdef CPU_ENABLE_SSE 214121921Smarcelint use_xsave; 215121921Smarceluint64_t xsave_mask; 216121921Smarcel#endif 217121921Smarcelstatic uma_zone_t fpu_save_area_zone; 218121921Smarcelstatic union savefpu *npx_initialstate; 219121921Smarcel 220121921Smarcel#ifdef CPU_ENABLE_SSE 221121921Smarcelstruct xsave_area_elm_descr { 222121921Smarcel u_int offset; 223121921Smarcel u_int size; 224121921Smarcel} *xsave_area_desc; 225121921Smarcel 226121921Smarcelstatic int use_xsaveopt; 227121921Smarcel#endif 228121921Smarcel 229121921Smarcelstatic volatile u_int npx_traps_while_probing; 230121921Smarcel 231121921Smarcelalias_for_inthand_t probetrap; 232121921Smarcel__asm(" \n\ 233121921Smarcel .text \n\ 234121921Smarcel .p2align 2,0x90 \n\ 235121921Smarcel .type " __XSTRING(CNAME(probetrap)) ",@function \n\ 236121921Smarcel" __XSTRING(CNAME(probetrap)) ": \n\ 237121921Smarcel ss \n\ 238121921Smarcel incl " __XSTRING(CNAME(npx_traps_while_probing)) " \n\ 239121921Smarcel fnclex \n\ 240121921Smarcel iret \n\ 241121921Smarcel"); 242121921Smarcel 243121921Smarcel/* 244121921Smarcel * Determine if an FPU is present and how to use it. 245121921Smarcel */ 246121921Smarcelstatic int 247121921Smarcelnpx_probe(void) 248121921Smarcel{ 249121921Smarcel struct gate_descriptor save_idt_npxtrap; 250121921Smarcel u_short control, status; 251121921Smarcel 252121921Smarcel /* 253121921Smarcel * Modern CPUs all have an FPU that uses the INT16 interface 254121921Smarcel * and provide a simple way to verify that, so handle the 255121921Smarcel * common case right away. 256121921Smarcel */ 257121921Smarcel if (cpu_feature & CPUID_FPU) { 258121921Smarcel hw_float = 1; 259121921Smarcel return (1); 260121921Smarcel } 261121921Smarcel 262121921Smarcel save_idt_npxtrap = idt[IDT_MF]; 263121921Smarcel setidt(IDT_MF, probetrap, SDT_SYS386TGT, SEL_KPL, 264121921Smarcel GSEL(GCODE_SEL, SEL_KPL)); 265121921Smarcel 266121921Smarcel /* 267121921Smarcel * Don't trap while we're probing. 268121921Smarcel */ 269121921Smarcel stop_emulating(); 270121921Smarcel 271121921Smarcel /* 272121921Smarcel * Finish resetting the coprocessor, if any. If there is an error 273121921Smarcel * pending, then we may get a bogus IRQ13, but npx_intr() will handle 274121921Smarcel * it OK. Bogus halts have never been observed, but we enabled 275121921Smarcel * IRQ13 and cleared the BUSY# latch early to handle them anyway. 276121921Smarcel */ 277121921Smarcel fninit(); 278121921Smarcel 279121921Smarcel /* 280121921Smarcel * Don't use fwait here because it might hang. 281121921Smarcel * Don't use fnop here because it usually hangs if there is no FPU. 282121921Smarcel */ 283121921Smarcel DELAY(1000); /* wait for any IRQ13 */ 284121921Smarcel#ifdef DIAGNOSTIC 285121921Smarcel if (npx_traps_while_probing != 0) 286121921Smarcel printf("fninit caused %u bogus npx trap(s)\n", 287121921Smarcel npx_traps_while_probing); 288121921Smarcel#endif 289121921Smarcel /* 290121921Smarcel * Check for a status of mostly zero. 291121921Smarcel */ 292121921Smarcel status = 0x5a5a; 293121921Smarcel fnstsw(&status); 294121921Smarcel if ((status & 0xb8ff) == 0) { 295121921Smarcel /* 296121921Smarcel * Good, now check for a proper control word. 297121921Smarcel */ 298121921Smarcel control = 0x5a5a; 299121921Smarcel fnstcw(&control); 300121921Smarcel if ((control & 0x1f3f) == 0x033f) { 301121921Smarcel /* 302121921Smarcel * We have an npx, now divide by 0 to see if exception 303121921Smarcel * 16 works. 304121921Smarcel */ 305121921Smarcel control &= ~(1 << 2); /* enable divide by 0 trap */ 306121921Smarcel fldcw(control); 307121921Smarcel#ifdef FPU_ERROR_BROKEN 308121921Smarcel /* 309121921Smarcel * FPU error signal doesn't work on some CPU 310121921Smarcel * accelerator board. 311121921Smarcel */ 312121921Smarcel hw_float = 1; 313121921Smarcel return (1); 314121921Smarcel#endif 315121921Smarcel npx_traps_while_probing = 0; 316121921Smarcel fp_divide_by_0(); 317121921Smarcel if (npx_traps_while_probing != 0) { 318121921Smarcel /* 319121921Smarcel * Good, exception 16 works. 320121921Smarcel */ 321121921Smarcel hw_float = 1; 322121921Smarcel goto cleanup; 323105816Sphk } 324121921Smarcel printf( 325105816Sphk "FPU does not use exception 16 for error reporting\n"); 326121921Smarcel goto cleanup; 327121921Smarcel } 328121921Smarcel } 329121921Smarcel 330121921Smarcel /* 331121921Smarcel * Probe failed. Floating point simply won't work. 332121921Smarcel * Notify user and disable FPU/MMX/SSE instruction execution. 333121921Smarcel */ 334121921Smarcel printf("WARNING: no FPU!\n"); 335121921Smarcel __asm __volatile("smsw %%ax; orb %0,%%al; lmsw %%ax" : : 336121921Smarcel "n" (CR0_EM | CR0_MP) : "ax"); 337121921Smarcel 338121921Smarcelcleanup: 339121921Smarcel idt[IDT_MF] = save_idt_npxtrap; 340121921Smarcel return (hw_float); 341121921Smarcel} 342121921Smarcel 343121921Smarcel#ifdef CPU_ENABLE_SSE 344121921Smarcel/* 345121921Smarcel * Enable XSAVE if supported and allowed by user. 346121921Smarcel * Calculate the xsave_mask. 347121921Smarcel */ 348121921Smarcelstatic void 349121921Smarcelnpxinit_bsp1(void) 350121921Smarcel{ 351121921Smarcel u_int cp[4]; 352121921Smarcel uint64_t xsave_mask_user; 353121921Smarcel 354121921Smarcel if (cpu_fxsr && (cpu_feature2 & CPUID2_XSAVE) != 0) { 355121921Smarcel use_xsave = 1; 356121921Smarcel TUNABLE_INT_FETCH("hw.use_xsave", &use_xsave); 357121921Smarcel } 358121921Smarcel if (!use_xsave) 359121921Smarcel return; 360121921Smarcel 361121921Smarcel cpuid_count(0xd, 0x0, cp); 362121921Smarcel xsave_mask = XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE; 363121921Smarcel if ((cp[0] & xsave_mask) != xsave_mask) 364121921Smarcel panic("CPU0 does not support X87 or SSE: %x", cp[0]); 365121921Smarcel xsave_mask = ((uint64_t)cp[3] << 32) | cp[0]; 366121921Smarcel xsave_mask_user = xsave_mask; 367121921Smarcel TUNABLE_QUAD_FETCH("hw.xsave_mask", &xsave_mask_user); 368121921Smarcel xsave_mask_user |= XFEATURE_ENABLED_X87 | XFEATURE_ENABLED_SSE; 369121921Smarcel xsave_mask &= xsave_mask_user; 370121921Smarcel if ((xsave_mask & XFEATURE_AVX512) != XFEATURE_AVX512) 371121921Smarcel xsave_mask &= ~XFEATURE_AVX512; 372106741Smarcel if ((xsave_mask & XFEATURE_MPX) != XFEATURE_MPX) 373121921Smarcel xsave_mask &= ~XFEATURE_MPX; 374121921Smarcel 375121921Smarcel cpuid_count(0xd, 0x1, cp); 376121921Smarcel if ((cp[0] & CPUID_EXTSTATE_XSAVEOPT) != 0) 377121921Smarcel use_xsaveopt = 1; 378121921Smarcel} 379105816Sphk#endif 380/* 381 382 * Calculate the fpu save area size. 383 */ 384static void 385npxinit_bsp2(void) 386{ 387#ifdef CPU_ENABLE_SSE 388 u_int cp[4]; 389 390 if (use_xsave) { 391 cpuid_count(0xd, 0x0, cp); 392 cpu_max_ext_state_size = cp[1]; 393 394 /* 395 * Reload the cpu_feature2, since we enabled OSXSAVE. 396 */ 397 do_cpuid(1, cp); 398 cpu_feature2 = cp[2]; 399 } else 400#endif 401 cpu_max_ext_state_size = sizeof(union savefpu); 402} 403 404/* 405 * Initialize floating point unit. 406 */ 407void 408npxinit(bool bsp) 409{ 410 static union savefpu dummy; 411 register_t saveintr; 412#ifdef CPU_ENABLE_SSE 413 u_int mxcsr; 414#endif 415 u_short control; 416 417 if (bsp) { 418 if (!npx_probe()) 419 return; 420#ifdef CPU_ENABLE_SSE 421 npxinit_bsp1(); 422#endif 423 } 424 425#ifdef CPU_ENABLE_SSE 426 if (use_xsave) { 427 load_cr4(rcr4() | CR4_XSAVE); 428 load_xcr(XCR0, xsave_mask); 429 } 430#endif 431 432 /* 433 * XCR0 shall be set up before CPU can report the save area size. 434 */ 435 if (bsp) 436 npxinit_bsp2(); 437 438 /* 439 * fninit has the same h/w bugs as fnsave. Use the detoxified 440 * fnsave to throw away any junk in the fpu. fpusave() initializes 441 * the fpu. 442 * 443 * It is too early for critical_enter() to work on AP. 444 */ 445 saveintr = intr_disable(); 446 stop_emulating(); 447#ifdef CPU_ENABLE_SSE 448 if (cpu_fxsr) 449 fninit(); 450 else 451#endif 452 fnsave(&dummy); 453 control = __INITIAL_NPXCW__; 454 fldcw(control); 455#ifdef CPU_ENABLE_SSE 456 if (cpu_fxsr) { 457 mxcsr = __INITIAL_MXCSR__; 458 ldmxcsr(mxcsr); 459 } 460#endif 461 start_emulating(); 462 intr_restore(saveintr); 463} 464 465/* 466 * On the boot CPU we generate a clean state that is used to 467 * initialize the floating point unit when it is first used by a 468 * process. 469 */ 470static void 471npxinitstate(void *arg __unused) 472{ 473 register_t saveintr; 474#ifdef CPU_ENABLE_SSE 475 int cp[4], i, max_ext_n; 476#endif 477 478 if (!hw_float) 479 return; 480 481 npx_initialstate = malloc(cpu_max_ext_state_size, M_DEVBUF, 482 M_WAITOK | M_ZERO); 483 saveintr = intr_disable(); 484 stop_emulating(); 485 486 fpusave(npx_initialstate); 487#ifdef CPU_ENABLE_SSE 488 if (cpu_fxsr) { 489 if (npx_initialstate->sv_xmm.sv_env.en_mxcsr_mask) 490 cpu_mxcsr_mask = 491 npx_initialstate->sv_xmm.sv_env.en_mxcsr_mask; 492 else 493 cpu_mxcsr_mask = 0xFFBF; 494 495 /* 496 * The fninit instruction does not modify XMM 497 * registers. The fpusave call dumped the garbage 498 * contained in the registers after reset to the 499 * initial state saved. Clear XMM registers file 500 * image to make the startup program state and signal 501 * handler XMM register content predictable. 502 */ 503 bzero(npx_initialstate->sv_xmm.sv_fp, 504 sizeof(npx_initialstate->sv_xmm.sv_fp)); 505 bzero(npx_initialstate->sv_xmm.sv_xmm, 506 sizeof(npx_initialstate->sv_xmm.sv_xmm)); 507 } else 508#endif 509 bzero(npx_initialstate->sv_87.sv_ac, 510 sizeof(npx_initialstate->sv_87.sv_ac)); 511 512#ifdef CPU_ENABLE_SSE 513 /* 514 * Create a table describing the layout of the CPU Extended 515 * Save Area. 516 */ 517 if (use_xsave) { 518 if (xsave_mask >> 32 != 0) 519 max_ext_n = fls(xsave_mask >> 32) + 32; 520 else 521 max_ext_n = fls(xsave_mask); 522 xsave_area_desc = malloc(max_ext_n * sizeof(struct 523 xsave_area_elm_descr), M_DEVBUF, M_WAITOK | M_ZERO); 524 /* x87 state */ 525 xsave_area_desc[0].offset = 0; 526 xsave_area_desc[0].size = 160; 527 /* XMM */ 528 xsave_area_desc[1].offset = 160; 529 xsave_area_desc[1].size = 288 - 160; 530 531 for (i = 2; i < max_ext_n; i++) { 532 cpuid_count(0xd, i, cp); 533 xsave_area_desc[i].offset = cp[1]; 534 xsave_area_desc[i].size = cp[0]; 535 } 536 } 537#endif 538 539 fpu_save_area_zone = uma_zcreate("FPU_save_area", 540 cpu_max_ext_state_size, NULL, NULL, NULL, NULL, 541 XSAVE_AREA_ALIGN - 1, 0); 542 543 start_emulating(); 544 intr_restore(saveintr); 545} 546SYSINIT(npxinitstate, SI_SUB_DRIVERS, SI_ORDER_ANY, npxinitstate, NULL); 547 548/* 549 * Free coprocessor (if we have it). 550 */ 551void 552npxexit(td) 553 struct thread *td; 554{ 555 556 critical_enter(); 557 if (curthread == PCPU_GET(fpcurthread)) { 558 stop_emulating(); 559 fpusave(curpcb->pcb_save); 560 start_emulating(); 561 PCPU_SET(fpcurthread, NULL); 562 } 563 critical_exit(); 564#ifdef NPX_DEBUG 565 if (hw_float) { 566 u_int masked_exceptions; 567 568 masked_exceptions = GET_FPU_CW(td) & GET_FPU_SW(td) & 0x7f; 569 /* 570 * Log exceptions that would have trapped with the old 571 * control word (overflow, divide by 0, and invalid operand). 572 */ 573 if (masked_exceptions & 0x0d) 574 log(LOG_ERR, 575 "pid %d (%s) exited with masked floating point exceptions 0x%02x\n", 576 td->td_proc->p_pid, td->td_proc->p_comm, 577 masked_exceptions); 578 } 579#endif 580} 581 582int 583npxformat() 584{ 585 586 if (!hw_float) 587 return (_MC_FPFMT_NODEV); 588#ifdef CPU_ENABLE_SSE 589 if (cpu_fxsr) 590 return (_MC_FPFMT_XMM); 591#endif 592 return (_MC_FPFMT_387); 593} 594 595/* 596 * The following mechanism is used to ensure that the FPE_... value 597 * that is passed as a trapcode to the signal handler of the user 598 * process does not have more than one bit set. 599 * 600 * Multiple bits may be set if the user process modifies the control 601 * word while a status word bit is already set. While this is a sign 602 * of bad coding, we have no choise than to narrow them down to one 603 * bit, since we must not send a trapcode that is not exactly one of 604 * the FPE_ macros. 605 * 606 * The mechanism has a static table with 127 entries. Each combination 607 * of the 7 FPU status word exception bits directly translates to a 608 * position in this table, where a single FPE_... value is stored. 609 * This FPE_... value stored there is considered the "most important" 610 * of the exception bits and will be sent as the signal code. The 611 * precedence of the bits is based upon Intel Document "Numerical 612 * Applications", Chapter "Special Computational Situations". 613 * 614 * The macro to choose one of these values does these steps: 1) Throw 615 * away status word bits that cannot be masked. 2) Throw away the bits 616 * currently masked in the control word, assuming the user isn't 617 * interested in them anymore. 3) Reinsert status word bit 7 (stack 618 * fault) if it is set, which cannot be masked but must be presered. 619 * 4) Use the remaining bits to point into the trapcode table. 620 * 621 * The 6 maskable bits in order of their preference, as stated in the 622 * above referenced Intel manual: 623 * 1 Invalid operation (FP_X_INV) 624 * 1a Stack underflow 625 * 1b Stack overflow 626 * 1c Operand of unsupported format 627 * 1d SNaN operand. 628 * 2 QNaN operand (not an exception, irrelavant here) 629 * 3 Any other invalid-operation not mentioned above or zero divide 630 * (FP_X_INV, FP_X_DZ) 631 * 4 Denormal operand (FP_X_DNML) 632 * 5 Numeric over/underflow (FP_X_OFL, FP_X_UFL) 633 * 6 Inexact result (FP_X_IMP) 634 */ 635static char fpetable[128] = { 636 0, 637 FPE_FLTINV, /* 1 - INV */ 638 FPE_FLTUND, /* 2 - DNML */ 639 FPE_FLTINV, /* 3 - INV | DNML */ 640 FPE_FLTDIV, /* 4 - DZ */ 641 FPE_FLTINV, /* 5 - INV | DZ */ 642 FPE_FLTDIV, /* 6 - DNML | DZ */ 643 FPE_FLTINV, /* 7 - INV | DNML | DZ */ 644 FPE_FLTOVF, /* 8 - OFL */ 645 FPE_FLTINV, /* 9 - INV | OFL */ 646 FPE_FLTUND, /* A - DNML | OFL */ 647 FPE_FLTINV, /* B - INV | DNML | OFL */ 648 FPE_FLTDIV, /* C - DZ | OFL */ 649 FPE_FLTINV, /* D - INV | DZ | OFL */ 650 FPE_FLTDIV, /* E - DNML | DZ | OFL */ 651 FPE_FLTINV, /* F - INV | DNML | DZ | OFL */ 652 FPE_FLTUND, /* 10 - UFL */ 653 FPE_FLTINV, /* 11 - INV | UFL */ 654 FPE_FLTUND, /* 12 - DNML | UFL */ 655 FPE_FLTINV, /* 13 - INV | DNML | UFL */ 656 FPE_FLTDIV, /* 14 - DZ | UFL */ 657 FPE_FLTINV, /* 15 - INV | DZ | UFL */ 658 FPE_FLTDIV, /* 16 - DNML | DZ | UFL */ 659 FPE_FLTINV, /* 17 - INV | DNML | DZ | UFL */ 660 FPE_FLTOVF, /* 18 - OFL | UFL */ 661 FPE_FLTINV, /* 19 - INV | OFL | UFL */ 662 FPE_FLTUND, /* 1A - DNML | OFL | UFL */ 663 FPE_FLTINV, /* 1B - INV | DNML | OFL | UFL */ 664 FPE_FLTDIV, /* 1C - DZ | OFL | UFL */ 665 FPE_FLTINV, /* 1D - INV | DZ | OFL | UFL */ 666 FPE_FLTDIV, /* 1E - DNML | DZ | OFL | UFL */ 667 FPE_FLTINV, /* 1F - INV | DNML | DZ | OFL | UFL */ 668 FPE_FLTRES, /* 20 - IMP */ 669 FPE_FLTINV, /* 21 - INV | IMP */ 670 FPE_FLTUND, /* 22 - DNML | IMP */ 671 FPE_FLTINV, /* 23 - INV | DNML | IMP */ 672 FPE_FLTDIV, /* 24 - DZ | IMP */ 673 FPE_FLTINV, /* 25 - INV | DZ | IMP */ 674 FPE_FLTDIV, /* 26 - DNML | DZ | IMP */ 675 FPE_FLTINV, /* 27 - INV | DNML | DZ | IMP */ 676 FPE_FLTOVF, /* 28 - OFL | IMP */ 677 FPE_FLTINV, /* 29 - INV | OFL | IMP */ 678 FPE_FLTUND, /* 2A - DNML | OFL | IMP */ 679 FPE_FLTINV, /* 2B - INV | DNML | OFL | IMP */ 680 FPE_FLTDIV, /* 2C - DZ | OFL | IMP */ 681 FPE_FLTINV, /* 2D - INV | DZ | OFL | IMP */ 682 FPE_FLTDIV, /* 2E - DNML | DZ | OFL | IMP */ 683 FPE_FLTINV, /* 2F - INV | DNML | DZ | OFL | IMP */ 684 FPE_FLTUND, /* 30 - UFL | IMP */ 685 FPE_FLTINV, /* 31 - INV | UFL | IMP */ 686 FPE_FLTUND, /* 32 - DNML | UFL | IMP */ 687 FPE_FLTINV, /* 33 - INV | DNML | UFL | IMP */ 688 FPE_FLTDIV, /* 34 - DZ | UFL | IMP */ 689 FPE_FLTINV, /* 35 - INV | DZ | UFL | IMP */ 690 FPE_FLTDIV, /* 36 - DNML | DZ | UFL | IMP */ 691 FPE_FLTINV, /* 37 - INV | DNML | DZ | UFL | IMP */ 692 FPE_FLTOVF, /* 38 - OFL | UFL | IMP */ 693 FPE_FLTINV, /* 39 - INV | OFL | UFL | IMP */ 694 FPE_FLTUND, /* 3A - DNML | OFL | UFL | IMP */ 695 FPE_FLTINV, /* 3B - INV | DNML | OFL | UFL | IMP */ 696 FPE_FLTDIV, /* 3C - DZ | OFL | UFL | IMP */ 697 FPE_FLTINV, /* 3D - INV | DZ | OFL | UFL | IMP */ 698 FPE_FLTDIV, /* 3E - DNML | DZ | OFL | UFL | IMP */ 699 FPE_FLTINV, /* 3F - INV | DNML | DZ | OFL | UFL | IMP */ 700 FPE_FLTSUB, /* 40 - STK */ 701 FPE_FLTSUB, /* 41 - INV | STK */ 702 FPE_FLTUND, /* 42 - DNML | STK */ 703 FPE_FLTSUB, /* 43 - INV | DNML | STK */ 704 FPE_FLTDIV, /* 44 - DZ | STK */ 705 FPE_FLTSUB, /* 45 - INV | DZ | STK */ 706 FPE_FLTDIV, /* 46 - DNML | DZ | STK */ 707 FPE_FLTSUB, /* 47 - INV | DNML | DZ | STK */ 708 FPE_FLTOVF, /* 48 - OFL | STK */ 709 FPE_FLTSUB, /* 49 - INV | OFL | STK */ 710 FPE_FLTUND, /* 4A - DNML | OFL | STK */ 711 FPE_FLTSUB, /* 4B - INV | DNML | OFL | STK */ 712 FPE_FLTDIV, /* 4C - DZ | OFL | STK */ 713 FPE_FLTSUB, /* 4D - INV | DZ | OFL | STK */ 714 FPE_FLTDIV, /* 4E - DNML | DZ | OFL | STK */ 715 FPE_FLTSUB, /* 4F - INV | DNML | DZ | OFL | STK */ 716 FPE_FLTUND, /* 50 - UFL | STK */ 717 FPE_FLTSUB, /* 51 - INV | UFL | STK */ 718 FPE_FLTUND, /* 52 - DNML | UFL | STK */ 719 FPE_FLTSUB, /* 53 - INV | DNML | UFL | STK */ 720 FPE_FLTDIV, /* 54 - DZ | UFL | STK */ 721 FPE_FLTSUB, /* 55 - INV | DZ | UFL | STK */ 722 FPE_FLTDIV, /* 56 - DNML | DZ | UFL | STK */ 723 FPE_FLTSUB, /* 57 - INV | DNML | DZ | UFL | STK */ 724 FPE_FLTOVF, /* 58 - OFL | UFL | STK */ 725 FPE_FLTSUB, /* 59 - INV | OFL | UFL | STK */ 726 FPE_FLTUND, /* 5A - DNML | OFL | UFL | STK */ 727 FPE_FLTSUB, /* 5B - INV | DNML | OFL | UFL | STK */ 728 FPE_FLTDIV, /* 5C - DZ | OFL | UFL | STK */ 729 FPE_FLTSUB, /* 5D - INV | DZ | OFL | UFL | STK */ 730 FPE_FLTDIV, /* 5E - DNML | DZ | OFL | UFL | STK */ 731 FPE_FLTSUB, /* 5F - INV | DNML | DZ | OFL | UFL | STK */ 732 FPE_FLTRES, /* 60 - IMP | STK */ 733 FPE_FLTSUB, /* 61 - INV | IMP | STK */ 734 FPE_FLTUND, /* 62 - DNML | IMP | STK */ 735 FPE_FLTSUB, /* 63 - INV | DNML | IMP | STK */ 736 FPE_FLTDIV, /* 64 - DZ | IMP | STK */ 737 FPE_FLTSUB, /* 65 - INV | DZ | IMP | STK */ 738 FPE_FLTDIV, /* 66 - DNML | DZ | IMP | STK */ 739 FPE_FLTSUB, /* 67 - INV | DNML | DZ | IMP | STK */ 740 FPE_FLTOVF, /* 68 - OFL | IMP | STK */ 741 FPE_FLTSUB, /* 69 - INV | OFL | IMP | STK */ 742 FPE_FLTUND, /* 6A - DNML | OFL | IMP | STK */ 743 FPE_FLTSUB, /* 6B - INV | DNML | OFL | IMP | STK */ 744 FPE_FLTDIV, /* 6C - DZ | OFL | IMP | STK */ 745 FPE_FLTSUB, /* 6D - INV | DZ | OFL | IMP | STK */ 746 FPE_FLTDIV, /* 6E - DNML | DZ | OFL | IMP | STK */ 747 FPE_FLTSUB, /* 6F - INV | DNML | DZ | OFL | IMP | STK */ 748 FPE_FLTUND, /* 70 - UFL | IMP | STK */ 749 FPE_FLTSUB, /* 71 - INV | UFL | IMP | STK */ 750 FPE_FLTUND, /* 72 - DNML | UFL | IMP | STK */ 751 FPE_FLTSUB, /* 73 - INV | DNML | UFL | IMP | STK */ 752 FPE_FLTDIV, /* 74 - DZ | UFL | IMP | STK */ 753 FPE_FLTSUB, /* 75 - INV | DZ | UFL | IMP | STK */ 754 FPE_FLTDIV, /* 76 - DNML | DZ | UFL | IMP | STK */ 755 FPE_FLTSUB, /* 77 - INV | DNML | DZ | UFL | IMP | STK */ 756 FPE_FLTOVF, /* 78 - OFL | UFL | IMP | STK */ 757 FPE_FLTSUB, /* 79 - INV | OFL | UFL | IMP | STK */ 758 FPE_FLTUND, /* 7A - DNML | OFL | UFL | IMP | STK */ 759 FPE_FLTSUB, /* 7B - INV | DNML | OFL | UFL | IMP | STK */ 760 FPE_FLTDIV, /* 7C - DZ | OFL | UFL | IMP | STK */ 761 FPE_FLTSUB, /* 7D - INV | DZ | OFL | UFL | IMP | STK */ 762 FPE_FLTDIV, /* 7E - DNML | DZ | OFL | UFL | IMP | STK */ 763 FPE_FLTSUB, /* 7F - INV | DNML | DZ | OFL | UFL | IMP | STK */ 764}; 765 766/* 767 * Read the FP status and control words, then generate si_code value 768 * for SIGFPE. The error code chosen will be one of the 769 * FPE_... macros. It will be sent as the second argument to old 770 * BSD-style signal handlers and as "siginfo_t->si_code" (second 771 * argument) to SA_SIGINFO signal handlers. 772 * 773 * Some time ago, we cleared the x87 exceptions with FNCLEX there. 774 * Clearing exceptions was necessary mainly to avoid IRQ13 bugs. The 775 * usermode code which understands the FPU hardware enough to enable 776 * the exceptions, can also handle clearing the exception state in the 777 * handler. The only consequence of not clearing the exception is the 778 * rethrow of the SIGFPE on return from the signal handler and 779 * reexecution of the corresponding instruction. 780 * 781 * For XMM traps, the exceptions were never cleared. 782 */ 783int 784npxtrap_x87(void) 785{ 786 u_short control, status; 787 788 if (!hw_float) { 789 printf( 790 "npxtrap_x87: fpcurthread = %p, curthread = %p, hw_float = %d\n", 791 PCPU_GET(fpcurthread), curthread, hw_float); 792 panic("npxtrap from nowhere"); 793 } 794 critical_enter(); 795 796 /* 797 * Interrupt handling (for another interrupt) may have pushed the 798 * state to memory. Fetch the relevant parts of the state from 799 * wherever they are. 800 */ 801 if (PCPU_GET(fpcurthread) != curthread) { 802 control = GET_FPU_CW(curthread); 803 status = GET_FPU_SW(curthread); 804 } else { 805 fnstcw(&control); 806 fnstsw(&status); 807 } 808 critical_exit(); 809 return (fpetable[status & ((~control & 0x3f) | 0x40)]); 810} 811 812#ifdef CPU_ENABLE_SSE 813int 814npxtrap_sse(void) 815{ 816 u_int mxcsr; 817 818 if (!hw_float) { 819 printf( 820 "npxtrap_sse: fpcurthread = %p, curthread = %p, hw_float = %d\n", 821 PCPU_GET(fpcurthread), curthread, hw_float); 822 panic("npxtrap from nowhere"); 823 } 824 critical_enter(); 825 if (PCPU_GET(fpcurthread) != curthread) 826 mxcsr = curthread->td_pcb->pcb_save->sv_xmm.sv_env.en_mxcsr; 827 else 828 stmxcsr(&mxcsr); 829 critical_exit(); 830 return (fpetable[(mxcsr & (~mxcsr >> 7)) & 0x3f]); 831} 832#endif 833 834/* 835 * Implement device not available (DNA) exception 836 * 837 * It would be better to switch FP context here (if curthread != fpcurthread) 838 * and not necessarily for every context switch, but it is too hard to 839 * access foreign pcb's. 840 */ 841 842static int err_count = 0; 843 844int 845npxdna(void) 846{ 847 848 if (!hw_float) 849 return (0); 850 critical_enter(); 851 if (PCPU_GET(fpcurthread) == curthread) { 852 printf("npxdna: fpcurthread == curthread %d times\n", 853 ++err_count); 854 stop_emulating(); 855 critical_exit(); 856 return (1); 857 } 858 if (PCPU_GET(fpcurthread) != NULL) { 859 printf("npxdna: fpcurthread = %p (%d), curthread = %p (%d)\n", 860 PCPU_GET(fpcurthread), 861 PCPU_GET(fpcurthread)->td_proc->p_pid, 862 curthread, curthread->td_proc->p_pid); 863 panic("npxdna"); 864 } 865 stop_emulating(); 866 /* 867 * Record new context early in case frstor causes a trap. 868 */ 869 PCPU_SET(fpcurthread, curthread); 870 871#ifdef CPU_ENABLE_SSE 872 if (cpu_fxsr) 873 fpu_clean_state(); 874#endif 875 876 if ((curpcb->pcb_flags & PCB_NPXINITDONE) == 0) { 877 /* 878 * This is the first time this thread has used the FPU or 879 * the PCB doesn't contain a clean FPU state. Explicitly 880 * load an initial state. 881 * 882 * We prefer to restore the state from the actual save 883 * area in PCB instead of directly loading from 884 * npx_initialstate, to ignite the XSAVEOPT 885 * tracking engine. 886 */ 887 bcopy(npx_initialstate, curpcb->pcb_save, cpu_max_ext_state_size); 888 fpurstor(curpcb->pcb_save); 889 if (curpcb->pcb_initial_npxcw != __INITIAL_NPXCW__) 890 fldcw(curpcb->pcb_initial_npxcw); 891 curpcb->pcb_flags |= PCB_NPXINITDONE; 892 if (PCB_USER_FPU(curpcb)) 893 curpcb->pcb_flags |= PCB_NPXUSERINITDONE; 894 } else { 895 fpurstor(curpcb->pcb_save); 896 } 897 critical_exit(); 898 899 return (1); 900} 901 902/* 903 * Wrapper for fpusave() called from context switch routines. 904 * 905 * npxsave() must be called with interrupts disabled, so that it clears 906 * fpcurthread atomically with saving the state. We require callers to do the 907 * disabling, since most callers need to disable interrupts anyway to call 908 * npxsave() atomically with checking fpcurthread. 909 */ 910void 911npxsave(addr) 912 union savefpu *addr; 913{ 914 915 stop_emulating(); 916#ifdef CPU_ENABLE_SSE 917 if (use_xsaveopt) 918 xsaveopt((char *)addr, xsave_mask); 919 else 920#endif 921 fpusave(addr); 922 start_emulating(); 923 PCPU_SET(fpcurthread, NULL); 924} 925 926/* 927 * Unconditionally save the current co-processor state across suspend and 928 * resume. 929 */ 930void 931npxsuspend(union savefpu *addr) 932{ 933 register_t cr0; 934 935 if (!hw_float) 936 return; 937 if (PCPU_GET(fpcurthread) == NULL) { 938 bcopy(npx_initialstate, addr, cpu_max_ext_state_size); 939 return; 940 } 941 cr0 = rcr0(); 942 stop_emulating(); 943 fpusave(addr); 944 load_cr0(cr0); 945} 946 947void 948npxresume(union savefpu *addr) 949{ 950 register_t cr0; 951 952 if (!hw_float) 953 return; 954 955 cr0 = rcr0(); 956 npxinit(false); 957 stop_emulating(); 958 fpurstor(addr); 959 load_cr0(cr0); 960} 961 962void 963npxdrop() 964{ 965 struct thread *td; 966 967 /* 968 * Discard pending exceptions in the !cpu_fxsr case so that unmasked 969 * ones don't cause a panic on the next frstor. 970 */ 971#ifdef CPU_ENABLE_SSE 972 if (!cpu_fxsr) 973#endif 974 fnclex(); 975 976 td = PCPU_GET(fpcurthread); 977 KASSERT(td == curthread, ("fpudrop: fpcurthread != curthread")); 978 CRITICAL_ASSERT(td); 979 PCPU_SET(fpcurthread, NULL); 980 td->td_pcb->pcb_flags &= ~PCB_NPXINITDONE; 981 start_emulating(); 982} 983 984/* 985 * Get the user state of the FPU into pcb->pcb_user_save without 986 * dropping ownership (if possible). It returns the FPU ownership 987 * status. 988 */ 989int 990npxgetregs(struct thread *td) 991{ 992 struct pcb *pcb; 993#ifdef CPU_ENABLE_SSE 994 uint64_t *xstate_bv, bit; 995 char *sa; 996 int max_ext_n, i; 997#endif 998 int owned; 999 1000 if (!hw_float) 1001 return (_MC_FPOWNED_NONE); 1002 1003 pcb = td->td_pcb; 1004 if ((pcb->pcb_flags & PCB_NPXINITDONE) == 0) { 1005 bcopy(npx_initialstate, get_pcb_user_save_pcb(pcb), 1006 cpu_max_ext_state_size); 1007 SET_FPU_CW(get_pcb_user_save_pcb(pcb), pcb->pcb_initial_npxcw); 1008 npxuserinited(td); 1009 return (_MC_FPOWNED_PCB); 1010 } 1011 critical_enter(); 1012 if (td == PCPU_GET(fpcurthread)) { 1013 fpusave(get_pcb_user_save_pcb(pcb)); 1014#ifdef CPU_ENABLE_SSE 1015 if (!cpu_fxsr) 1016#endif 1017 /* 1018 * fnsave initializes the FPU and destroys whatever 1019 * context it contains. Make sure the FPU owner 1020 * starts with a clean state next time. 1021 */ 1022 npxdrop(); 1023 owned = _MC_FPOWNED_FPU; 1024 } else { 1025 owned = _MC_FPOWNED_PCB; 1026 } 1027 critical_exit(); 1028#ifdef CPU_ENABLE_SSE 1029 if (use_xsave) { 1030 /* 1031 * Handle partially saved state. 1032 */ 1033 sa = (char *)get_pcb_user_save_pcb(pcb); 1034 xstate_bv = (uint64_t *)(sa + sizeof(union savefpu) + 1035 offsetof(struct xstate_hdr, xstate_bv)); 1036 if (xsave_mask >> 32 != 0) 1037 max_ext_n = fls(xsave_mask >> 32) + 32; 1038 else 1039 max_ext_n = fls(xsave_mask); 1040 for (i = 0; i < max_ext_n; i++) { 1041 bit = 1ULL << i; 1042 if ((xsave_mask & bit) == 0 || (*xstate_bv & bit) != 0) 1043 continue; 1044 bcopy((char *)npx_initialstate + 1045 xsave_area_desc[i].offset, 1046 sa + xsave_area_desc[i].offset, 1047 xsave_area_desc[i].size); 1048 *xstate_bv |= bit; 1049 } 1050 } 1051#endif 1052 return (owned); 1053} 1054 1055void 1056npxuserinited(struct thread *td) 1057{ 1058 struct pcb *pcb; 1059 1060 pcb = td->td_pcb; 1061 if (PCB_USER_FPU(pcb)) 1062 pcb->pcb_flags |= PCB_NPXINITDONE; 1063 pcb->pcb_flags |= PCB_NPXUSERINITDONE; 1064} 1065 1066#ifdef CPU_ENABLE_SSE 1067int 1068npxsetxstate(struct thread *td, char *xfpustate, size_t xfpustate_size) 1069{ 1070 struct xstate_hdr *hdr, *ehdr; 1071 size_t len, max_len; 1072 uint64_t bv; 1073 1074 /* XXXKIB should we clear all extended state in xstate_bv instead ? */ 1075 if (xfpustate == NULL) 1076 return (0); 1077 if (!use_xsave) 1078 return (EOPNOTSUPP); 1079 1080 len = xfpustate_size; 1081 if (len < sizeof(struct xstate_hdr)) 1082 return (EINVAL); 1083 max_len = cpu_max_ext_state_size - sizeof(union savefpu); 1084 if (len > max_len) 1085 return (EINVAL); 1086 1087 ehdr = (struct xstate_hdr *)xfpustate; 1088 bv = ehdr->xstate_bv; 1089 1090 /* 1091 * Avoid #gp. 1092 */ 1093 if (bv & ~xsave_mask) 1094 return (EINVAL); 1095 1096 hdr = (struct xstate_hdr *)(get_pcb_user_save_td(td) + 1); 1097 1098 hdr->xstate_bv = bv; 1099 bcopy(xfpustate + sizeof(struct xstate_hdr), 1100 (char *)(hdr + 1), len - sizeof(struct xstate_hdr)); 1101 1102 return (0); 1103} 1104#endif 1105 1106int 1107npxsetregs(struct thread *td, union savefpu *addr, char *xfpustate, 1108 size_t xfpustate_size) 1109{ 1110 struct pcb *pcb; 1111#ifdef CPU_ENABLE_SSE 1112 int error; 1113#endif 1114 1115 if (!hw_float) 1116 return (ENXIO); 1117 1118 pcb = td->td_pcb; 1119 critical_enter(); 1120 if (td == PCPU_GET(fpcurthread) && PCB_USER_FPU(pcb)) { 1121#ifdef CPU_ENABLE_SSE 1122 error = npxsetxstate(td, xfpustate, xfpustate_size); 1123 if (error != 0) { 1124 critical_exit(); 1125 return (error); 1126 } 1127 if (!cpu_fxsr) 1128#endif 1129 fnclex(); /* As in npxdrop(). */ 1130 bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr)); 1131 fpurstor(get_pcb_user_save_td(td)); 1132 critical_exit(); 1133 pcb->pcb_flags |= PCB_NPXUSERINITDONE | PCB_NPXINITDONE; 1134 } else { 1135 critical_exit(); 1136#ifdef CPU_ENABLE_SSE 1137 error = npxsetxstate(td, xfpustate, xfpustate_size); 1138 if (error != 0) 1139 return (error); 1140#endif 1141 bcopy(addr, get_pcb_user_save_td(td), sizeof(*addr)); 1142 npxuserinited(td); 1143 } 1144 return (0); 1145} 1146 1147static void 1148fpusave(addr) 1149 union savefpu *addr; 1150{ 1151 1152#ifdef CPU_ENABLE_SSE 1153 if (use_xsave) 1154 xsave((char *)addr, xsave_mask); 1155 else if (cpu_fxsr) 1156 fxsave(addr); 1157 else 1158#endif 1159 fnsave(addr); 1160} 1161 1162#ifdef CPU_ENABLE_SSE 1163static void 1164npx_fill_fpregs_xmm1(struct savexmm *sv_xmm, struct save87 *sv_87) 1165{ 1166 struct env87 *penv_87; 1167 struct envxmm *penv_xmm; 1168 int i; 1169 1170 penv_87 = &sv_87->sv_env; 1171 penv_xmm = &sv_xmm->sv_env; 1172 1173 /* FPU control/status */ 1174 penv_87->en_cw = penv_xmm->en_cw; 1175 penv_87->en_sw = penv_xmm->en_sw; 1176 penv_87->en_tw = penv_xmm->en_tw; 1177 penv_87->en_fip = penv_xmm->en_fip; 1178 penv_87->en_fcs = penv_xmm->en_fcs; 1179 penv_87->en_opcode = penv_xmm->en_opcode; 1180 penv_87->en_foo = penv_xmm->en_foo; 1181 penv_87->en_fos = penv_xmm->en_fos; 1182 1183 /* FPU registers */ 1184 for (i = 0; i < 8; ++i) 1185 sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc; 1186} 1187 1188void 1189npx_fill_fpregs_xmm(struct savexmm *sv_xmm, struct save87 *sv_87) 1190{ 1191 1192 bzero(sv_87, sizeof(*sv_87)); 1193 npx_fill_fpregs_xmm1(sv_xmm, sv_87); 1194} 1195 1196void 1197npx_set_fpregs_xmm(struct save87 *sv_87, struct savexmm *sv_xmm) 1198{ 1199 struct env87 *penv_87; 1200 struct envxmm *penv_xmm; 1201 int i; 1202 1203 penv_87 = &sv_87->sv_env; 1204 penv_xmm = &sv_xmm->sv_env; 1205 1206 /* FPU control/status */ 1207 penv_xmm->en_cw = penv_87->en_cw; 1208 penv_xmm->en_sw = penv_87->en_sw; 1209 penv_xmm->en_tw = penv_87->en_tw; 1210 penv_xmm->en_fip = penv_87->en_fip; 1211 penv_xmm->en_fcs = penv_87->en_fcs; 1212 penv_xmm->en_opcode = penv_87->en_opcode; 1213 penv_xmm->en_foo = penv_87->en_foo; 1214 penv_xmm->en_fos = penv_87->en_fos; 1215 1216 /* FPU registers */ 1217 for (i = 0; i < 8; ++i) 1218 sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i]; 1219} 1220#endif /* CPU_ENABLE_SSE */ 1221 1222void 1223npx_get_fsave(void *addr) 1224{ 1225 struct thread *td; 1226 union savefpu *sv; 1227 1228 td = curthread; 1229 npxgetregs(td); 1230 sv = get_pcb_user_save_td(td); 1231#ifdef CPU_ENABLE_SSE 1232 if (cpu_fxsr) 1233 npx_fill_fpregs_xmm1(&sv->sv_xmm, addr); 1234 else 1235#endif 1236 bcopy(sv, addr, sizeof(struct env87) + 1237 sizeof(struct fpacc87[8])); 1238} 1239 1240int 1241npx_set_fsave(void *addr) 1242{ 1243 union savefpu sv; 1244 int error; 1245 1246 bzero(&sv, sizeof(sv)); 1247#ifdef CPU_ENABLE_SSE 1248 if (cpu_fxsr) 1249 npx_set_fpregs_xmm(addr, &sv.sv_xmm); 1250 else 1251#endif 1252 bcopy(addr, &sv, sizeof(struct env87) + 1253 sizeof(struct fpacc87[8])); 1254 error = npxsetregs(curthread, &sv, NULL, 0); 1255 return (error); 1256} 1257 1258#ifdef CPU_ENABLE_SSE 1259/* 1260 * On AuthenticAMD processors, the fxrstor instruction does not restore 1261 * the x87's stored last instruction pointer, last data pointer, and last 1262 * opcode values, except in the rare case in which the exception summary 1263 * (ES) bit in the x87 status word is set to 1. 1264 * 1265 * In order to avoid leaking this information across processes, we clean 1266 * these values by performing a dummy load before executing fxrstor(). 1267 */ 1268static void 1269fpu_clean_state(void) 1270{ 1271 static float dummy_variable = 0.0; 1272 u_short status; 1273 1274 /* 1275 * Clear the ES bit in the x87 status word if it is currently 1276 * set, in order to avoid causing a fault in the upcoming load. 1277 */ 1278 fnstsw(&status); 1279 if (status & 0x80) 1280 fnclex(); 1281 1282 /* 1283 * Load the dummy variable into the x87 stack. This mangles 1284 * the x87 stack, but we don't care since we're about to call 1285 * fxrstor() anyway. 1286 */ 1287 __asm __volatile("ffree %%st(7); flds %0" : : "m" (dummy_variable)); 1288} 1289#endif /* CPU_ENABLE_SSE */ 1290 1291static void 1292fpurstor(addr) 1293 union savefpu *addr; 1294{ 1295 1296#ifdef CPU_ENABLE_SSE 1297 if (use_xsave) 1298 xrstor((char *)addr, xsave_mask); 1299 else if (cpu_fxsr) 1300 fxrstor(addr); 1301 else 1302#endif 1303 frstor(addr); 1304} 1305 1306#ifdef DEV_ISA 1307/* 1308 * This sucks up the legacy ISA support assignments from PNPBIOS/ACPI. 1309 */ 1310static struct isa_pnp_id npxisa_ids[] = { 1311 { 0x040cd041, "Legacy ISA coprocessor support" }, /* PNP0C04 */ 1312 { 0 } 1313}; 1314 1315static int 1316npxisa_probe(device_t dev) 1317{ 1318 int result; 1319 if ((result = ISA_PNP_PROBE(device_get_parent(dev), dev, npxisa_ids)) <= 0) { 1320 device_quiet(dev); 1321 } 1322 return(result); 1323} 1324 1325static int 1326npxisa_attach(device_t dev) 1327{ 1328 return (0); 1329} 1330 1331static device_method_t npxisa_methods[] = { 1332 /* Device interface */ 1333 DEVMETHOD(device_probe, npxisa_probe), 1334 DEVMETHOD(device_attach, npxisa_attach), 1335 DEVMETHOD(device_detach, bus_generic_detach), 1336 DEVMETHOD(device_shutdown, bus_generic_shutdown), 1337 DEVMETHOD(device_suspend, bus_generic_suspend), 1338 DEVMETHOD(device_resume, bus_generic_resume), 1339 1340 { 0, 0 } 1341}; 1342 1343static driver_t npxisa_driver = { 1344 "npxisa", 1345 npxisa_methods, 1346 1, /* no softc */ 1347}; 1348 1349static devclass_t npxisa_devclass; 1350 1351DRIVER_MODULE(npxisa, isa, npxisa_driver, npxisa_devclass, 0, 0); 1352#ifndef PC98 1353DRIVER_MODULE(npxisa, acpi, npxisa_driver, npxisa_devclass, 0, 0); 1354#endif 1355#endif /* DEV_ISA */ 1356 1357static MALLOC_DEFINE(M_FPUKERN_CTX, "fpukern_ctx", 1358 "Kernel contexts for FPU state"); 1359 1360#define FPU_KERN_CTX_NPXINITDONE 0x01 1361#define FPU_KERN_CTX_DUMMY 0x02 1362#define FPU_KERN_CTX_INUSE 0x04 1363 1364struct fpu_kern_ctx { 1365 union savefpu *prev; 1366 uint32_t flags; 1367 char hwstate1[]; 1368}; 1369 1370struct fpu_kern_ctx * 1371fpu_kern_alloc_ctx(u_int flags) 1372{ 1373 struct fpu_kern_ctx *res; 1374 size_t sz; 1375 1376 sz = sizeof(struct fpu_kern_ctx) + XSAVE_AREA_ALIGN + 1377 cpu_max_ext_state_size; 1378 res = malloc(sz, M_FPUKERN_CTX, ((flags & FPU_KERN_NOWAIT) ? 1379 M_NOWAIT : M_WAITOK) | M_ZERO); 1380 return (res); 1381} 1382 1383void 1384fpu_kern_free_ctx(struct fpu_kern_ctx *ctx) 1385{ 1386 1387 KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) == 0, ("free'ing inuse ctx")); 1388 /* XXXKIB clear the memory ? */ 1389 free(ctx, M_FPUKERN_CTX); 1390} 1391 1392static union savefpu * 1393fpu_kern_ctx_savefpu(struct fpu_kern_ctx *ctx) 1394{ 1395 vm_offset_t p; 1396 1397 p = (vm_offset_t)&ctx->hwstate1; 1398 p = roundup2(p, XSAVE_AREA_ALIGN); 1399 return ((union savefpu *)p); 1400} 1401 1402int 1403fpu_kern_enter(struct thread *td, struct fpu_kern_ctx *ctx, u_int flags) 1404{ 1405 struct pcb *pcb; 1406 1407 KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) == 0, ("using inuse ctx")); 1408 1409 if ((flags & FPU_KERN_KTHR) != 0 && is_fpu_kern_thread(0)) { 1410 ctx->flags = FPU_KERN_CTX_DUMMY | FPU_KERN_CTX_INUSE; 1411 return (0); 1412 } 1413 pcb = td->td_pcb; 1414 KASSERT(!PCB_USER_FPU(pcb) || pcb->pcb_save == 1415 get_pcb_user_save_pcb(pcb), ("mangled pcb_save")); 1416 ctx->flags = FPU_KERN_CTX_INUSE; 1417 if ((pcb->pcb_flags & PCB_NPXINITDONE) != 0) 1418 ctx->flags |= FPU_KERN_CTX_NPXINITDONE; 1419 npxexit(td); 1420 ctx->prev = pcb->pcb_save; 1421 pcb->pcb_save = fpu_kern_ctx_savefpu(ctx); 1422 pcb->pcb_flags |= PCB_KERNNPX; 1423 pcb->pcb_flags &= ~PCB_NPXINITDONE; 1424 return (0); 1425} 1426 1427int 1428fpu_kern_leave(struct thread *td, struct fpu_kern_ctx *ctx) 1429{ 1430 struct pcb *pcb; 1431 1432 KASSERT((ctx->flags & FPU_KERN_CTX_INUSE) != 0, 1433 ("leaving not inuse ctx")); 1434 ctx->flags &= ~FPU_KERN_CTX_INUSE; 1435 1436 if (is_fpu_kern_thread(0) && (ctx->flags & FPU_KERN_CTX_DUMMY) != 0) 1437 return (0); 1438 pcb = td->td_pcb; 1439 critical_enter(); 1440 if (curthread == PCPU_GET(fpcurthread)) 1441 npxdrop(); 1442 critical_exit(); 1443 pcb->pcb_save = ctx->prev; 1444 if (pcb->pcb_save == get_pcb_user_save_pcb(pcb)) { 1445 if ((pcb->pcb_flags & PCB_NPXUSERINITDONE) != 0) 1446 pcb->pcb_flags |= PCB_NPXINITDONE; 1447 else 1448 pcb->pcb_flags &= ~PCB_NPXINITDONE; 1449 pcb->pcb_flags &= ~PCB_KERNNPX; 1450 } else { 1451 if ((ctx->flags & FPU_KERN_CTX_NPXINITDONE) != 0) 1452 pcb->pcb_flags |= PCB_NPXINITDONE; 1453 else 1454 pcb->pcb_flags &= ~PCB_NPXINITDONE; 1455 KASSERT(!PCB_USER_FPU(pcb), ("unpaired fpu_kern_leave")); 1456 } 1457 return (0); 1458} 1459 1460int 1461fpu_kern_thread(u_int flags) 1462{ 1463 1464 KASSERT((curthread->td_pflags & TDP_KTHREAD) != 0, 1465 ("Only kthread may use fpu_kern_thread")); 1466 KASSERT(curpcb->pcb_save == get_pcb_user_save_pcb(curpcb), 1467 ("mangled pcb_save")); 1468 KASSERT(PCB_USER_FPU(curpcb), ("recursive call")); 1469 1470 curpcb->pcb_flags |= PCB_KERNNPX; 1471 return (0); 1472} 1473 1474int 1475is_fpu_kern_thread(u_int flags) 1476{ 1477 1478 if ((curthread->td_pflags & TDP_KTHREAD) == 0) 1479 return (0); 1480 return ((curpcb->pcb_flags & PCB_KERNNPX) != 0); 1481} 1482 1483/* 1484 * FPU save area alloc/free/init utility routines 1485 */ 1486union savefpu * 1487fpu_save_area_alloc(void) 1488{ 1489 1490 return (uma_zalloc(fpu_save_area_zone, 0)); 1491} 1492 1493void 1494fpu_save_area_free(union savefpu *fsa) 1495{ 1496 1497 uma_zfree(fpu_save_area_zone, fsa); 1498} 1499 1500void 1501fpu_save_area_reset(union savefpu *fsa) 1502{ 1503 1504 bcopy(npx_initialstate, fsa, cpu_max_ext_state_size); 1505} 1506