/* exception.S — FreeBSD/amd64 low-level exception, trap, and syscall entry code (stable/10, r302041) */
/*-
 * Copyright (c) 1989, 1990 William F. Jolitz.
 * Copyright (c) 1990 The Regents of the University of California.
 * Copyright (c) 2007 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by A. Joseph Koshy under
 * sponsorship from the FreeBSD Foundation and Google, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: stable/10/sys/amd64/amd64/exception.S 302041 2016-06-21 04:51:55Z sephe $
 */

#include "opt_atpic.h"
#include "opt_compat.h"
#include "opt_hwpmc_hooks.h"
#include "opt_kdtrace.h"

#include <machine/asmacros.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include <machine/specialreg.h>

#include "assym.s"

#ifdef KDTRACE_HOOKS
	.bss
	.globl	dtrace_invop_jump_addr
	.align	8
	.type	dtrace_invop_jump_addr,@object
	.size	dtrace_invop_jump_addr,8
dtrace_invop_jump_addr:
	.zero	8
	.globl	dtrace_invop_calltrap_addr
	.align	8
	.type	dtrace_invop_calltrap_addr,@object
	.size	dtrace_invop_calltrap_addr,8
dtrace_invop_calltrap_addr:
	.zero	8
#endif
	.text
#ifdef HWPMC_HOOKS
	ENTRY(start_exceptions)
#endif

/*****************************************************************************/
/* Trap handling                                                             */
/*****************************************************************************/
/*
 * Trap and fault vector routines.
 *
 * All traps are 'interrupt gates', SDT_SYSIGT.  An interrupt gate pushes
 * state on the stack but also disables interrupts.  This is important for
 * us for the use of the swapgs instruction.  We cannot be interrupted
 * until the GS.base value is correct.  For most traps, we automatically
 * then enable interrupts if the interrupted context had them enabled.
 * This is equivalent to the i386 port's use of SDT_SYS386TGT.
 *
 * The cpu will push a certain amount of state onto the kernel stack for
 * the current process.  See amd64/include/frame.h.
 * This includes the current RFLAGS (status register, which includes
 * the interrupt disable state prior to the trap), the code segment register,
 * and the return instruction pointer are pushed by the cpu.  The cpu
 * will also push an 'error' code for certain traps.  We push a dummy
 * error code for those traps where the cpu doesn't in order to maintain
 * a consistent frame.  We also push a contrived 'trap number'.
 *
 * The CPU does not push the general registers, so we must do that, and we
 * must restore them prior to calling 'iret'.  The CPU adjusts %cs and %ss
 * but does not mess with %ds, %es, %gs or %fs.  We swap the %gs base for
 * for the kernel mode operation shortly, without changes to the selector
 * loaded.  Since superuser long mode works with any selectors loaded into
 * segment registers other then %cs, which makes them mostly unused in long
 * mode, and kernel does not reference %fs, leave them alone.
 */
/*
 * The segment registers are reloaded on return to the usermode.
 */

MCOUNT_LABEL(user)
MCOUNT_LABEL(btrap)

/* Traps that we leave interrupts disabled for.. */
#define	TRAP_NOEN(a)	\
	subq $TF_RIP,%rsp; \
	movl $(a),TF_TRAPNO(%rsp) ; \
	movq $0,TF_ADDR(%rsp) ; \
	movq $0,TF_ERR(%rsp) ; \
	jmp alltraps_noen
IDTVEC(dbg)
	TRAP_NOEN(T_TRCTRAP)
IDTVEC(bpt)
	TRAP_NOEN(T_BPTFLT)
#ifdef KDTRACE_HOOKS
IDTVEC(dtrace_ret)
	TRAP_NOEN(T_DTRACE_RET)
#endif

/* Regular traps; The cpu does not supply tf_err for these. */
#define	TRAP(a)	 \
	subq $TF_RIP,%rsp; \
	movl $(a),TF_TRAPNO(%rsp) ; \
	movq $0,TF_ADDR(%rsp) ; \
	movq $0,TF_ERR(%rsp) ; \
	jmp alltraps
IDTVEC(div)
	TRAP(T_DIVIDE)
IDTVEC(ofl)
	TRAP(T_OFLOW)
IDTVEC(bnd)
	TRAP(T_BOUND)
IDTVEC(ill)
	TRAP(T_PRIVINFLT)
IDTVEC(dna)
	TRAP(T_DNA)
IDTVEC(fpusegm)
	TRAP(T_FPOPFLT)
IDTVEC(mchk)
	TRAP(T_MCHK)
IDTVEC(rsvd)
	TRAP(T_RESERVED)
IDTVEC(fpu)
	TRAP(T_ARITHTRAP)
IDTVEC(xmm)
	TRAP(T_XMMFLT)

/* This group of traps have tf_err already pushed by the cpu */
#define	TRAP_ERR(a)	\
	subq $TF_ERR,%rsp; \
	movl $(a),TF_TRAPNO(%rsp) ; \
	movq $0,TF_ADDR(%rsp) ; \
	jmp alltraps
IDTVEC(tss)
	TRAP_ERR(T_TSSFLT)
IDTVEC(missing)
	subq	$TF_ERR,%rsp
	movl	$T_SEGNPFLT,TF_TRAPNO(%rsp)
	jmp	prot_addrf
IDTVEC(stk)
	subq	$TF_ERR,%rsp
	movl	$T_STKFLT,TF_TRAPNO(%rsp)
	jmp	prot_addrf
IDTVEC(align)
	TRAP_ERR(T_ALIGNFLT)

	/*
	 * alltraps entry point.  Use swapgs if this is the first time in the
	 * kernel from userland.  Reenable interrupts if they were enabled
	 * before the trap.  This approximates SDT_SYS386TGT on the i386 port.
	 */
	SUPERALIGN_TEXT
	.globl	alltraps
	.type	alltraps,@function
alltraps:
	movq	%rdi,TF_RDI(%rsp)
	testb	$SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
	jz	alltraps_testi		/* already running with kernel GS.base */
	swapgs
	movq	PCPU(CURPCB),%rdi
	andl	$~PCB_FULL_IRET,PCB_FLAGS(%rdi)
	movw	%fs,TF_FS(%rsp)
	movw	%gs,TF_GS(%rsp)
	movw	%es,TF_ES(%rsp)
	movw	%ds,TF_DS(%rsp)
alltraps_testi:
	testl	$PSL_I,TF_RFLAGS(%rsp)
	jz	alltraps_pushregs_no_rdi
	sti
alltraps_pushregs_no_rdi:
	movq	%rsi,TF_RSI(%rsp)
	movq	%rdx,TF_RDX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	movq	%r8,TF_R8(%rsp)
	movq	%r9,TF_R9(%rsp)
	movq	%rax,TF_RAX(%rsp)
	movq	%rbx,TF_RBX(%rsp)
	movq	%rbp,TF_RBP(%rsp)
	movq	%r10,TF_R10(%rsp)
	movq	%r11,TF_R11(%rsp)
	movq	%r12,TF_R12(%rsp)
	movq	%r13,TF_R13(%rsp)
	movq	%r14,TF_R14(%rsp)
	movq	%r15,TF_R15(%rsp)
	movl	$TF_HASSEGS,TF_FLAGS(%rsp)
	cld
FAKE_MCOUNT(TF_RIP(%rsp)) 20846283Sdfr#ifdef KDTRACE_HOOKS 209130803Smarcel /* 210130803Smarcel * DTrace Function Boundary Trace (fbt) probes are triggered 211130803Smarcel * by int3 (0xcc) which causes the #BP (T_BPTFLT) breakpoint 21246283Sdfr * interrupt. For all other trap types, just handle them in 213130803Smarcel * the usual way. 214130803Smarcel */ 215130803Smarcel testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */ 216130803Smarcel jnz calltrap /* ignore userland traps */ 217130803Smarcel cmpl $T_BPTFLT,TF_TRAPNO(%rsp) 218130803Smarcel jne calltrap 219130803Smarcel 220130803Smarcel /* Check if there is no DTrace hook registered. */ 221130803Smarcel cmpq $0,dtrace_invop_jump_addr 222130803Smarcel je calltrap 223130803Smarcel 224130803Smarcel /* 225130803Smarcel * Set our jump address for the jump back in the event that 226130803Smarcel * the breakpoint wasn't caused by DTrace at all. 22746283Sdfr */ 228130803Smarcel movq $calltrap,dtrace_invop_calltrap_addr(%rip) 229130803Smarcel 23046283Sdfr /* Jump to the code hooked in by DTrace. */ 231130803Smarcel movq dtrace_invop_jump_addr,%rax 232130803Smarcel jmpq *dtrace_invop_jump_addr 233130803Smarcel#endif 23446283Sdfr .globl calltrap 235130803Smarcel .type calltrap,@function 236130803Smarcelcalltrap: 237130803Smarcel movq %rsp,%rdi 23846283Sdfr call trap 239130803Smarcel MEXITCOUNT 240130803Smarcel jmp doreti /* Handle any pending ASTs */ 24146283Sdfr 242130803Smarcel /* 243130803Smarcel * alltraps_noen entry point. Unlike alltraps above, we want to 244130803Smarcel * leave the interrupts disabled. This corresponds to 245130803Smarcel * SDT_SYS386IGT on the i386 port. 246130803Smarcel */ 247130803Smarcel SUPERALIGN_TEXT 248130803Smarcel .globl alltraps_noen 24946283Sdfr .type alltraps_noen,@function 250130803Smarcelalltraps_noen: 251130803Smarcel movq %rdi,TF_RDI(%rsp) 252130803Smarcel testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? 
*/ 253130803Smarcel jz 1f /* already running with kernel GS.base */ 254130803Smarcel swapgs 25519370Spst movq PCPU(CURPCB),%rdi 256130803Smarcel andl $~PCB_FULL_IRET,PCB_FLAGS(%rdi) 257130803Smarcel1: movw %fs,TF_FS(%rsp) 258130803Smarcel movw %gs,TF_GS(%rsp) 259130803Smarcel movw %es,TF_ES(%rsp) 26019370Spst movw %ds,TF_DS(%rsp) 261130803Smarcel jmp alltraps_pushregs_no_rdi 262130803Smarcel 263130803SmarcelIDTVEC(dblfault) 26419370Spst subq $TF_ERR,%rsp 265130803Smarcel movl $T_DOUBLEFLT,TF_TRAPNO(%rsp) 266130803Smarcel movq $0,TF_ADDR(%rsp) 267130803Smarcel movq $0,TF_ERR(%rsp) 268130803Smarcel movq %rdi,TF_RDI(%rsp) 269130803Smarcel movq %rsi,TF_RSI(%rsp) 270130803Smarcel movq %rdx,TF_RDX(%rsp) 271130803Smarcel movq %rcx,TF_RCX(%rsp) 272130803Smarcel movq %r8,TF_R8(%rsp) 27346283Sdfr movq %r9,TF_R9(%rsp) 274130803Smarcel movq %rax,TF_RAX(%rsp) 275130803Smarcel movq %rbx,TF_RBX(%rsp) 276130803Smarcel movq %rbp,TF_RBP(%rsp) 277130803Smarcel movq %r10,TF_R10(%rsp) 27846283Sdfr movq %r11,TF_R11(%rsp) 279130803Smarcel movq %r12,TF_R12(%rsp) 280130803Smarcel movq %r13,TF_R13(%rsp) 281130803Smarcel movq %r14,TF_R14(%rsp) 282130803Smarcel movq %r15,TF_R15(%rsp) 283130803Smarcel movw %fs,TF_FS(%rsp) 284130803Smarcel movw %gs,TF_GS(%rsp) 285130803Smarcel movw %es,TF_ES(%rsp) 286130803Smarcel movw %ds,TF_DS(%rsp) 287130803Smarcel movl $TF_HASSEGS,TF_FLAGS(%rsp) 288130803Smarcel cld 289130803Smarcel testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */ 290130803Smarcel jz 1f /* already running with kernel GS.base */ 29146283Sdfr swapgs 292130803Smarcel1: 293130803Smarcel movq %rsp,%rdi 294130803Smarcel call dblfault_handler 295130803Smarcel2: 296130803Smarcel hlt 297130803Smarcel jmp 2b 298130803Smarcel 299130803SmarcelIDTVEC(page) 300130803Smarcel subq $TF_ERR,%rsp 301130803Smarcel movl $T_PAGEFLT,TF_TRAPNO(%rsp) 302130803Smarcel movq %rdi,TF_RDI(%rsp) /* free up a GP register */ 303130803Smarcel testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? 
*/ 30446283Sdfr jz 1f /* already running with kernel GS.base */ 305130803Smarcel swapgs 306130803Smarcel movq PCPU(CURPCB),%rdi 307130803Smarcel andl $~PCB_FULL_IRET,PCB_FLAGS(%rdi) 308130803Smarcel1: movq %cr2,%rdi /* preserve %cr2 before .. */ 309130803Smarcel movq %rdi,TF_ADDR(%rsp) /* enabling interrupts. */ 310130803Smarcel movw %fs,TF_FS(%rsp) 31146283Sdfr movw %gs,TF_GS(%rsp) 312130803Smarcel movw %es,TF_ES(%rsp) 313130803Smarcel movw %ds,TF_DS(%rsp) 314130803Smarcel testl $PSL_I,TF_RFLAGS(%rsp) 315130803Smarcel jz alltraps_pushregs_no_rdi 316130803Smarcel sti 317130803Smarcel jmp alltraps_pushregs_no_rdi 31819370Spst 319130803Smarcel /* 320130803Smarcel * We have to special-case this one. If we get a trap in doreti() at 321130803Smarcel * the iretq stage, we'll reenter with the wrong gs state. We'll have 322130803Smarcel * to do a special the swapgs in this case even coming from the kernel. 323130803Smarcel * XXX linux has a trap handler for their equivalent of load_gs(). 32419370Spst */ 325130803SmarcelIDTVEC(prot) 326130803Smarcel subq $TF_ERR,%rsp 327130803Smarcel movl $T_PROTFLT,TF_TRAPNO(%rsp) 328130803Smarcelprot_addrf: 329130803Smarcel movq $0,TF_ADDR(%rsp) 330130803Smarcel movq %rdi,TF_RDI(%rsp) /* free up a GP register */ 331130803Smarcel leaq doreti_iret(%rip),%rdi 332130803Smarcel cmpq %rdi,TF_RIP(%rsp) 333130803Smarcel je 1f /* kernel but with user gsbase!! */ 334130803Smarcel testb $SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? 
*/ 335130803Smarcel jz 2f /* already running with kernel GS.base */ 336130803Smarcel1: swapgs 337130803Smarcel2: movq PCPU(CURPCB),%rdi 338130803Smarcel orl $PCB_FULL_IRET,PCB_FLAGS(%rdi) /* always full iret from GPF */ 339130803Smarcel movw %fs,TF_FS(%rsp) 340130803Smarcel movw %gs,TF_GS(%rsp) 341130803Smarcel movw %es,TF_ES(%rsp) 342130803Smarcel movw %ds,TF_DS(%rsp) 343130803Smarcel testl $PSL_I,TF_RFLAGS(%rsp) 344130803Smarcel jz alltraps_pushregs_no_rdi 345130803Smarcel sti 346130803Smarcel jmp alltraps_pushregs_no_rdi 347130803Smarcel 348130803Smarcel/* 349130803Smarcel * Fast syscall entry point. We enter here with just our new %cs/%ss set, 350130803Smarcel * and the new privilige level. We are still running on the old user stack 351130803Smarcel * pointer. We have to juggle a few things around to find our stack etc. 352130803Smarcel * swapgs gives us access to our PCPU space only. 353130803Smarcel * 354130803Smarcel * We do not support invoking this from a custom %cs or %ss (e.g. using 355130803Smarcel * entries from an LDT). 356130803Smarcel */ 357242936SemasteIDTVEC(fast_syscall) 358130803Smarcel swapgs 359130803Smarcel movq %rsp,PCPU(SCRATCH_RSP) 360130803Smarcel movq PCPU(RSP0),%rsp 361130803Smarcel /* Now emulate a trapframe. Make the 8 byte alignment odd for call. 
*/ 362130803Smarcel subq $TF_SIZE,%rsp 363130803Smarcel /* defer TF_RSP till we have a spare register */ 364130803Smarcel movq %r11,TF_RFLAGS(%rsp) 365130803Smarcel movq %rcx,TF_RIP(%rsp) /* %rcx original value is in %r10 */ 366130803Smarcel movq PCPU(SCRATCH_RSP),%r11 /* %r11 already saved */ 367130803Smarcel movq %r11,TF_RSP(%rsp) /* user stack pointer */ 368130803Smarcel movw %fs,TF_FS(%rsp) 369130803Smarcel movw %gs,TF_GS(%rsp) 370130803Smarcel movw %es,TF_ES(%rsp) 371130803Smarcel movw %ds,TF_DS(%rsp) 372130803Smarcel movq PCPU(CURPCB),%r11 373130803Smarcel andl $~PCB_FULL_IRET,PCB_FLAGS(%r11) 374130803Smarcel sti 375130803Smarcel movq $KUDSEL,TF_SS(%rsp) 376130803Smarcel movq $KUCSEL,TF_CS(%rsp) 377130803Smarcel movq $2,TF_ERR(%rsp) 378130803Smarcel movq %rdi,TF_RDI(%rsp) /* arg 1 */ 379130803Smarcel movq %rsi,TF_RSI(%rsp) /* arg 2 */ 380130803Smarcel movq %rdx,TF_RDX(%rsp) /* arg 3 */ 381130803Smarcel movq %r10,TF_RCX(%rsp) /* arg 4 */ 382130803Smarcel movq %r8,TF_R8(%rsp) /* arg 5 */ 383130803Smarcel movq %r9,TF_R9(%rsp) /* arg 6 */ 384130803Smarcel movq %rax,TF_RAX(%rsp) /* syscall number */ 385130803Smarcel movq %rbx,TF_RBX(%rsp) /* C preserved */ 386130803Smarcel movq %rbp,TF_RBP(%rsp) /* C preserved */ 387130803Smarcel movq %r12,TF_R12(%rsp) /* C preserved */ 388130803Smarcel movq %r13,TF_R13(%rsp) /* C preserved */ 38919370Spst movq %r14,TF_R14(%rsp) /* C preserved */ 39019370Spst movq %r15,TF_R15(%rsp) /* C preserved */ 39119370Spst movl $TF_HASSEGS,TF_FLAGS(%rsp) 392130803Smarcel cld 39319370Spst FAKE_MCOUNT(TF_RIP(%rsp)) 39419370Spst movq PCPU(CURTHREAD),%rdi 39598944Sobrien movq %rsp,TD_FRAME(%rdi) 39619370Spst movl TF_RFLAGS(%rsp),%esi 397130803Smarcel andl $PSL_T,%esi 398130803Smarcel call amd64_syscall 399130803Smarcel1: movq PCPU(CURPCB),%rax 400130803Smarcel /* Disable interrupts before testing PCB_FULL_IRET. 
*/ 401130803Smarcel cli 402130803Smarcel testl $PCB_FULL_IRET,PCB_FLAGS(%rax) 403130803Smarcel jnz 3f 40498944Sobrien /* Check for and handle AST's on return to userland. */ 40519370Spst movq PCPU(CURTHREAD),%rax 40619370Spst testl $TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax) 40719370Spst jne 2f 40846283Sdfr /* Restore preserved registers. */ 40998944Sobrien MEXITCOUNT 41098944Sobrien movq TF_RDI(%rsp),%rdi /* bonus; preserve arg 1 */ 41198944Sobrien movq TF_RSI(%rsp),%rsi /* bonus: preserve arg 2 */ 41298944Sobrien movq TF_RDX(%rsp),%rdx /* return value 2 */ 41398944Sobrien movq TF_RAX(%rsp),%rax /* return value 1 */ 41498944Sobrien movq TF_RFLAGS(%rsp),%r11 /* original %rflags */ 41598944Sobrien movq TF_RIP(%rsp),%rcx /* original %rip */ 41698944Sobrien movq TF_RSP(%rsp),%rsp /* user stack pointer */ 41719370Spst swapgs 418130803Smarcel sysretq 419130803Smarcel 42019370Spst2: /* AST scheduled. */ 42119370Spst sti 42298944Sobrien movq %rsp,%rdi 42398944Sobrien call ast 42419370Spst jmp 1b 425130803Smarcel 426130803Smarcel3: /* Requested full context restore, use doreti for that. */ 427130803Smarcel MEXITCOUNT 428130803Smarcel jmp doreti 429130803Smarcel 43019370Spst/* 431130803Smarcel * Here for CYA insurance, in case a "syscall" instruction gets 43219370Spst * issued from 32 bit compatability mode. MSR_CSTAR has to point 43319370Spst * to *something* if EFER_SCE is enabled. 43498944Sobrien */ 43519370SpstIDTVEC(fast_syscall32) 436130803Smarcel sysret 437130803Smarcel 438130803Smarcel/* 439130803Smarcel * NMI handling is special. 440130803Smarcel * 441130803Smarcel * First, NMIs do not respect the state of the processor's RFLAGS.IF 442130803Smarcel * bit. The NMI handler may be entered at any time, including when 443130803Smarcel * the processor is in a critical section with RFLAGS.IF == 0. 444130803Smarcel * The processor's GS.base value could be invalid on entry to the 445130803Smarcel * handler. 
/*
 * Second, the processor treats NMIs specially, blocking further NMIs
 * until an 'iretq' instruction is executed.  We thus need to execute
 * the NMI handler with interrupts disabled, to prevent a nested interrupt
 * from executing an 'iretq' instruction and inadvertently taking the
 * processor out of NMI mode.
 *
 * Third, the NMI handler runs on its own stack (tss_ist2). The canonical
 * GS.base value for the processor is stored just above the bottom of its
 * NMI stack.  For NMIs taken from kernel mode, the current value in
 * the processor's GS.base is saved at entry to C-preserved register %r12,
 * the canonical value for GS.base is then loaded into the processor, and
 * the saved value is restored at exit time.  For NMIs taken from user mode,
 * the cheaper 'SWAPGS' instructions are used for swapping GS.base.
 */

IDTVEC(nmi)
	subq	$TF_RIP,%rsp
	movl	$(T_NMI),TF_TRAPNO(%rsp)
	movq	$0,TF_ADDR(%rsp)
	movq	$0,TF_ERR(%rsp)
	movq	%rdi,TF_RDI(%rsp)
	movq	%rsi,TF_RSI(%rsp)
	movq	%rdx,TF_RDX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	movq	%r8,TF_R8(%rsp)
	movq	%r9,TF_R9(%rsp)
	movq	%rax,TF_RAX(%rsp)
	movq	%rbx,TF_RBX(%rsp)
	movq	%rbp,TF_RBP(%rsp)
	movq	%r10,TF_R10(%rsp)
	movq	%r11,TF_R11(%rsp)
	movq	%r12,TF_R12(%rsp)
	movq	%r13,TF_R13(%rsp)
	movq	%r14,TF_R14(%rsp)
	movq	%r15,TF_R15(%rsp)
	movw	%fs,TF_FS(%rsp)
	movw	%gs,TF_GS(%rsp)
	movw	%es,TF_ES(%rsp)
	movw	%ds,TF_DS(%rsp)
	movl	$TF_HASSEGS,TF_FLAGS(%rsp)
	cld
	xorl	%ebx,%ebx
	testb	$SEL_RPL_MASK,TF_CS(%rsp)
	jnz	nmi_fromuserspace
	/*
	 * We've interrupted the kernel.  Preserve GS.base in %r12.
	 */
	movl	$MSR_GSBASE,%ecx
	rdmsr
	movq	%rax,%r12
	shlq	$32,%rdx
	orq	%rdx,%r12
	/* Retrieve and load the canonical value for GS.base. */
	movq	TF_SIZE(%rsp),%rdx
	movl	%edx,%eax
	shrq	$32,%rdx
	wrmsr
	jmp	nmi_calltrap
nmi_fromuserspace:
	incl	%ebx
	swapgs
/* Note: this label is also used by ddb and gdb: */
nmi_calltrap:
	FAKE_MCOUNT(TF_RIP(%rsp))
	movq	%rsp,%rdi
	call	trap
	MEXITCOUNT
#ifdef HWPMC_HOOKS
	/*
	 * Capture a userspace callchain if needed.
	 *
	 * - Check if the current trap was from user mode.
	 * - Check if the current thread is valid.
	 * - Check if the thread requires a user call chain to be
	 *   captured.
	 *
	 * We are still in NMI mode at this point.
	 */
	testl	%ebx,%ebx
	jz	nocallchain		/* not from userspace */
	movq	PCPU(CURTHREAD),%rax
	orq	%rax,%rax		/* curthread present? */
	jz	nocallchain
	testl	$TDP_CALLCHAIN,TD_PFLAGS(%rax) /* flagged for capture? */
	jz	nocallchain
	/*
	 * A user callchain is to be captured, so:
	 * - Move execution to the regular kernel stack, to allow for
	 *   nested NMI interrupts.
	 * - Take the processor out of "NMI" mode by faking an "iret".
	 * - Enable interrupts, so that copyin() can work.
	 */
	movq	%rsp,%rsi		/* source stack pointer */
	movq	$TF_SIZE,%rcx
	movq	PCPU(RSP0),%rdx
	subq	%rcx,%rdx
	movq	%rdx,%rdi		/* destination stack pointer */

	shrq	$3,%rcx			/* trap frame size in long words */
	cld
	rep
	movsq				/* copy trapframe */

	movl	%ss,%eax
	pushq	%rax			/* tf_ss */
	pushq	%rdx			/* tf_rsp (on kernel stack) */
	pushfq				/* tf_rflags */
	movl	%cs,%eax
	pushq	%rax			/* tf_cs */
	pushq	$outofnmi		/* tf_rip */
	iretq
outofnmi:
	/*
	 * At this point the processor has exited NMI mode and is running
	 * with interrupts turned off on the normal kernel stack.
	 *
	 * If a pending NMI gets recognized at or after this point, it
	 * will cause a kernel callchain to be traced.
	 *
	 * We turn interrupts back on, and call the user callchain capture hook.
	 */
	movq	pmc_hook,%rax
	orq	%rax,%rax
	jz	nocallchain
	movq	PCPU(CURTHREAD),%rdi	/* thread */
	movq	$PMC_FN_USER_CALLCHAIN,%rsi /* command */
	movq	%rsp,%rdx		/* frame */
	sti
	call	*%rax
	cli
nocallchain:
#endif
	testl	%ebx,%ebx
	jnz	doreti_exit
nmi_kernelexit:
	/*
	 * Put back the preserved MSR_GSBASE value.
	 */
58419370Spst */ 58519370Spst movl $MSR_GSBASE,%ecx 58619370Spst movq %r12,%rdx 58719370Spst movl %edx,%eax 588130803Smarcel shrq $32,%rdx 58998944Sobrien wrmsr 59098944Sobriennmi_restoreregs: 59198944Sobrien movq TF_RDI(%rsp),%rdi 59298944Sobrien movq TF_RSI(%rsp),%rsi 59398944Sobrien movq TF_RDX(%rsp),%rdx 59498944Sobrien movq TF_RCX(%rsp),%rcx 59598944Sobrien movq TF_R8(%rsp),%r8 59698944Sobrien movq TF_R9(%rsp),%r9 59798944Sobrien movq TF_RAX(%rsp),%rax 59898944Sobrien movq TF_RBX(%rsp),%rbx 59998944Sobrien movq TF_RBP(%rsp),%rbp 600130803Smarcel movq TF_R10(%rsp),%r10 60119370Spst movq TF_R11(%rsp),%r11 60298944Sobrien movq TF_R12(%rsp),%r12 60398944Sobrien movq TF_R13(%rsp),%r13 60498944Sobrien movq TF_R14(%rsp),%r14 60598944Sobrien movq TF_R15(%rsp),%r15 60698944Sobrien addq $TF_RIP,%rsp 60798944Sobrien jmp doreti_iret 60898944Sobrien 609130803SmarcelENTRY(fork_trampoline) 61098944Sobrien movq %r12,%rdi /* function */ 61119370Spst movq %rbx,%rsi /* arg1 */ 61219370Spst movq %rsp,%rdx /* trapframe pointer */ 61319370Spst call fork_exit 61419370Spst MEXITCOUNT 615130803Smarcel jmp doreti /* Handle any ASTs */ 61646283Sdfr 61719370Spst/* 618130803Smarcel * To efficiently implement classification of trap and interrupt handlers 61946283Sdfr * for profiling, there must be only trap handlers between the labels btrap 62019370Spst * and bintr, and only interrupt handlers between the labels bintr and 62119370Spst * eintr. This is implemented (partly) by including files that contain 62219370Spst * some of the handlers. Before including the files, set up a normal asm 623130803Smarcel * environment so that the included files doen't need to know that they are 62419370Spst * included. 
62598944Sobrien */ 62619370Spst 62798944Sobrien#ifdef COMPAT_FREEBSD32 62898944Sobrien .data 62998944Sobrien .p2align 4 630130803Smarcel .text 63119370Spst SUPERALIGN_TEXT 63298944Sobrien 633130803Smarcel#include <amd64/ia32/ia32_exception.S> 634130803Smarcel#endif 635130803Smarcel 63698944Sobrien .data 63798944Sobrien .p2align 4 638130803Smarcel .text 63998944Sobrien SUPERALIGN_TEXT 64098944SobrienMCOUNT_LABEL(bintr) 64198944Sobrien 64298944Sobrien#include <amd64/amd64/apic_vector.S> 64398944Sobrien 64498944Sobrien#ifdef DEV_ATPIC 64598944Sobrien .data 64698944Sobrien .p2align 4 64798944Sobrien .text 64898944Sobrien SUPERALIGN_TEXT 64998944Sobrien 65098944Sobrien#include <amd64/amd64/atpic_vector.S> 65198944Sobrien#endif 65298944Sobrien 65398944Sobrien .text 65498944SobrienMCOUNT_LABEL(eintr) 65598944Sobrien 65698944Sobrien/* 65798944Sobrien * void doreti(struct trapframe) 65898944Sobrien * 65998944Sobrien * Handle return from interrupts, traps and syscalls. 66098944Sobrien */ 66198944Sobrien .text 66298944Sobrien SUPERALIGN_TEXT 66398944Sobrien .type doreti,@function 66498944Sobrien .globl doreti 66598944Sobriendoreti: 66698944Sobrien FAKE_MCOUNT($bintr) /* init "from" bintr -> doreti */ 66798944Sobrien /* 66898944Sobrien * Check if ASTs can be handled now. 66998944Sobrien */ 67098944Sobrien testb $SEL_RPL_MASK,TF_CS(%rsp) /* are we returning to user mode? */ 67198944Sobrien jz doreti_exit /* can't handle ASTs now if not */ 67298944Sobrien 67398944Sobriendoreti_ast: 67498944Sobrien /* 67598944Sobrien * Check for ASTs atomically with returning. Disabling CPU 67698944Sobrien * interrupts provides sufficient locking even in the SMP case, 67798944Sobrien * since we will be informed of any new ASTs by an IPI. 
67898944Sobrien */ 67919370Spst cli 68098944Sobrien movq PCPU(CURTHREAD),%rax 68198944Sobrien testl $TDF_ASTPENDING | TDF_NEEDRESCHED,TD_FLAGS(%rax) 68219370Spst je doreti_exit 683130803Smarcel sti 68498944Sobrien movq %rsp,%rdi /* pass a pointer to the trapframe */ 68519370Spst call ast 68698944Sobrien jmp doreti_ast 68798944Sobrien 68898944Sobrien /* 68998944Sobrien * doreti_exit: pop registers, iret. 69019370Spst * 69119370Spst * The segment register pop is a special case, since it may 69298944Sobrien * fault if (for example) a sigreturn specifies bad segment 69319370Spst * registers. The fault is handled in trap.c. 69498944Sobrien */ 69598944Sobriendoreti_exit: 69698944Sobrien MEXITCOUNT 69798944Sobrien movq PCPU(CURPCB),%r8 69819370Spst 69946283Sdfr /* 70046283Sdfr * Do not reload segment registers for kernel. 70146283Sdfr * Since we do not reload segments registers with sane 70246283Sdfr * values on kernel entry, descriptors referenced by 70398944Sobrien * segments registers might be not valid. This is fatal 70446283Sdfr * for user mode, but is not a problem for the kernel. 
70546283Sdfr */ 70698944Sobrien testb $SEL_RPL_MASK,TF_CS(%rsp) 70746283Sdfr jz ld_regs 70819370Spst testl $PCB_FULL_IRET,PCB_FLAGS(%r8) 70919370Spst jz ld_regs 71019370Spst testl $TF_HASSEGS,TF_FLAGS(%rsp) 71119370Spst je set_segs 71246283Sdfr 71398944Sobriendo_segs: 71419370Spst /* Restore %fs and fsbase */ 71519370Spst movw TF_FS(%rsp),%ax 71619370Spst .globl ld_fs 71719370Spstld_fs: 71898944Sobrien movw %ax,%fs 71998944Sobrien cmpw $KUF32SEL,%ax 72019370Spst jne 1f 72119370Spst movl $MSR_FSBASE,%ecx 72219370Spst movl PCB_FSBASE(%r8),%eax 72319370Spst movl PCB_FSBASE+4(%r8),%edx 72419370Spst .globl ld_fsbase 72598944Sobrienld_fsbase: 72698944Sobrien wrmsr 72719370Spst1: 72819370Spst /* Restore %gs and gsbase */ 72919370Spst movw TF_GS(%rsp),%si 73019370Spst pushfq 73119370Spst cli 73219370Spst movl $MSR_GSBASE,%ecx 73398944Sobrien /* Save current kernel %gs base into %r12d:%r13d */ 73419370Spst rdmsr 73598944Sobrien movl %eax,%r12d 73619370Spst movl %edx,%r13d 73719370Spst .globl ld_gs 73898944Sobrienld_gs: 73998944Sobrien movw %si,%gs 74098944Sobrien /* Save user %gs base into %r14d:%r15d */ 74198944Sobrien rdmsr 74298944Sobrien movl %eax,%r14d 74398944Sobrien movl %edx,%r15d 74498944Sobrien /* Restore kernel %gs base */ 74598944Sobrien movl %r12d,%eax 74698944Sobrien movl %r13d,%edx 74798944Sobrien wrmsr 74898944Sobrien popfq 74998944Sobrien /* 75098944Sobrien * Restore user %gs base, either from PCB if used for TLS, or 75198944Sobrien * from the previously saved msr read. 75219370Spst */ 75319370Spst movl $MSR_KGSBASE,%ecx 75419370Spst cmpw $KUG32SEL,%si 75519370Spst jne 1f 75698944Sobrien movl PCB_GSBASE(%r8),%eax 75798944Sobrien movl PCB_GSBASE+4(%r8),%edx 75819370Spst jmp ld_gsbase 75919370Spst1: 76019370Spst movl %r14d,%eax 76119370Spst movl %r15d,%edx 76219370Spst .globl ld_gsbase 76319370Spstld_gsbase: 76419370Spst wrmsr /* May trap if non-canonical, but only for TLS. 
 */
	.globl	ld_es
ld_es:
	movw	TF_ES(%rsp),%es			/* may fault; trap.c recovers via es_load_fault */
	.globl	ld_ds
ld_ds:
	movw	TF_DS(%rsp),%ds			/* may fault; trap.c recovers via ds_load_fault */
ld_regs:
	/* Pop all general-purpose registers from the trapframe. */
	movq	TF_RDI(%rsp),%rdi
	movq	TF_RSI(%rsp),%rsi
	movq	TF_RDX(%rsp),%rdx
	movq	TF_RCX(%rsp),%rcx
	movq	TF_R8(%rsp),%r8
	movq	TF_R9(%rsp),%r9
	movq	TF_RAX(%rsp),%rax
	movq	TF_RBX(%rsp),%rbx
	movq	TF_RBP(%rsp),%rbp
	movq	TF_R10(%rsp),%r10
	movq	TF_R11(%rsp),%r11
	movq	TF_R12(%rsp),%r12
	movq	TF_R13(%rsp),%r13
	movq	TF_R14(%rsp),%r14
	movq	TF_R15(%rsp),%r15
	testb	$SEL_RPL_MASK,TF_CS(%rsp)	/* Did we come from kernel? */
	jz	1f				/* keep running with kernel GS.base */
	cli					/* no interrupts between swapgs and iretq */
	swapgs					/* switch to user GS.base for return to user mode */
1:
	addq	$TF_RIP,%rsp			/* skip over tf_err, tf_trapno */
	.globl	doreti_iret
doreti_iret:
	iretq					/* may fault; trap.c recovers via doreti_iret_fault */

	/*
	 * Frame lacked segment values (TF_HASSEGS clear): seed the frame
	 * with the default user selectors, then reload them via do_segs.
	 */
set_segs:
	movw	$KUDSEL,%ax
	movw	%ax,TF_DS(%rsp)
	movw	%ax,TF_ES(%rsp)
	movw	$KUF32SEL,TF_FS(%rsp)
	movw	$KUG32SEL,TF_GS(%rsp)
	jmp	do_segs

	/*
	 * doreti_iret_fault.  Alternative return code for
	 * the case where we get a fault in the doreti_exit code
	 * above.  trap() (amd64/amd64/trap.c) catches this specific
	 * case, sends the process a signal and continues in the
	 * corresponding place in the code below.
 */
	ALIGN_TEXT
	.globl	doreti_iret_fault
doreti_iret_fault:
	/*
	 * The iretq faulted: rebuild a full trapframe by hand (the CPU
	 * pushed only the hardware frame) and hand it to calltrap as a
	 * protection fault.
	 */
	subq	$TF_RIP,%rsp		/* space including tf_err, tf_trapno */
	testl	$PSL_I,TF_RFLAGS(%rsp)	/* re-enable interrupts only if the */
	jz	1f			/* interrupted context had them on */
	sti
1:
	movw	%fs,TF_FS(%rsp)
	movw	%gs,TF_GS(%rsp)
	movw	%es,TF_ES(%rsp)
	movw	%ds,TF_DS(%rsp)
	movl	$TF_HASSEGS,TF_FLAGS(%rsp)
	movq	%rdi,TF_RDI(%rsp)
	movq	%rsi,TF_RSI(%rsp)
	movq	%rdx,TF_RDX(%rsp)
	movq	%rcx,TF_RCX(%rsp)
	movq	%r8,TF_R8(%rsp)
	movq	%r9,TF_R9(%rsp)
	movq	%rax,TF_RAX(%rsp)
	movq	%rbx,TF_RBX(%rsp)
	movq	%rbp,TF_RBP(%rsp)
	movq	%r10,TF_R10(%rsp)
	movq	%r11,TF_R11(%rsp)
	movq	%r12,TF_R12(%rsp)
	movq	%r13,TF_R13(%rsp)
	movq	%r14,TF_R14(%rsp)
	movq	%r15,TF_R15(%rsp)
	movl	$T_PROTFLT,TF_TRAPNO(%rsp)
	movq	$0,TF_ERR(%rsp)	/* XXX should be the error code */
	movq	$0,TF_ADDR(%rsp)
	FAKE_MCOUNT(TF_RIP(%rsp))
	jmp	calltrap

	/* %ds reload at ld_ds faulted: report T_PROTFLT, then retry with KUDSEL. */
	ALIGN_TEXT
	.globl	ds_load_fault
ds_load_fault:
	movl	$T_PROTFLT,TF_TRAPNO(%rsp)
	testl	$PSL_I,TF_RFLAGS(%rsp)
	jz	1f
	sti
1:
	movq	%rsp,%rdi		/* trap(frame) delivers the signal */
	call	trap
	movw	$KUDSEL,TF_DS(%rsp)	/* force a known-good selector for the retry */
	jmp	doreti

	/* %es reload at ld_es faulted: report T_PROTFLT, then retry with KUDSEL. */
	ALIGN_TEXT
	.globl	es_load_fault
es_load_fault:
	movl	$T_PROTFLT,TF_TRAPNO(%rsp)
	testl	$PSL_I,TF_RFLAGS(%rsp)
	jz	1f
	sti
1:
	movq	%rsp,%rdi		/* trap(frame) delivers the signal */
	call	trap
	movw	$KUDSEL,TF_ES(%rsp)	/* force a known-good selector for the retry */
	jmp	doreti

	/* %fs reload at ld_fs faulted: report T_PROTFLT, then retry with KUF32SEL. */
	ALIGN_TEXT
	.globl	fs_load_fault
fs_load_fault:
	testl	$PSL_I,TF_RFLAGS(%rsp)
	jz	1f
	sti
1:
	movl	$T_PROTFLT,TF_TRAPNO(%rsp)
	movq	%rsp,%rdi
	call	trap
	movw	$KUF32SEL,TF_FS(%rsp)	/* force a known-good selector for the retry */
	jmp	doreti

	/*
	 * %gs reload at ld_gs faulted.  The pushfq done before ld_gs is
	 * still on the stack; pop it first so the trapframe offsets below
	 * are correct, then retry with KUG32SEL.
	 */
	ALIGN_TEXT
	.globl	gs_load_fault
gs_load_fault:
	popfq				/* discard the saved rflags from before ld_gs */
	movl	$T_PROTFLT,TF_TRAPNO(%rsp)
	testl	$PSL_I,TF_RFLAGS(%rsp)
	jz	1f
	sti
1:
	movq	%rsp,%rdi
	call	trap
	movw	$KUG32SEL,TF_GS(%rsp)	/* force a known-good selector for the retry */
	jmp	doreti

	/* wrmsr at ld_fsbase faulted (non-canonical base): zero pcb_fsbase and retry. */
	ALIGN_TEXT
	.globl	fsbase_load_fault
fsbase_load_fault:
	movl	$T_PROTFLT,TF_TRAPNO(%rsp)
	testl	$PSL_I,TF_RFLAGS(%rsp)
	jz	1f
	sti
1:
	movq	%rsp,%rdi
	call	trap
	movq	PCPU(CURTHREAD),%r8
	movq	TD_PCB(%r8),%r8
	movq	$0,PCB_FSBASE(%r8)	/* a zero base cannot fault on the retry */
	jmp	doreti

	/* wrmsr at ld_gsbase faulted (non-canonical base): zero pcb_gsbase and retry. */
	ALIGN_TEXT
	.globl	gsbase_load_fault
gsbase_load_fault:
	movl	$T_PROTFLT,TF_TRAPNO(%rsp)
	testl	$PSL_I,TF_RFLAGS(%rsp)
	jz	1f
	sti
1:
	movq	%rsp,%rdi
	call	trap
	movq	PCPU(CURTHREAD),%r8
	movq	TD_PCB(%r8),%r8
	movq	$0,PCB_GSBASE(%r8)	/* a zero base cannot fault on the retry */
	jmp	doreti

#ifdef HWPMC_HOOKS
	ENTRY(end_exceptions)		/* marks the end of this region for hwpmc sampling */
#endif