/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by the University of
 *      California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/amd64/amd64/machdep.c 129412 2004-05-19 01:23:48Z peter $");

#include "opt_atalk.h"
#include "opt_atpic.h"
#include "opt_compat.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_ipx.h"
#include "opt_isa.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_msgbuf.h"
#include "opt_perfmon.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/callout.h>
#include <sys/msgbuf.h>
#include <sys/sched.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/user.h>
#include <sys/exec.h>
#include <sys/cons.h>

#include <ddb/ddb.h>

#include <net/netisr.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/reg.h>
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/proc.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#include <machine/tss.h>
#ifdef SMP
#include <machine/smp.h>
#endif

#include <amd64/isa/icu.h>

#include <isa/isareg.h>
#include <isa/rtc.h>
#include <sys/ptrace.h>
#include <machine/sigframe.h>

/* Sanity check for __curthread() */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);

extern u_int64_t hammer_time(u_int64_t, u_int64_t);
extern void dblfault_handler(void);

extern void printcpuinfo(void);	/* XXX header file */
extern void identify_cpu(void);
extern void panicifcpuunsupported(void);
extern void initializecpu(void);

#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)

static void cpu_startup(void *);
static void get_fpcontext(struct thread *td, mcontext_t *mcp);
static int set_fpcontext(struct thread *td, const mcontext_t *mcp);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)

int	_udatasel, _ucodesel, _ucode32sel;
u_long	atdevbase;

int cold = 1;

long Maxmem = 0;

vm_paddr_t phys_avail[20];

/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)

struct kva_md_info kmi;

static struct trapframe proc0_tf;
struct region_descriptor r_gdt, r_idt;

struct pcpu __pcpu[MAXCPU];

struct mtx icu_lock;

#ifdef DDB
void *ksym_start, *ksym_end;
#endif
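
/*
 * cpu_startup() runs once at SI_SUB_CPU time: it starts the real-time
 * clock, announces the CPU and physical memory, and brings up the
 * buffer cache so that disk labels can be read.
 */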
static void
cpu_startup(dummy)
        void *dummy;
{
        /*
         * Good {morning,afternoon,evening,night}.
         */
        startrtclock();
        printcpuinfo();
        panicifcpuunsupported();
#ifdef PERFMON
        perfmon_init();
#endif
        printf("real memory  = %ju (%ju MB)\n", ptoa((uintmax_t)Maxmem),
            ptoa((uintmax_t)Maxmem) / 1048576);
        /*
         * Display any holes after the first chunk of extended memory.
         */
        if (bootverbose) {
                int indx;

                printf("Physical memory chunk(s):\n");
                for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
                        vm_paddr_t size;

                        size = phys_avail[indx + 1] - phys_avail[indx];
                        printf(
                            "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
                            (uintmax_t)phys_avail[indx],
                            (uintmax_t)phys_avail[indx + 1] - 1,
                            (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
                }
        }

        vm_ksubmap_init(&kmi);

        printf("avail memory = %ju (%ju MB)\n",
            ptoa((uintmax_t)cnt.v_free_count),
            ptoa((uintmax_t)cnt.v_free_count) / 1048576);

        /*
         * Set up buffers, so they can be used to read disk labels.
         */
        bufinit();
        vm_pager_bufferinit();

        cpu_setregs();
}

/*
 * Send an interrupt to a process.
 *
 * The stack is set up so that the sigcode stored at its top calls the
 * signal handler, followed by a call to the sigreturn routine below.
 * After sigreturn resets the signal mask, the stack, and the frame
 * pointer, it returns to the user-specified pc and psl.
 */
void
sendsig(catcher, sig, mask, code)
        sig_t catcher;
        int sig;
        sigset_t *mask;
        u_long code;
{
        struct sigframe sf, *sfp;
        struct proc *p;
        struct thread *td;
        struct sigacts *psp;
        char *sp;
        struct trapframe *regs;
        int oonstack;

        td = curthread;
        p = td->td_proc;
        PROC_LOCK_ASSERT(p, MA_OWNED);
        psp = p->p_sigacts;
        mtx_assert(&psp->ps_mtx, MA_OWNED);
        regs = td->td_frame;
        oonstack = sigonstack(regs->tf_rsp);

        /* Save user context. */
        bzero(&sf, sizeof(sf));
        sf.sf_uc.uc_sigmask = *mask;
        sf.sf_uc.uc_stack = td->td_sigstk;
        sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
            ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
        sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
        bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(*regs));
        sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
        get_fpcontext(td, &sf.sf_uc.uc_mcontext);
        fpstate_drop(td);

        /* Allocate space for the signal handler context. */
        if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
            SIGISMEMBER(psp->ps_sigonstack, sig)) {
                sp = td->td_sigstk.ss_sp +
                    td->td_sigstk.ss_size - sizeof(struct sigframe);
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
                td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
        } else
                sp = (char *)regs->tf_rsp - sizeof(struct sigframe) - 128;
        /* Align to 16 bytes. */
        sfp = (struct sigframe *)((unsigned long)sp & ~0xFul);

        /* Translate the signal if appropriate. */
        if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
                sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

        /* Build the argument list for the signal handler. */
        regs->tf_rdi = sig;                     /* arg 1 in %rdi */
        regs->tf_rdx = (register_t)&sfp->sf_uc; /* arg 3 in %rdx */
        if (SIGISMEMBER(psp->ps_siginfo, sig)) {
                /* Signal handler installed with SA_SIGINFO. */
                regs->tf_rsi = (register_t)&sfp->sf_si; /* arg 2 in %rsi */
                sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

                /* Fill in POSIX parts */
                sf.sf_si.si_signo = sig;
                sf.sf_si.si_code = code;
                regs->tf_rcx = regs->tf_addr;   /* arg 4 in %rcx */
        } else {
                /* Old FreeBSD-style arguments. */
                regs->tf_rsi = code;            /* arg 2 in %rsi */
                regs->tf_rcx = regs->tf_addr;   /* arg 4 in %rcx */
                sf.sf_ahu.sf_handler = catcher;
        }
        mtx_unlock(&psp->ps_mtx);
        PROC_UNLOCK(p);

        /*
         * Copy the sigframe out to the user's stack.
         */
        if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
#ifdef DEBUG
                printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
                PROC_LOCK(p);
                sigexit(td, SIGILL);
        }

        regs->tf_rsp = (long)sfp;
        regs->tf_rip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
        regs->tf_rflags &= ~PSL_T;
        regs->tf_cs = _ucodesel;
        PROC_LOCK(p);
        mtx_lock(&psp->ps_mtx);
}

/*
 * Build siginfo_t for SA thread
 */
void
cpu_thread_siginfo(int sig, u_long code, siginfo_t *si)
{
        struct proc *p;
        struct thread *td;
        struct trapframe *regs;

        td = curthread;
        p = td->td_proc;
        regs = td->td_frame;
        PROC_LOCK_ASSERT(p, MA_OWNED);

        bzero(si, sizeof(*si));
        si->si_signo = sig;
        si->si_code = code;
        si->si_addr = (void *)regs->tf_addr;
        /* XXXKSE fill other fields */
}

/*
 * System call to clean up state after a signal has been taken.
 * Reset the signal mask and stack state from the context left by
 * sendsig (above).  Return to the previous pc and psl as specified by
 * the context left by sendsig.  Check carefully to make sure that the
 * user has not modified the state to gain improper privileges.
 *
 * MPSAFE
 */
int
sigreturn(td, uap)
        struct thread *td;
        struct sigreturn_args /* {
                const __ucontext *sigcntxp;
        } */ *uap;
{
        ucontext_t uc;
        struct proc *p = td->td_proc;
        struct trapframe *regs;
        const ucontext_t *ucp;
        long rflags;
        int cs, error, ret;

        error = copyin(uap->sigcntxp, &uc, sizeof(uc));
        if (error != 0)
                return (error);
        ucp = &uc;
        regs = td->td_frame;
        rflags = ucp->uc_mcontext.mc_rflags;
        /*
         * Don't allow users to change privileged or reserved flags.
         */
        /*
         * XXX do allow users to change the privileged flag PSL_RF.
         * The cpu sets PSL_RF in tf_rflags for faults.  Debuggers
         * should sometimes set it there too.  tf_rflags is kept in
         * the signal context during signal handling and there is no
         * other place to remember it, so the PSL_RF bit may be
         * corrupted by the signal handler without us knowing.
         * Corruption of the PSL_RF bit at worst causes one more or
         * one less debugger trap, so allowing it is fairly harmless.
         */
        if (!EFL_SECURE(rflags & ~PSL_RF, regs->tf_rflags & ~PSL_RF)) {
                printf("sigreturn: rflags = 0x%lx\n", rflags);
                return (EINVAL);
        }

        /*
         * Don't allow users to load a valid privileged %cs.  Let the
         * hardware check for invalid selectors, excess privilege in
         * other selectors, invalid %eip's and invalid %esp's.
         */
        cs = ucp->uc_mcontext.mc_cs;
        if (!CS_SECURE(cs)) {
                printf("sigreturn: cs = 0x%x\n", cs);
                trapsignal(td, SIGBUS, T_PROTFLT);
                return (EINVAL);
        }

        ret = set_fpcontext(td, &ucp->uc_mcontext);
        if (ret != 0)
                return (ret);
        bcopy(&ucp->uc_mcontext.mc_rdi, regs, sizeof(*regs));

        PROC_LOCK(p);
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
        if (ucp->uc_mcontext.mc_onstack & 1)
                td->td_sigstk.ss_flags |= SS_ONSTACK;
        else
                td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

        td->td_sigmask = ucp->uc_sigmask;
        SIG_CANTMASK(td->td_sigmask);
        signotify(td);
        PROC_UNLOCK(p);
        td->td_pcb->pcb_flags |= PCB_FULLCTX;
        return (EJUSTRETURN);
}

#ifdef COMPAT_FREEBSD4
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
{

        return sigreturn(td, (struct sigreturn_args *)uap);
}
#endif

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
        for (;;)
                __asm__ ("hlt");
}

/*
 * Hook to idle the CPU when possible.  In the SMP case we default to
 * off because a halted cpu will not currently pick up a new thread in the
 * run queue until the next timer tick.  If turned on this will result in
 * approximately a 4.2% loss in real time performance in buildworld tests
 * (but improves user and sys times oddly enough), and saves approximately
 * 5% in power consumption on an idle machine (tests w/2xCPU 1.1GHz P3).
 *
 * XXX we need to have a cpu mask of idle cpus and generate an IPI or
 * otherwise generate some sort of interrupt to wake up cpus sitting in HLT.
 * Then we can have our cake and eat it too.
 *
 * XXX I'm turning it on for SMP as well by default for now.  It seems to
 * help lock contention somewhat, and this is critical for HTT. -Peter
 */
static int      cpu_idle_hlt = 1;
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
    &cpu_idle_hlt, 0, "Idle loop HLT enable");

static void
cpu_idle_default(void)
{
        /*
         * We must absolutely guarantee that hlt is the very next
         * instruction after sti or we introduce a timing window.
         */
        __asm __volatile("sti; hlt");
}

/*
 * Note that we have to be careful here to avoid a race between checking
 * sched_runnable() and actually halting.  If we don't do this, we may waste
 * the time between calling hlt and the next interrupt even though there
 * is a runnable process.
 */
void
cpu_idle(void)
{

        if (cpu_idle_hlt) {
                disable_intr();
                if (sched_runnable())
                        enable_intr();
                else
                        (*cpu_idle_hook)();
        }
}

/* Other subsystems (e.g., ACPI) can hook this later. */
void (*cpu_idle_hook)(void) = cpu_idle_default;

/*
 * Clear registers on exec
 */
void
exec_setregs(td, entry, stack, ps_strings)
        struct thread *td;
        u_long entry;
        u_long stack;
        u_long ps_strings;
{
        struct trapframe *regs = td->td_frame;
        struct pcb *pcb = td->td_pcb;

        wrmsr(MSR_FSBASE, 0);
        wrmsr(MSR_KGSBASE, 0); /* User value while we're in the kernel */
        pcb->pcb_fsbase = 0;
        pcb->pcb_gsbase = 0;
        load_ds(_udatasel);
        load_es(_udatasel);
        load_fs(_udatasel);
        load_gs(_udatasel);
        pcb->pcb_ds = _udatasel;
        pcb->pcb_es = _udatasel;
        pcb->pcb_fs = _udatasel;
        pcb->pcb_gs = _udatasel;

        bzero((char *)regs, sizeof(struct trapframe));
        regs->tf_rip = entry;
        regs->tf_rsp = ((stack - 8) & ~0xFul) + 8;
        regs->tf_rdi = stack;           /* argv */
        regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T);
        regs->tf_ss = _udatasel;
        regs->tf_cs = _ucodesel;

        /*
         * Reset the hardware debug registers if they were in use.
         * They won't have any meaning for the newly exec'd process.
         */
        if (pcb->pcb_flags & PCB_DBREGS) {
                pcb->pcb_dr0 = 0;
                pcb->pcb_dr1 = 0;
                pcb->pcb_dr2 = 0;
                pcb->pcb_dr3 = 0;
                pcb->pcb_dr6 = 0;
                pcb->pcb_dr7 = 0;
                if (pcb == PCPU_GET(curpcb)) {
                        /*
                         * Clear the debug registers on the running
                         * CPU, otherwise they will end up affecting
                         * the next process we switch to.
                         */
                        reset_dbregs();
                }
                pcb->pcb_flags &= ~PCB_DBREGS;
        }

        /*
         * Arrange to trap the next fpu or `fwait' instruction (see fpu.c
         * for why fwait must be trapped at least if there is an fpu or an
         * emulator).  This is mainly to handle the case where npx0 is not
         * configured, since the fpu routines normally set up the trap
         * otherwise.  It should be done only at boot time, but doing it
         * here allows modifying `fpu_exists' for testing the emulator on
         * systems with an fpu.
         */
        load_cr0(rcr0() | CR0_MP | CR0_TS);

        /* Initialize the fpu (if any) for the current process. */
        /*
         * XXX the above load_cr0() also initializes it and is a layering
         * violation.  It drops the fpu state partially
         * and this would be fatal if we were interrupted now, and decided
         * to force the state to the pcb, and checked the invariant
         * (CR0_TS clear) if and only if PCPU_GET(fpcurthread) != NULL).
         * ALL of this can happen except the check.  The check used to
         * happen and be fatal later when we didn't complete the drop
         * before returning to user mode.  This should be fixed properly
         * soon.
         */
        fpstate_drop(td);
}

void
cpu_setregs(void)
{
        register_t cr0;

        cr0 = rcr0();
        cr0 |= CR0_NE;                  /* Done by fpuinit() */
        cr0 |= CR0_MP | CR0_TS;         /* Done at every execve() too. */
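        /*
         * CR0_WP makes the CPU honor page-level write protection in
         * supervisor mode (needed for copy-on-write to be safe), and
         * CR0_AM lets user code that sets PSL_AC take alignment-check
         * faults.
         */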
        cr0 |= CR0_WP | CR0_AM;
        load_cr0(cr0);
}

static int
sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS)
{
        int error;

        error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
            req);
        if (!error && req->newptr)
                resettodr();
        return (error);
}

SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
    &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");

SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set,
    CTLFLAG_RW, &disable_rtc_set, 0, "");

SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock,
    CTLFLAG_RW, &wall_cmos_clock, 0, "");

/*
 * Initialize amd64 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

struct user_segment_descriptor gdt[NGDT * MAXCPU];/* global descriptor table */
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */

static char dblfault_stack[PAGE_SIZE] __aligned(16);

struct amd64tss common_tss[MAXCPU];

/* software prototypes -- in more palatable form */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL    0 Null Descriptor */
{       0x0,                    /* segment base address */
        0x0,                    /* length */
        0,                      /* segment type */
        0,                      /* segment descriptor priority level */
        0,                      /* segment descriptor present */
        0,                      /* long */
        0,                      /* default 32 vs 16 bit size */
        0                       /* limit granularity (byte/page units)*/ },
/* GCODE_SEL    1 Code Descriptor for kernel */
{       0x0,                    /* segment base address */
        0xfffff,                /* length - all address space */
        SDT_MEMERA,             /* segment type */
        SEL_KPL,                /* segment descriptor priority level */
        1,                      /* segment descriptor present */
        1,                      /* long */
        0,                      /* default 32 vs 16 bit size */
        1                       /* limit granularity (byte/page units)*/ },
/* GDATA_SEL    2 Data Descriptor for kernel */
{       0x0,                    /* segment base address */
        0xfffff,                /* length - all address space */
        SDT_MEMRWA,             /* segment type */
        SEL_KPL,                /* segment descriptor priority level */
        1,                      /* segment descriptor present */
        1,                      /* long */
        0,                      /* default 32 vs 16 bit size */
        1                       /* limit granularity (byte/page units)*/ },
/* GUCODE32_SEL 3 32 bit Code Descriptor for user */
{       0x0,                    /* segment base address */
        0xfffff,                /* length - all address space */
        SDT_MEMERA,             /* segment type */
        SEL_UPL,                /* segment descriptor priority level */
        1,                      /* segment descriptor present */
        0,                      /* long */
        1,                      /* default 32 vs 16 bit size */
        1                       /* limit granularity (byte/page units)*/ },
/* GUDATA_SEL   4 32/64 bit Data Descriptor for user */
{       0x0,                    /* segment base address */
        0xfffff,                /* length - all address space */
        SDT_MEMRWA,             /* segment type */
        SEL_UPL,                /* segment descriptor priority level */
        1,                      /* segment descriptor present */
        0,                      /* long */
        1,                      /* default 32 vs 16 bit size */
        1                       /* limit granularity (byte/page units)*/ },
/* GUCODE_SEL   5 64 bit Code Descriptor for user */
{       0x0,                    /* segment base address */
        0xfffff,                /* length - all address space */
        SDT_MEMERA,             /* segment type */
        SEL_UPL,                /* segment descriptor priority level */
        1,                      /* segment descriptor present */
        1,                      /* long */
        0,                      /* default 32 vs 16 bit size */
        1                       /* limit granularity (byte/page units)*/ },
/* GPROC0_SEL   6 Proc 0 Tss Descriptor */
{       0x0,                    /* segment base address */
        sizeof(struct amd64tss)-1,/* length - all address space */
        SDT_SYSTSS,             /* segment type */
        SEL_KPL,                /* segment descriptor priority level */
        1,                      /* segment descriptor present */
        0,                      /* long */
        0,                      /* unused - default 32 vs 16 bit size */
        0                       /* limit granularity (byte/page units)*/ },
/* Actually, the TSS is a system descriptor which is double size */
{       0x0,                    /* segment base address */
        0x0,                    /* length */
        0,                      /* segment type */
        0,                      /* segment descriptor priority level */
        0,                      /* segment descriptor present */
        0,                      /* long */
        0,                      /* default 32 vs 16 bit size */
        0                       /* limit granularity (byte/page units)*/ },
};

void
setidt(idx, func, typ, dpl, ist)
        int idx;
        inthand_t *func;
        int typ;
        int dpl;
        int ist;
{
        struct gate_descriptor *ip;

        ip = idt + idx;
        ip->gd_looffset = (uintptr_t)func;
        ip->gd_selector = GSEL(GCODE_SEL, SEL_KPL);
        ip->gd_ist = ist;
        ip->gd_xx = 0;
        ip->gd_type = typ;
        ip->gd_dpl = dpl;
        ip->gd_p = 1;
        ip->gd_hioffset = ((uintptr_t)func)>>16 ;
}

#define IDTVEC(name)    __CONCAT(X,name)

extern inthand_t
        IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
        IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
        IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
        IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
        IDTVEC(xmm), IDTVEC(dblfault),
        IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

void
sdtossd(sd, ssd)
        struct user_segment_descriptor *sd;
        struct soft_segment_descriptor *ssd;
{

        ssd->ssd_base  = (sd->sd_hibase << 24) | sd->sd_lobase;
        ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
        ssd->ssd_type  = sd->sd_type;
        ssd->ssd_dpl   = sd->sd_dpl;
        ssd->ssd_p     = sd->sd_p;
        ssd->ssd_long  = sd->sd_long;
        ssd->ssd_def32 = sd->sd_def32;
        ssd->ssd_gran  = sd->sd_gran;
}

void
ssdtosd(ssd, sd)
        struct soft_segment_descriptor *ssd;
        struct user_segment_descriptor *sd;
{

        sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
        sd->sd_hibase = (ssd->ssd_base >> 24) & 0xff;
        sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
        sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
        sd->sd_type  = ssd->ssd_type;
        sd->sd_dpl   = ssd->ssd_dpl;
        sd->sd_p     = ssd->ssd_p;
        sd->sd_long  = ssd->ssd_long;
        sd->sd_def32 = ssd->ssd_def32;
        sd->sd_gran  = ssd->ssd_gran;
}

void
ssdtosyssd(ssd, sd)
        struct soft_segment_descriptor *ssd;
        struct system_segment_descriptor *sd;
{

        sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
        sd->sd_hibase = (ssd->ssd_base >> 24) & 0xfffffffffful;
        sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
        sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
        sd->sd_type  = ssd->ssd_type;
        sd->sd_dpl   = ssd->ssd_dpl;
        sd->sd_p     = ssd->ssd_p;
        sd->sd_gran  = ssd->ssd_gran;
}

#if !defined(DEV_ATPIC) && defined(DEV_ISA)
#include <isa/isavar.h>
u_int
isa_irq_pending(void)
{

        return (0);
}
#endif

#define PHYSMAP_SIZE    (2 * 8)

struct bios_smap {
        u_int64_t base;
        u_int64_t length;
        u_int32_t type;
} __packed;

u_int basemem;
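
/*
 * basemem is the amount of conventional ("base") memory in kilobytes.
 * getmemsize() below derives it from the SMAP segment that starts at
 * physical address 0; the SMP startup code depends on it being found.
 */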

/*
 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * If we cannot accurately determine the physical memory map, then use
 * the value from the 0xE801 call, and failing that, the RTC.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 *
 * XXX first should be vm_paddr_t.
 */
static void
getmemsize(caddr_t kmdp, u_int64_t first)
{
        int i, physmap_idx, pa_indx;
        vm_paddr_t pa, physmap[PHYSMAP_SIZE];
        pt_entry_t *pte;
        char *cp;
        struct bios_smap *smapbase, *smap, *smapend;
        u_int32_t smapsize;

        bzero(physmap, sizeof(physmap));
        basemem = 0;
        physmap_idx = 0;

        /*
         * Get the memory map from INT 15:E820, kindly supplied by the
         * loader.
         *
         * subr_module.c says:
         * "Consumer may safely assume that size value precedes data."
         * ie: an int32_t immediately precedes smap.
         */
        smapbase = (struct bios_smap *)preload_search_info(kmdp,
            MODINFO_METADATA | MODINFOMD_SMAP);
        if (smapbase == NULL)
                panic("No BIOS smap info from loader!");

        smapsize = *((u_int32_t *)smapbase - 1);
        smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);

        for (smap = smapbase; smap < smapend; smap++) {
                if (boothowto & RB_VERBOSE)
                        printf("SMAP type=%02x base=%016lx len=%016lx\n",
                            smap->type, smap->base, smap->length);

                if (smap->type != 0x01)
                        continue;

                if (smap->length == 0)
                        continue;

                for (i = 0; i <= physmap_idx; i += 2) {
                        if (smap->base < physmap[i + 1]) {
                                if (boothowto & RB_VERBOSE)
                                        printf(
        "Overlapping or non-monotonic memory region, ignoring second region\n");
                                goto next_run;
                        }
                }

                if (smap->base == physmap[physmap_idx + 1]) {
                        physmap[physmap_idx + 1] += smap->length;
next_run:
                        continue;
                }

                physmap_idx += 2;
                if (physmap_idx == PHYSMAP_SIZE) {
                        printf(
                "Too many segments in the physical address map, giving up\n");
                        break;
                }
                physmap[physmap_idx] = smap->base;
                physmap[physmap_idx + 1] = smap->base + smap->length;
        }

        /*
         * Find the 'base memory' segment for SMP
         */
        basemem = 0;
        for (i = 0; i <= physmap_idx; i += 2) {
                if (physmap[i] == 0x00000000) {
                        basemem = physmap[i + 1] / 1024;
                        break;
                }
        }
        if (basemem == 0)
                panic("BIOS smap did not include a basemem segment!");

#ifdef SMP
        /* make hole for AP bootstrap code */
        physmap[1] = mp_bootaddress(physmap[1] / 1024);
#endif

        /*
         * Maxmem isn't the "maximum memory", it's one larger than the
         * highest page of the physical address space.  It should be
         * called something like "Maxphyspage".  We may adjust this
         * based on ``hw.physmem'' and the results of the memory test.
         */
        Maxmem = atop(physmap[physmap_idx + 1]);

#ifdef MAXMEM
        Maxmem = MAXMEM / 4;
#endif

        /*
         * hw.physmem is a size in bytes; we also allow k, m, and g suffixes
         * for the appropriate modifiers.  This overrides MAXMEM.
         */
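        /*
         * For example, setting hw.physmem="512m" from the loader would
         * clamp memory use to 512MB ("512m" is purely an illustrative
         * value, parsed by the suffix switch below).
         */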
        cp = getenv("hw.physmem");
        if (cp != NULL) {
                u_int64_t AllowMem, sanity;
                char *ep;

                sanity = AllowMem = strtouq(cp, &ep, 0);
                if ((ep != cp) && (*ep != 0)) {
                        switch (*ep) {
                        case 'g':
                        case 'G':
                                AllowMem <<= 10;
                        case 'm':
                        case 'M':
                                AllowMem <<= 10;
                        case 'k':
                        case 'K':
                                AllowMem <<= 10;
                                break;
                        default:
                                AllowMem = sanity = 0;
                        }
                        if (AllowMem < sanity)
                                AllowMem = 0;
                }
                if (AllowMem == 0)
                        printf("Ignoring invalid memory size of '%s'\n", cp);
                else
                        Maxmem = atop(AllowMem);
                freeenv(cp);
        }

        if (atop(physmap[physmap_idx + 1]) != Maxmem &&
            (boothowto & RB_VERBOSE))
                printf("Physical memory use set to %ldK\n", Maxmem * 4);

        /*
         * If Maxmem has been increased beyond what the system has detected,
         * extend the last memory segment to the new limit.
         */
        if (atop(physmap[physmap_idx + 1]) < Maxmem)
                physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem);

        /* call pmap initialization to make new kernel address space */
        pmap_bootstrap(&first);

        /*
         * Size up each available chunk of physical memory.
         */
        physmap[0] = PAGE_SIZE;         /* mask off page 0 */
        pa_indx = 0;
        phys_avail[pa_indx++] = physmap[0];
        phys_avail[pa_indx] = physmap[0];
        pte = CMAP1;

        /*
         * physmap is in bytes, so when converting to page boundaries,
         * round up the start address and round down the end address.
         */
        for (i = 0; i <= physmap_idx; i += 2) {
                vm_paddr_t end;

                end = ptoa((vm_paddr_t)Maxmem);
                if (physmap[i + 1] < end)
                        end = trunc_page(physmap[i + 1]);
                for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
                        int tmp, page_bad;
                        int *ptr = (int *)CADDR1;

                        /*
                         * block out kernel memory as not available.
                         */
                        if (pa >= 0x100000 && pa < first)
                                continue;

                        page_bad = FALSE;

                        /*
                         * map page into kernel: valid, read/write,
                         * non-cacheable
                         */
                        *pte = pa | PG_V | PG_RW | PG_N;
                        invltlb();

                        tmp = *(int *)ptr;
                        /*
                         * Test for alternating 1's and 0's
                         */
                        *(volatile int *)ptr = 0xaaaaaaaa;
                        if (*(volatile int *)ptr != 0xaaaaaaaa)
                                page_bad = TRUE;
                        /*
                         * Test for alternating 0's and 1's
                         */
                        *(volatile int *)ptr = 0x55555555;
                        if (*(volatile int *)ptr != 0x55555555)
                                page_bad = TRUE;
                        /*
                         * Test for all 1's
                         */
                        *(volatile int *)ptr = 0xffffffff;
                        if (*(volatile int *)ptr != 0xffffffff)
                                page_bad = TRUE;
                        /*
                         * Test for all 0's
                         */
                        *(volatile int *)ptr = 0x0;
                        if (*(volatile int *)ptr != 0x0)
                                page_bad = TRUE;
                        /*
                         * Restore original value.
                         */
                        *(int *)ptr = tmp;

                        /*
                         * Adjust array of valid/good pages.
                         */
                        if (page_bad == TRUE)
                                continue;
                        /*
                         * If this good page is a continuation of the
                         * previous set of good pages, then just increase
                         * the end pointer.  Otherwise start a new chunk.
                         * Note that "end" points one past the last byte,
                         * making the range >= start and < end.
                         * If we're also doing a speculative memory
                         * test and we are at or past the end, bump up
                         * Maxmem so that we keep going.  The first bad
                         * page will terminate the loop.
                         */
                        if (phys_avail[pa_indx] == pa) {
                                phys_avail[pa_indx] += PAGE_SIZE;
                        } else {
                                pa_indx++;
                                if (pa_indx == PHYS_AVAIL_ARRAY_END) {
                                        printf(
                "Too many holes in the physical address space, giving up\n");
                                        pa_indx--;
                                        break;
                                }
                                phys_avail[pa_indx++] = pa;     /* start */
                                phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
                        }
                        physmem++;
                }
        }
        *pte = 0;
        invltlb();

        /*
         * XXX
         * The last chunk must contain at least one page plus the message
         * buffer to avoid complicating other code (message buffer address
         * calculation, etc.).
         */
        while (phys_avail[pa_indx - 1] + PAGE_SIZE +
            round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
                physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
                phys_avail[pa_indx--] = 0;
                phys_avail[pa_indx--] = 0;
        }

        Maxmem = atop(phys_avail[pa_indx]);

        /* Trim off space for the message buffer. */
        phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);

        avail_end = phys_avail[pa_indx];
}

u_int64_t
hammer_time(u_int64_t modulep, u_int64_t physfree)
{
        caddr_t kmdp;
        int gsel_tss, off, x;
        struct pcpu *pc;
        u_int64_t msr;
        char *env;

#ifdef DEV_ISA
        /* Preemptively mask the atpics and leave them shut down */
        outb(IO_ICU1 + ICU_IMR_OFFSET, 0xff);
        outb(IO_ICU2 + ICU_IMR_OFFSET, 0xff);
#else
#error "have you forgotten the isa device?";
#endif

#if 0   /* Not till we test the features bit */
        /* Turn on PTE NX (no execute) bit */
        msr = rdmsr(MSR_EFER) | EFER_NXE;
        wrmsr(MSR_EFER, msr);
#endif

        proc0.p_uarea = (struct user *)(physfree + KERNBASE);
        bzero(proc0.p_uarea, UAREA_PAGES * PAGE_SIZE);
        physfree += UAREA_PAGES * PAGE_SIZE;
        thread0.td_kstack = physfree + KERNBASE;
        bzero((void *)thread0.td_kstack, KSTACK_PAGES * PAGE_SIZE);
        physfree += KSTACK_PAGES * PAGE_SIZE;
        thread0.td_pcb = (struct pcb *)
            (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;

        atdevbase = ISA_HOLE_START + KERNBASE;

        /*
         * This may be done better later if it gets more high level
         * components in it.  If so just link td->td_proc here.
         */
        proc_linkup(&proc0, &ksegrp0, &kse0, &thread0);

        preload_metadata = (caddr_t)(uintptr_t)(modulep + KERNBASE);
        preload_bootstrap_relocate(KERNBASE);
        kmdp = preload_search_by_type("elf kernel");
        if (kmdp == NULL)
                kmdp = preload_search_by_type("elf64 kernel");
        boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
        kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *) + KERNBASE;
#ifdef DDB
        ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, void *);
        ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, void *);
#endif

        /* Init basic tunables, hz etc */
        init_param1();

        /*
         * make gdt memory segments
         */
        gdt_segs[GPROC0_SEL].ssd_base = (uintptr_t)&common_tss[0];

        for (x = 0; x < NGDT; x++) {
                if (x != GPROC0_SEL && x != (GPROC0_SEL + 1))
                        ssdtosd(&gdt_segs[x], &gdt[x]);
        }
        ssdtosyssd(&gdt_segs[GPROC0_SEL],
            (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);

        r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
        r_gdt.rd_base = (long) gdt;
        lgdt(&r_gdt);
        pc = &__pcpu[0];

        wrmsr(MSR_FSBASE, 0);           /* User value */
        wrmsr(MSR_GSBASE, (u_int64_t)pc);
        wrmsr(MSR_KGSBASE, 0);          /* User value while in the kernel */

        pcpu_init(pc, 0, sizeof(struct pcpu));
        PCPU_SET(prvspace, pc);
        PCPU_SET(curthread, &thread0);
        PCPU_SET(curpcb, thread0.td_pcb);
        PCPU_SET(tssp, &common_tss[0]);

        /*
         * Initialize mutexes.
         *
         * icu_lock: in order to allow an interrupt to occur in a critical
         *           section, to set pcpu->ipending (etc...) properly, we
         *           must be able to get the icu lock, so it can't be
         *           under witness.
         */
        mutex_init();
        mtx_init(&clock_lock, "clk", NULL, MTX_SPIN);
        mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS);

        /* exceptions */
        for (x = 0; x < NIDT; x++)
                setidt(x, &IDTVEC(rsvd), SDT_SYSIGT, SEL_KPL, 0);
        setidt(IDT_DE, &IDTVEC(div),  SDT_SYSIGT, SEL_KPL, 0);
        setidt(IDT_DB, &IDTVEC(dbg),  SDT_SYSIGT, SEL_KPL, 0);
        setidt(IDT_NMI, &IDTVEC(nmi),  SDT_SYSIGT, SEL_KPL, 0);
        setidt(IDT_BP, &IDTVEC(bpt),  SDT_SYSIGT, SEL_UPL, 0);
        setidt(IDT_OF, &IDTVEC(ofl),  SDT_SYSIGT, SEL_KPL, 0);
        setidt(IDT_BR, &IDTVEC(bnd),  SDT_SYSIGT, SEL_KPL, 0);
        setidt(IDT_UD, &IDTVEC(ill),  SDT_SYSIGT, SEL_KPL, 0);
        setidt(IDT_NM, &IDTVEC(dna),  SDT_SYSIGT, SEL_KPL, 0);
        setidt(IDT_DF, &IDTVEC(dblfault), SDT_SYSIGT, SEL_KPL, 1);
        setidt(IDT_FPUGP, &IDTVEC(fpusegm),  SDT_SYSIGT, SEL_KPL, 0);
        setidt(IDT_TS, &IDTVEC(tss),  SDT_SYSIGT, SEL_KPL, 0);
        setidt(IDT_NP, &IDTVEC(missing),  SDT_SYSIGT, SEL_KPL, 0);
        setidt(IDT_SS, &IDTVEC(stk),  SDT_SYSIGT, SEL_KPL, 0);
        setidt(IDT_GP, &IDTVEC(prot),  SDT_SYSIGT, SEL_KPL, 0);
        setidt(IDT_PF, &IDTVEC(page),  SDT_SYSIGT, SEL_KPL, 0);
        setidt(IDT_MF, &IDTVEC(fpu),  SDT_SYSIGT, SEL_KPL, 0);
        setidt(IDT_AC, &IDTVEC(align), SDT_SYSIGT, SEL_KPL, 0);
        setidt(IDT_MC, &IDTVEC(mchk),  SDT_SYSIGT, SEL_KPL, 0);
        setidt(IDT_XF, &IDTVEC(xmm), SDT_SYSIGT, SEL_KPL, 0);

        r_idt.rd_limit = sizeof(idt0) - 1;
        r_idt.rd_base = (long) idt;
        lidt(&r_idt);
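
        /*
         * Note: the double fault handler was registered above with
         * ist 1, so it runs on its own stack; tss_ist1 is pointed at
         * dblfault_stack a little further down.
         */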

        /*
         * Initialize the console before we print anything out.
         */
        cninit();

#ifdef DEV_ATPIC
        atpic_startup();
#endif

#ifdef DDB
        kdb_init();
        if (boothowto & RB_KDB)
                Debugger("Boot flags requested debugger");
#endif

        identify_cpu();         /* Final stage of CPU initialization */
        initializecpu();        /* Initialize CPU registers */

        /* make an initial tss so cpu can get interrupt stack on syscall! */
        common_tss[0].tss_rsp0 = thread0.td_kstack +
            KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb);
        /* Ensure the stack is aligned to 16 bytes */
        common_tss[0].tss_rsp0 &= ~0xFul;
        PCPU_SET(rsp0, common_tss[0].tss_rsp0);

        /* doublefault stack space, runs on ist1 */
        common_tss[0].tss_ist1 = (long)&dblfault_stack[sizeof(dblfault_stack)];

        gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
        ltr(gsel_tss);

        /* Set up the fast syscall stuff */
        msr = rdmsr(MSR_EFER) | EFER_SCE;
        wrmsr(MSR_EFER, msr);
        wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
        wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
        msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
            ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
        wrmsr(MSR_STAR, msr);
        wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);

        getmemsize(kmdp, physfree);
        init_param2(physmem);

        /* Now running on new page tables, configured, and u/iom is accessible. */

        /* Map the message buffer. */
        for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
                pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);

        msgbufinit(msgbufp, MSGBUF_SIZE);
        fpuinit();

        /* transfer to user mode */

        _ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
        _udatasel = GSEL(GUDATA_SEL, SEL_UPL);
        _ucode32sel = GSEL(GUCODE32_SEL, SEL_UPL);

        /* setup proc 0's pcb */
        thread0.td_pcb->pcb_flags = 0; /* XXXKSE */
        thread0.td_pcb->pcb_cr3 = KPML4phys;
        thread0.td_frame = &proc0_tf;

        env = getenv("kernelname");
        if (env != NULL)
                strlcpy(kernelname, env, sizeof(kernelname));

        /* Location of kernel stack for locore */
        return ((u_int64_t)thread0.td_pcb);
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

        pcpu->pc_acpi_id = 0xffffffff;
}

int
ptrace_set_pc(struct thread *td, unsigned long addr)
{

        td->td_frame->tf_rip = addr;
        return (0);
}

int
ptrace_single_step(struct thread *td)
{

        td->td_frame->tf_rflags |= PSL_T;
        return (0);
}

int
fill_regs(struct thread *td, struct reg *regs)
{
        struct pcb *pcb;
        struct trapframe *tp;

        tp = td->td_frame;
        regs->r_r15 = tp->tf_r15;
        regs->r_r14 = tp->tf_r14;
        regs->r_r13 = tp->tf_r13;
        regs->r_r12 = tp->tf_r12;
        regs->r_r11 = tp->tf_r11;
        regs->r_r10 = tp->tf_r10;
        regs->r_r9  = tp->tf_r9;
        regs->r_r8  = tp->tf_r8;
        regs->r_rdi = tp->tf_rdi;
        regs->r_rsi = tp->tf_rsi;
        regs->r_rbp = tp->tf_rbp;
        regs->r_rbx = tp->tf_rbx;
        regs->r_rdx = tp->tf_rdx;
        regs->r_rcx = tp->tf_rcx;
        regs->r_rax = tp->tf_rax;
        regs->r_rip = tp->tf_rip;
        regs->r_cs = tp->tf_cs;
        regs->r_rflags = tp->tf_rflags;
        regs->r_rsp = tp->tf_rsp;
        regs->r_ss = tp->tf_ss;
        pcb = td->td_pcb;
        return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
        struct pcb *pcb;
        struct trapframe *tp;

        tp = td->td_frame;
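        /*
         * Refuse rflags and %cs values that would grant the thread
         * extra privilege; see the CS_SECURE() and EFL_SECURE()
         * macros near the top of this file.
         */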
        if (!EFL_SECURE(regs->r_rflags, tp->tf_rflags) ||
            !CS_SECURE(regs->r_cs))
                return (EINVAL);
        tp->tf_r15 = regs->r_r15;
        tp->tf_r14 = regs->r_r14;
        tp->tf_r13 = regs->r_r13;
        tp->tf_r12 = regs->r_r12;
        tp->tf_r11 = regs->r_r11;
        tp->tf_r10 = regs->r_r10;
        tp->tf_r9  = regs->r_r9;
        tp->tf_r8  = regs->r_r8;
        tp->tf_rdi = regs->r_rdi;
        tp->tf_rsi = regs->r_rsi;
        tp->tf_rbp = regs->r_rbp;
        tp->tf_rbx = regs->r_rbx;
        tp->tf_rdx = regs->r_rdx;
        tp->tf_rcx = regs->r_rcx;
        tp->tf_rax = regs->r_rax;
        tp->tf_rip = regs->r_rip;
        tp->tf_cs = regs->r_cs;
        tp->tf_rflags = regs->r_rflags;
        tp->tf_rsp = regs->r_rsp;
        tp->tf_ss = regs->r_ss;
        pcb = td->td_pcb;
        return (0);
}

/* XXX check all this stuff! */
/* externalize from sv_xmm */
static void
fill_fpregs_xmm(struct savefpu *sv_xmm, struct fpreg *fpregs)
{
        struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
        struct envxmm *penv_xmm = &sv_xmm->sv_env;
        int i;

        /* pcb -> fpregs */
        bzero(fpregs, sizeof(*fpregs));

        /* FPU control/status */
        penv_fpreg->en_cw = penv_xmm->en_cw;
        penv_fpreg->en_sw = penv_xmm->en_sw;
        penv_fpreg->en_tw = penv_xmm->en_tw;
        penv_fpreg->en_opcode = penv_xmm->en_opcode;
        penv_fpreg->en_rip = penv_xmm->en_rip;
        penv_fpreg->en_rdp = penv_xmm->en_rdp;
        penv_fpreg->en_mxcsr = penv_xmm->en_mxcsr;
        penv_fpreg->en_mxcsr_mask = penv_xmm->en_mxcsr_mask;

        /* FPU registers */
        for (i = 0; i < 8; ++i)
                bcopy(sv_xmm->sv_fp[i].fp_acc.fp_bytes, fpregs->fpr_acc[i], 10);

        /* SSE registers */
        for (i = 0; i < 16; ++i)
                bcopy(sv_xmm->sv_xmm[i].xmm_bytes, fpregs->fpr_xacc[i], 16);
}

/* internalize from fpregs into sv_xmm */
static void
set_fpregs_xmm(struct fpreg *fpregs, struct savefpu *sv_xmm)
{
        struct envxmm *penv_xmm = &sv_xmm->sv_env;
        struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
        int i;

        /* fpregs -> pcb */
        /* FPU control/status */
        penv_xmm->en_cw = penv_fpreg->en_cw;
        penv_xmm->en_sw = penv_fpreg->en_sw;
        penv_xmm->en_tw = penv_fpreg->en_tw;
        penv_xmm->en_opcode = penv_fpreg->en_opcode;
        penv_xmm->en_rip = penv_fpreg->en_rip;
        penv_xmm->en_rdp = penv_fpreg->en_rdp;
        penv_xmm->en_mxcsr = penv_fpreg->en_mxcsr;
        penv_xmm->en_mxcsr_mask = penv_fpreg->en_mxcsr_mask;

        /* FPU registers */
        for (i = 0; i < 8; ++i)
                bcopy(fpregs->fpr_acc[i], sv_xmm->sv_fp[i].fp_acc.fp_bytes, 10);

        /* SSE registers */
        for (i = 0; i < 16; ++i)
                bcopy(fpregs->fpr_xacc[i], sv_xmm->sv_xmm[i].xmm_bytes, 16);
}

/* externalize from td->pcb */
int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{

        fill_fpregs_xmm(&td->td_pcb->pcb_save, fpregs);
        return (0);
}

/* internalize to td->pcb */
int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{

        set_fpregs_xmm(fpregs, &td->td_pcb->pcb_save);
        return (0);
}

/*
 * Get machine context.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
{
        struct trapframe *tp;

        tp = td->td_frame;
        PROC_LOCK(curthread->td_proc);
        mcp->mc_onstack = sigonstack(tp->tf_rsp);
        PROC_UNLOCK(curthread->td_proc);
        mcp->mc_r15 = tp->tf_r15;
        mcp->mc_r14 = tp->tf_r14;
        mcp->mc_r13 = tp->tf_r13;
        mcp->mc_r12 = tp->tf_r12;
        mcp->mc_r11 = tp->tf_r11;
        mcp->mc_r10 = tp->tf_r10;
        mcp->mc_r9  = tp->tf_r9;
        mcp->mc_r8  = tp->tf_r8;
        mcp->mc_rdi = tp->tf_rdi;
        mcp->mc_rsi = tp->tf_rsi;
        mcp->mc_rbp = tp->tf_rbp;
        mcp->mc_rbx = tp->tf_rbx;
        mcp->mc_rcx = tp->tf_rcx;
        if (flags & GET_MC_CLEAR_RET) {
                mcp->mc_rax = 0;
                mcp->mc_rdx = 0;
        } else {
                mcp->mc_rax = tp->tf_rax;
                mcp->mc_rdx = tp->tf_rdx;
        }
        mcp->mc_rip = tp->tf_rip;
        mcp->mc_cs = tp->tf_cs;
        mcp->mc_rflags = tp->tf_rflags;
        mcp->mc_rsp = tp->tf_rsp;
        mcp->mc_ss = tp->tf_ss;
        mcp->mc_len = sizeof(*mcp);
        get_fpcontext(td, mcp);
        return (0);
}

/*
 * Set machine context.
 *
 * However, we don't set any but the user modifiable flags, and we won't
 * touch the cs selector.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mcp)
{
        struct trapframe *tp;
        long rflags;
        int ret;

        tp = td->td_frame;
        if (mcp->mc_len != sizeof(*mcp))
                return (EINVAL);
        rflags = (mcp->mc_rflags & PSL_USERCHANGE) |
            (tp->tf_rflags & ~PSL_USERCHANGE);
        ret = set_fpcontext(td, mcp);
        if (ret != 0)
                return (ret);
        tp->tf_r15 = mcp->mc_r15;
        tp->tf_r14 = mcp->mc_r14;
        tp->tf_r13 = mcp->mc_r13;
        tp->tf_r12 = mcp->mc_r12;
        tp->tf_r11 = mcp->mc_r11;
        tp->tf_r10 = mcp->mc_r10;
        tp->tf_r9  = mcp->mc_r9;
        tp->tf_r8  = mcp->mc_r8;
        tp->tf_rdi = mcp->mc_rdi;
        tp->tf_rsi = mcp->mc_rsi;
        tp->tf_rbp = mcp->mc_rbp;
        tp->tf_rbx = mcp->mc_rbx;
        tp->tf_rdx = mcp->mc_rdx;
        tp->tf_rcx = mcp->mc_rcx;
        tp->tf_rax = mcp->mc_rax;
        tp->tf_rip = mcp->mc_rip;
        tp->tf_rflags = rflags;
        tp->tf_rsp = mcp->mc_rsp;
        tp->tf_ss = mcp->mc_ss;
        return (0);
}

static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{

        mcp->mc_ownedfp = fpugetregs(td, (struct savefpu *)&mcp->mc_fpstate);
        mcp->mc_fpformat = fpuformat();
}

static int
set_fpcontext(struct thread *td, const mcontext_t *mcp)
{

        if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
                return (0);
        else if (mcp->mc_fpformat != _MC_FPFMT_XMM)
                return (EINVAL);
        else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
                /* We don't care what state is left in the FPU or PCB. */
                fpstate_drop(td);
        else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
            mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
                /*
                 * XXX we violate the dubious requirement that fpusetregs()
                 * be called with interrupts disabled.
                 * XXX obsolete on trap-16 systems?
                 */
                fpusetregs(td, (struct savefpu *)&mcp->mc_fpstate);
        } else
                return (EINVAL);
        return (0);
}

void
fpstate_drop(struct thread *td)
{
        register_t s;

        s = intr_disable();
        if (PCPU_GET(fpcurthread) == td)
                fpudrop();
        /*
         * XXX force a full drop of the fpu.  The above only drops it if we
         * owned it.
         *
         * XXX I don't much like fpugetregs()'s semantics of doing a full
         * drop.  Dropping only to the pcb matches fnsave's behaviour.
         * We only need to drop to !PCB_INITDONE in sendsig().  But
         * sendsig() is the only caller of fpugetregs()... perhaps we just
         * have too many layers.
         */
        curthread->td_pcb->pcb_flags &= ~PCB_FPUINITDONE;
        intr_restore(s);
}

int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{
        struct pcb *pcb;

        if (td == NULL) {
                dbregs->dr[0] = rdr0();
                dbregs->dr[1] = rdr1();
                dbregs->dr[2] = rdr2();
                dbregs->dr[3] = rdr3();
                dbregs->dr[6] = rdr6();
                dbregs->dr[7] = rdr7();
        } else {
                pcb = td->td_pcb;
                dbregs->dr[0] = pcb->pcb_dr0;
                dbregs->dr[1] = pcb->pcb_dr1;
                dbregs->dr[2] = pcb->pcb_dr2;
                dbregs->dr[3] = pcb->pcb_dr3;
                dbregs->dr[6] = pcb->pcb_dr6;
                dbregs->dr[7] = pcb->pcb_dr7;
        }
        dbregs->dr[4] = 0;
        dbregs->dr[5] = 0;
        dbregs->dr[8] = 0;
        dbregs->dr[9] = 0;
        dbregs->dr[10] = 0;
        dbregs->dr[11] = 0;
        dbregs->dr[12] = 0;
        dbregs->dr[13] = 0;
        dbregs->dr[14] = 0;
        dbregs->dr[15] = 0;
        return (0);
}

int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{
        struct pcb *pcb;
        int i;
        u_int64_t mask1, mask2;

        if (td == NULL) {
                load_dr0(dbregs->dr[0]);
                load_dr1(dbregs->dr[1]);
                load_dr2(dbregs->dr[2]);
                load_dr3(dbregs->dr[3]);
                load_dr6(dbregs->dr[6]);
                load_dr7(dbregs->dr[7]);
        } else {
                /*
                 * Don't let an illegal value for dr7 get set.  Specifically,
                 * check for undefined settings.  Setting these bit patterns
                 * results in undefined behaviour and can lead to an
                 * unexpected TRCTRAP or a general protection fault right
                 * here.
                 */
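                /*
                 * mask1 walks each 2-bit R/W field of %dr7 in turn;
                 * the 10b pattern (mask2) selects I/O breakpoints,
                 * which are undefined unless the CR4 debug extensions
                 * are enabled.
                 */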
                for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 8;
                     i++, mask1 <<= 2, mask2 <<= 2)
                        if ((dbregs->dr[7] & mask1) == mask2)
                                return (EINVAL);

                pcb = td->td_pcb;

                /*
                 * Don't let a process set a breakpoint that is not within the
                 * process's address space.  If a process could do this, it
                 * could halt the system by setting a breakpoint in the kernel
                 * (if ddb was enabled).  Thus, we need to check to make sure
                 * that no breakpoints are being enabled for addresses outside
                 * the process's address space, unless, perhaps, we were
                 * called by uid 0.
                 *
                 * XXX - what about when the watched area of the user's
                 * address space is written into from within the kernel
                 * ... wouldn't that still cause a breakpoint to be generated
                 * from within kernel mode?
                 */

                if (suser(td) != 0) {
                        if (dbregs->dr[7] & 0x3) {
                                /* dr0 is enabled */
                                if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
                                        return (EINVAL);
                        }
                        if (dbregs->dr[7] & 0x3<<2) {
                                /* dr1 is enabled */
                                if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
                                        return (EINVAL);
                        }
                        if (dbregs->dr[7] & 0x3<<4) {
                                /* dr2 is enabled */
                                if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
                                        return (EINVAL);
                        }
                        if (dbregs->dr[7] & 0x3<<6) {
                                /* dr3 is enabled */
                                if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
                                        return (EINVAL);
                        }
                }

                pcb->pcb_dr0 = dbregs->dr[0];
                pcb->pcb_dr1 = dbregs->dr[1];
                pcb->pcb_dr2 = dbregs->dr[2];
                pcb->pcb_dr3 = dbregs->dr[3];
                pcb->pcb_dr6 = dbregs->dr[6];
                pcb->pcb_dr7 = dbregs->dr[7];

                pcb->pcb_flags |= PCB_DBREGS;
        }

        return (0);
}

void
reset_dbregs(void)
{

        load_dr7(0);    /* Turn off the control bits first */
        load_dr0(0);
        load_dr1(0);
        load_dr2(0);
        load_dr3(0);
        load_dr6(0);
}

/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0, otherwise.
 */
int
user_dbreg_trap(void)
{
        u_int64_t dr7, dr6;     /* debug registers dr6 and dr7 */
        u_int64_t bp;           /* breakpoint bits extracted from dr6 */
        int nbp;                /* number of breakpoints that triggered */
        caddr_t addr[4];        /* breakpoint addresses */
        int i;

        dr7 = rdr7();
        if ((dr7 & 0x000000ff) == 0) {
                /*
                 * All GE and LE bits in the dr7 register are zero,
                 * thus the trap couldn't have been caused by the
                 * hardware debug registers.
                 */
                return 0;
        }

        nbp = 0;
        dr6 = rdr6();
        bp = dr6 & 0x0000000f;

        if (!bp) {
                /*
                 * None of the breakpoint bits are set, meaning this
                 * trap was not caused by any of the debug registers.
                 */
                return 0;
        }

        /*
         * At least one of the breakpoints was hit; check to see
         * which ones and whether any of them are user space addresses.
         */

        if (bp & 0x01) {
                addr[nbp++] = (caddr_t)rdr0();
        }
        if (bp & 0x02) {
                addr[nbp++] = (caddr_t)rdr1();
        }
        if (bp & 0x04) {
                addr[nbp++] = (caddr_t)rdr2();
        }
        if (bp & 0x08) {
                addr[nbp++] = (caddr_t)rdr3();
        }

        for (i = 0; i < nbp; i++) {
                if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
                        /*
                         * addr[i] is in user space
                         */
                        return nbp;
                }
        }

        /*
         * None of the breakpoints are in user space.
         */
        return 0;
}

#ifndef DDB
void
Debugger(const char *msg)
{

        printf("Debugger(\"%s\") called.\n", msg);
}
#endif /* no DDB */

#ifdef DDB

/*
 * Provide inb() and outb() as functions.  They are normally only
 * available as macros calling inlined functions, thus cannot be
 * called inside DDB.
 *
 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined.
 */

#undef inb
#undef outb

/* silence compiler warnings */
u_char inb(u_int);
void outb(u_int, u_char);

u_char
inb(u_int port)
{
        u_char  data;
        /*
         * We use %%dx and not %1 here because i/o is done at %dx and not at
         * %edx, while gcc generates inferior code (movw instead of movl)
         * if we tell it to load (u_short) port.
         */
        __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
        return (data);
}

void
outb(u_int port, u_char data)
{
        u_char  al;
        /*
         * Use an unnecessary assignment to help gcc's register allocator.
         * This makes a large difference for gcc-1.40 and a tiny difference
         * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
         * best results.  gcc-2.6.0 can't handle this.
         */
        al = data;
        __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

#endif /* DDB */