machdep.c revision 133431
1/*- 2 * Copyright (c) 2003 Peter Wemm. 3 * Copyright (c) 1992 Terrence R. Lambert. 4 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * William Jolitz. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 37 * 38 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 39 */ 40 41#include <sys/cdefs.h> 42__FBSDID("$FreeBSD: head/sys/amd64/amd64/machdep.c 133431 2004-08-10 12:15:27Z davidxu $"); 43 44#include "opt_atalk.h" 45#include "opt_atpic.h" 46#include "opt_compat.h" 47#include "opt_cpu.h" 48#include "opt_ddb.h" 49#include "opt_inet.h" 50#include "opt_ipx.h" 51#include "opt_isa.h" 52#include "opt_kstack_pages.h" 53#include "opt_maxmem.h" 54#include "opt_msgbuf.h" 55#include "opt_perfmon.h" 56 57#include <sys/param.h> 58#include <sys/systm.h> 59#include <sys/sysproto.h> 60#include <sys/signalvar.h> 61#include <sys/imgact.h> 62#include <sys/kdb.h> 63#include <sys/kernel.h> 64#include <sys/ktr.h> 65#include <sys/linker.h> 66#include <sys/lock.h> 67#include <sys/malloc.h> 68#include <sys/memrange.h> 69#include <sys/mutex.h> 70#include <sys/pcpu.h> 71#include <sys/proc.h> 72#include <sys/bio.h> 73#include <sys/buf.h> 74#include <sys/reboot.h> 75#include <sys/callout.h> 76#include <sys/msgbuf.h> 77#include <sys/sched.h> 78#include <sys/sysent.h> 79#include <sys/sysctl.h> 80#include <sys/ucontext.h> 81#include <sys/vmmeter.h> 82#include <sys/bus.h> 83#include <sys/eventhandler.h> 84 85#include <vm/vm.h> 86#include <vm/vm_param.h> 87#include <vm/vm_kern.h> 88#include <vm/vm_object.h> 89#include <vm/vm_page.h> 90#include <vm/vm_map.h> 91#include <vm/vm_pager.h> 92#include <vm/vm_extern.h> 93 94#include <sys/user.h> 95#include <sys/exec.h> 96#include <sys/cons.h> 97 98#include <ddb/ddb.h> 99 100#include <net/netisr.h> 101 102#include <machine/cpu.h> 103#include <machine/cputypes.h> 104#include <machine/reg.h> 105#include <machine/clock.h> 106#include <machine/specialreg.h> 107#include <machine/intr_machdep.h> 108#include <machine/md_var.h> 109#include <machine/metadata.h> 110#include <machine/proc.h> 111#ifdef PERFMON 112#include <machine/perfmon.h> 113#endif 114#include <machine/tss.h> 115#ifdef SMP 116#include <machine/smp.h> 117#endif 118 119#include <amd64/isa/icu.h> 120 121#include <isa/isareg.h> 122#include <isa/rtc.h> 123#include <sys/ptrace.h> 124#include <machine/sigframe.h> 125 126/* Sanity check for __curthread() */ 127CTASSERT(offsetof(struct pcpu, pc_curthread) == 0); 128 129extern u_int64_t hammer_time(u_int64_t, u_int64_t); 130extern void dblfault_handler(void); 131 132extern void printcpuinfo(void); /* XXX header file */ 133extern void identify_cpu(void); 134extern void panicifcpuunsupported(void); 135 136#define CS_SECURE(cs) (ISPL(cs) == SEL_UPL) 137#define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0) 138 139static void cpu_startup(void *); 140static void get_fpcontext(struct thread *td, mcontext_t *mcp); 141static int set_fpcontext(struct thread *td, const mcontext_t *mcp); 142SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL) 143 144#ifdef DDB 145extern vm_offset_t ksym_start, ksym_end; 146#endif 147 148int _udatasel, _ucodesel, _ucode32sel; 149 150int cold = 1; 151 152long Maxmem = 0; 153 154vm_paddr_t phys_avail[20]; 155 156/* must be 2 less so 0 0 can signal end of chunks */ 157#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2) 158 159struct kva_md_info kmi; 160 161static struct trapframe proc0_tf; 162struct region_descriptor r_gdt, r_idt; 163 164struct pcpu __pcpu[MAXCPU]; 165 166struct mtx icu_lock; 167 168struct mem_range_softc mem_range_softc; 169 170static void 171cpu_startup(dummy) 172 void *dummy; 173{ 174 /* 175 * Good {morning,afternoon,evening,night}. 176 */ 177 startrtclock(); 178 printcpuinfo(); 179 panicifcpuunsupported(); 180#ifdef PERFMON 181 perfmon_init(); 182#endif 183 printf("real memory = %ju (%ju MB)\n", ptoa((uintmax_t)Maxmem), 184 ptoa((uintmax_t)Maxmem) / 1048576); 185 /* 186 * Display any holes after the first chunk of extended memory. 187 */ 188 if (bootverbose) { 189 int indx; 190 191 printf("Physical memory chunk(s):\n"); 192 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) { 193 vm_paddr_t size; 194 195 size = phys_avail[indx + 1] - phys_avail[indx]; 196 printf( 197 "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n", 198 (uintmax_t)phys_avail[indx], 199 (uintmax_t)phys_avail[indx + 1] - 1, 200 (uintmax_t)size, (uintmax_t)size / PAGE_SIZE); 201 } 202 } 203 204 vm_ksubmap_init(&kmi); 205 206 printf("avail memory = %ju (%ju MB)\n", 207 ptoa((uintmax_t)cnt.v_free_count), 208 ptoa((uintmax_t)cnt.v_free_count) / 1048576); 209 210 /* 211 * Set up buffers, so they can be used to read disk labels. 212 */ 213 bufinit(); 214 vm_pager_bufferinit(); 215 216 cpu_setregs(); 217} 218 219/* 220 * Send an interrupt to process. 221 * 222 * Stack is set up to allow sigcode stored 223 * at top to call routine, followed by kcall 224 * to sigreturn routine below. After sigreturn 225 * resets the signal mask, the stack, and the 226 * frame pointer, it returns to the user 227 * specified pc, psl. 228 */ 229void 230sendsig(catcher, sig, mask, code) 231 sig_t catcher; 232 int sig; 233 sigset_t *mask; 234 u_long code; 235{ 236 struct sigframe sf, *sfp; 237 struct proc *p; 238 struct thread *td; 239 struct sigacts *psp; 240 char *sp; 241 struct trapframe *regs; 242 int oonstack; 243 244 td = curthread; 245 p = td->td_proc; 246 PROC_LOCK_ASSERT(p, MA_OWNED); 247 psp = p->p_sigacts; 248 mtx_assert(&psp->ps_mtx, MA_OWNED); 249 regs = td->td_frame; 250 oonstack = sigonstack(regs->tf_rsp); 251 252 /* Save user context. */ 253 bzero(&sf, sizeof(sf)); 254 sf.sf_uc.uc_sigmask = *mask; 255 sf.sf_uc.uc_stack = td->td_sigstk; 256 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) 257 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE; 258 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0; 259 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(*regs)); 260 sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */ 261 get_fpcontext(td, &sf.sf_uc.uc_mcontext); 262 fpstate_drop(td); 263 264 /* Allocate space for the signal handler context. */ 265 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack && 266 SIGISMEMBER(psp->ps_sigonstack, sig)) { 267 sp = td->td_sigstk.ss_sp + 268 td->td_sigstk.ss_size - sizeof(struct sigframe); 269#if defined(COMPAT_43) 270 td->td_sigstk.ss_flags |= SS_ONSTACK; 271#endif 272 } else 273 sp = (char *)regs->tf_rsp - sizeof(struct sigframe) - 128; 274 /* Align to 16 bytes. */ 275 sfp = (struct sigframe *)((unsigned long)sp & ~0xFul); 276 277 /* Translate the signal if appropriate. */ 278 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize) 279 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; 280 281 /* Build the argument list for the signal handler. */ 282 regs->tf_rdi = sig; /* arg 1 in %rdi */ 283 regs->tf_rdx = (register_t)&sfp->sf_uc; /* arg 3 in %rdx */ 284 if (SIGISMEMBER(psp->ps_siginfo, sig)) { 285 /* Signal handler installed with SA_SIGINFO. */ 286 regs->tf_rsi = (register_t)&sfp->sf_si; /* arg 2 in %rsi */ 287 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; 288 289 /* Fill in POSIX parts */ 290 sf.sf_si.si_signo = sig; 291 sf.sf_si.si_code = code; 292 regs->tf_rcx = regs->tf_addr; /* arg 4 in %rcx */ 293 } else { 294 /* Old FreeBSD-style arguments. */ 295 regs->tf_rsi = code; /* arg 2 in %rsi */ 296 regs->tf_rcx = regs->tf_addr; /* arg 4 in %rcx */ 297 sf.sf_ahu.sf_handler = catcher; 298 } 299 mtx_unlock(&psp->ps_mtx); 300 PROC_UNLOCK(p); 301 302 /* 303 * Copy the sigframe out to the user's stack. 304 */ 305 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) { 306#ifdef DEBUG 307 printf("process %ld has trashed its stack\n", (long)p->p_pid); 308#endif 309 PROC_LOCK(p); 310 sigexit(td, SIGILL); 311 } 312 313 regs->tf_rsp = (long)sfp; 314 regs->tf_rip = PS_STRINGS - *(p->p_sysent->sv_szsigcode); 315 regs->tf_rflags &= ~PSL_T; 316 regs->tf_cs = _ucodesel; 317 PROC_LOCK(p); 318 mtx_lock(&psp->ps_mtx); 319} 320 321/* 322 * Build siginfo_t for SA thread 323 */ 324void 325cpu_thread_siginfo(int sig, u_long code, siginfo_t *si) 326{ 327 struct proc *p; 328 struct thread *td; 329 struct trapframe *regs; 330 331 td = curthread; 332 p = td->td_proc; 333 regs = td->td_frame; 334 PROC_LOCK_ASSERT(p, MA_OWNED); 335 336 bzero(si, sizeof(*si)); 337 si->si_signo = sig; 338 si->si_code = code; 339 si->si_addr = (void *)regs->tf_addr; 340 /* XXXKSE fill other fields */ 341} 342 343/* 344 * System call to cleanup state after a signal 345 * has been taken. Reset signal mask and 346 * stack state from context left by sendsig (above). 347 * Return to previous pc and psl as specified by 348 * context left by sendsig. Check carefully to 349 * make sure that the user has not modified the 350 * state to gain improper privileges. 351 * 352 * MPSAFE 353 */ 354int 355sigreturn(td, uap) 356 struct thread *td; 357 struct sigreturn_args /* { 358 const __ucontext *sigcntxp; 359 } */ *uap; 360{ 361 ucontext_t uc; 362 struct proc *p = td->td_proc; 363 struct trapframe *regs; 364 const ucontext_t *ucp; 365 long rflags; 366 int cs, error, ret; 367 368 error = copyin(uap->sigcntxp, &uc, sizeof(uc)); 369 if (error != 0) 370 return (error); 371 ucp = &uc; 372 regs = td->td_frame; 373 rflags = ucp->uc_mcontext.mc_rflags; 374 /* 375 * Don't allow users to change privileged or reserved flags. 376 */ 377 /* 378 * XXX do allow users to change the privileged flag PSL_RF. 379 * The cpu sets PSL_RF in tf_rflags for faults. Debuggers 380 * should sometimes set it there too. tf_rflags is kept in 381 * the signal context during signal handling and there is no 382 * other place to remember it, so the PSL_RF bit may be 383 * corrupted by the signal handler without us knowing. 384 * Corruption of the PSL_RF bit at worst causes one more or 385 * one less debugger trap, so allowing it is fairly harmless. 386 */ 387 if (!EFL_SECURE(rflags & ~PSL_RF, regs->tf_rflags & ~PSL_RF)) { 388 printf("sigreturn: rflags = 0x%lx\n", rflags); 389 return (EINVAL); 390 } 391 392 /* 393 * Don't allow users to load a valid privileged %cs. Let the 394 * hardware check for invalid selectors, excess privilege in 395 * other selectors, invalid %eip's and invalid %esp's. 396 */ 397 cs = ucp->uc_mcontext.mc_cs; 398 if (!CS_SECURE(cs)) { 399 printf("sigreturn: cs = 0x%x\n", cs); 400 trapsignal(td, SIGBUS, T_PROTFLT); 401 return (EINVAL); 402 } 403 404 ret = set_fpcontext(td, &ucp->uc_mcontext); 405 if (ret != 0) 406 return (ret); 407 bcopy(&ucp->uc_mcontext.mc_rdi, regs, sizeof(*regs)); 408 409 PROC_LOCK(p); 410#if defined(COMPAT_43) 411 if (ucp->uc_mcontext.mc_onstack & 1) 412 td->td_sigstk.ss_flags |= SS_ONSTACK; 413 else 414 td->td_sigstk.ss_flags &= ~SS_ONSTACK; 415#endif 416 417 td->td_sigmask = ucp->uc_sigmask; 418 SIG_CANTMASK(td->td_sigmask); 419 signotify(td); 420 PROC_UNLOCK(p); 421 td->td_pcb->pcb_flags |= PCB_FULLCTX; 422 return (EJUSTRETURN); 423} 424 425#ifdef COMPAT_FREEBSD4 426int 427freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap) 428{ 429 430 return sigreturn(td, (struct sigreturn_args *)uap); 431} 432#endif 433 434 435/* 436 * Machine dependent boot() routine 437 * 438 * I haven't seen anything to put here yet 439 * Possibly some stuff might be grafted back here from boot() 440 */ 441void 442cpu_boot(int howto) 443{ 444} 445 446/* 447 * Shutdown the CPU as much as possible 448 */ 449void 450cpu_halt(void) 451{ 452 for (;;) 453 __asm__ ("hlt"); 454} 455 456/* 457 * Hook to idle the CPU when possible. In the SMP case we default to 458 * off because a halted cpu will not currently pick up a new thread in the 459 * run queue until the next timer tick. If turned on this will result in 460 * approximately a 4.2% loss in real time performance in buildworld tests 461 * (but improves user and sys times oddly enough), and saves approximately 462 * 5% in power consumption on an idle machine (tests w/2xCPU 1.1GHz P3). 463 * 464 * XXX we need to have a cpu mask of idle cpus and generate an IPI or 465 * otherwise generate some sort of interrupt to wake up cpus sitting in HLT. 466 * Then we can have our cake and eat it too. 467 * 468 * XXX I'm turning it on for SMP as well by default for now. It seems to 469 * help lock contention somewhat, and this is critical for HTT. -Peter 470 */ 471static int cpu_idle_hlt = 1; 472SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW, 473 &cpu_idle_hlt, 0, "Idle loop HLT enable"); 474 475static void 476cpu_idle_default(void) 477{ 478 /* 479 * we must absolutely guarentee that hlt is the 480 * absolute next instruction after sti or we 481 * introduce a timing window. 482 */ 483 __asm __volatile("sti; hlt"); 484} 485 486/* 487 * Note that we have to be careful here to avoid a race between checking 488 * sched_runnable() and actually halting. If we don't do this, we may waste 489 * the time between calling hlt and the next interrupt even though there 490 * is a runnable process. 491 */ 492void 493cpu_idle(void) 494{ 495 496 if (cpu_idle_hlt) { 497 disable_intr(); 498 if (sched_runnable()) 499 enable_intr(); 500 else 501 (*cpu_idle_hook)(); 502 } 503} 504 505/* Other subsystems (e.g., ACPI) can hook this later. */ 506void (*cpu_idle_hook)(void) = cpu_idle_default; 507 508/* 509 * Clear registers on exec 510 */ 511void 512exec_setregs(td, entry, stack, ps_strings) 513 struct thread *td; 514 u_long entry; 515 u_long stack; 516 u_long ps_strings; 517{ 518 struct trapframe *regs = td->td_frame; 519 struct pcb *pcb = td->td_pcb; 520 521 wrmsr(MSR_FSBASE, 0); 522 wrmsr(MSR_KGSBASE, 0); /* User value while we're in the kernel */ 523 pcb->pcb_fsbase = 0; 524 pcb->pcb_gsbase = 0; 525 load_ds(_udatasel); 526 load_es(_udatasel); 527 load_fs(_udatasel); 528 load_gs(_udatasel); 529 pcb->pcb_ds = _udatasel; 530 pcb->pcb_es = _udatasel; 531 pcb->pcb_fs = _udatasel; 532 pcb->pcb_gs = _udatasel; 533 534 bzero((char *)regs, sizeof(struct trapframe)); 535 regs->tf_rip = entry; 536 regs->tf_rsp = ((stack - 8) & ~0xFul) + 8; 537 regs->tf_rdi = stack; /* argv */ 538 regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T); 539 regs->tf_ss = _udatasel; 540 regs->tf_cs = _ucodesel; 541 542 /* 543 * Reset the hardware debug registers if they were in use. 544 * They won't have any meaning for the newly exec'd process. 545 */ 546 if (pcb->pcb_flags & PCB_DBREGS) { 547 pcb->pcb_dr0 = 0; 548 pcb->pcb_dr1 = 0; 549 pcb->pcb_dr2 = 0; 550 pcb->pcb_dr3 = 0; 551 pcb->pcb_dr6 = 0; 552 pcb->pcb_dr7 = 0; 553 if (pcb == PCPU_GET(curpcb)) { 554 /* 555 * Clear the debug registers on the running 556 * CPU, otherwise they will end up affecting 557 * the next process we switch to. 558 */ 559 reset_dbregs(); 560 } 561 pcb->pcb_flags &= ~PCB_DBREGS; 562 } 563 564 /* 565 * Drop the FP state if we hold it, so that the process gets a 566 * clean FP state if it uses the FPU again. 567 */ 568 fpstate_drop(td); 569} 570 571void 572cpu_setregs(void) 573{ 574 register_t cr0; 575 576 cr0 = rcr0(); 577 /* 578 * CR0_MP, CR0_NE and CR0_TS are also set by npx_probe() for the 579 * BSP. See the comments there about why we set them. 580 */ 581 cr0 |= CR0_MP | CR0_NE | CR0_TS; 582 cr0 |= CR0_WP | CR0_AM; 583 load_cr0(cr0); 584} 585 586static int 587sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS) 588{ 589 int error; 590 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, 591 req); 592 if (!error && req->newptr) 593 resettodr(); 594 return (error); 595} 596 597SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW, 598 &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", ""); 599 600SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set, 601 CTLFLAG_RW, &disable_rtc_set, 0, ""); 602 603SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock, 604 CTLFLAG_RW, &wall_cmos_clock, 0, ""); 605 606/* 607 * Initialize 386 and configure to run kernel 608 */ 609 610/* 611 * Initialize segments & interrupt table 612 */ 613 614struct user_segment_descriptor gdt[NGDT * MAXCPU];/* global descriptor table */ 615static struct gate_descriptor idt0[NIDT]; 616struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */ 617 618static char dblfault_stack[PAGE_SIZE] __aligned(16); 619 620struct amd64tss common_tss[MAXCPU]; 621 622/* software prototypes -- in more palatable form */ 623struct soft_segment_descriptor gdt_segs[] = { 624/* GNULL_SEL 0 Null Descriptor */ 625{ 0x0, /* segment base address */ 626 0x0, /* length */ 627 0, /* segment type */ 628 0, /* segment descriptor priority level */ 629 0, /* segment descriptor present */ 630 0, /* long */ 631 0, /* default 32 vs 16 bit size */ 632 0 /* limit granularity (byte/page units)*/ }, 633/* GCODE_SEL 1 Code Descriptor for kernel */ 634{ 0x0, /* segment base address */ 635 0xfffff, /* length - all address space */ 636 SDT_MEMERA, /* segment type */ 637 SEL_KPL, /* segment descriptor priority level */ 638 1, /* segment descriptor present */ 639 1, /* long */ 640 0, /* default 32 vs 16 bit size */ 641 1 /* limit granularity (byte/page units)*/ }, 642/* GDATA_SEL 2 Data Descriptor for kernel */ 643{ 0x0, /* segment base address */ 644 0xfffff, /* length - all address space */ 645 SDT_MEMRWA, /* segment type */ 646 SEL_KPL, /* segment descriptor priority level */ 647 1, /* segment descriptor present */ 648 1, /* long */ 649 0, /* default 32 vs 16 bit size */ 650 1 /* limit granularity (byte/page units)*/ }, 651/* GUCODE32_SEL 3 32 bit Code Descriptor for user */ 652{ 0x0, /* segment base address */ 653 0xfffff, /* length - all address space */ 654 SDT_MEMERA, /* segment type */ 655 SEL_UPL, /* segment descriptor priority level */ 656 1, /* segment descriptor present */ 657 0, /* long */ 658 1, /* default 32 vs 16 bit size */ 659 1 /* limit granularity (byte/page units)*/ }, 660/* GUDATA_SEL 4 32/64 bit Data Descriptor for user */ 661{ 0x0, /* segment base address */ 662 0xfffff, /* length - all address space */ 663 SDT_MEMRWA, /* segment type */ 664 SEL_UPL, /* segment descriptor priority level */ 665 1, /* segment descriptor present */ 666 0, /* long */ 667 1, /* default 32 vs 16 bit size */ 668 1 /* limit granularity (byte/page units)*/ }, 669/* GUCODE_SEL 5 64 bit Code Descriptor for user */ 670{ 0x0, /* segment base address */ 671 0xfffff, /* length - all address space */ 672 SDT_MEMERA, /* segment type */ 673 SEL_UPL, /* segment descriptor priority level */ 674 1, /* segment descriptor present */ 675 1, /* long */ 676 0, /* default 32 vs 16 bit size */ 677 1 /* limit granularity (byte/page units)*/ }, 678/* GPROC0_SEL 6 Proc 0 Tss Descriptor */ 679{ 680 0x0, /* segment base address */ 681 sizeof(struct amd64tss)-1,/* length - all address space */ 682 SDT_SYSTSS, /* segment type */ 683 SEL_KPL, /* segment descriptor priority level */ 684 1, /* segment descriptor present */ 685 0, /* long */ 686 0, /* unused - default 32 vs 16 bit size */ 687 0 /* limit granularity (byte/page units)*/ }, 688/* Actually, the TSS is a system descriptor which is double size */ 689{ 0x0, /* segment base address */ 690 0x0, /* length */ 691 0, /* segment type */ 692 0, /* segment descriptor priority level */ 693 0, /* segment descriptor present */ 694 0, /* long */ 695 0, /* default 32 vs 16 bit size */ 696 0 /* limit granularity (byte/page units)*/ }, 697}; 698 699void 700setidt(idx, func, typ, dpl, ist) 701 int idx; 702 inthand_t *func; 703 int typ; 704 int dpl; 705 int ist; 706{ 707 struct gate_descriptor *ip; 708 709 ip = idt + idx; 710 ip->gd_looffset = (uintptr_t)func; 711 ip->gd_selector = GSEL(GCODE_SEL, SEL_KPL); 712 ip->gd_ist = ist; 713 ip->gd_xx = 0; 714 ip->gd_type = typ; 715 ip->gd_dpl = dpl; 716 ip->gd_p = 1; 717 ip->gd_hioffset = ((uintptr_t)func)>>16 ; 718} 719 720#define IDTVEC(name) __CONCAT(X,name) 721 722extern inthand_t 723 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl), 724 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm), 725 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot), 726 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align), 727 IDTVEC(xmm), IDTVEC(dblfault), 728 IDTVEC(fast_syscall), IDTVEC(fast_syscall32); 729 730void 731sdtossd(sd, ssd) 732 struct user_segment_descriptor *sd; 733 struct soft_segment_descriptor *ssd; 734{ 735 736 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase; 737 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit; 738 ssd->ssd_type = sd->sd_type; 739 ssd->ssd_dpl = sd->sd_dpl; 740 ssd->ssd_p = sd->sd_p; 741 ssd->ssd_long = sd->sd_long; 742 ssd->ssd_def32 = sd->sd_def32; 743 ssd->ssd_gran = sd->sd_gran; 744} 745 746void 747ssdtosd(ssd, sd) 748 struct soft_segment_descriptor *ssd; 749 struct user_segment_descriptor *sd; 750{ 751 752 sd->sd_lobase = (ssd->ssd_base) & 0xffffff; 753 sd->sd_hibase = (ssd->ssd_base >> 24) & 0xff; 754 sd->sd_lolimit = (ssd->ssd_limit) & 0xffff; 755 sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf; 756 sd->sd_type = ssd->ssd_type; 757 sd->sd_dpl = ssd->ssd_dpl; 758 sd->sd_p = ssd->ssd_p; 759 sd->sd_long = ssd->ssd_long; 760 sd->sd_def32 = ssd->ssd_def32; 761 sd->sd_gran = ssd->ssd_gran; 762} 763 764void 765ssdtosyssd(ssd, sd) 766 struct soft_segment_descriptor *ssd; 767 struct system_segment_descriptor *sd; 768{ 769 770 sd->sd_lobase = (ssd->ssd_base) & 0xffffff; 771 sd->sd_hibase = (ssd->ssd_base >> 24) & 0xfffffffffful; 772 sd->sd_lolimit = (ssd->ssd_limit) & 0xffff; 773 sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf; 774 sd->sd_type = ssd->ssd_type; 775 sd->sd_dpl = ssd->ssd_dpl; 776 sd->sd_p = ssd->ssd_p; 777 sd->sd_gran = ssd->ssd_gran; 778} 779 780#if !defined(DEV_ATPIC) && defined(DEV_ISA) 781#include <isa/isavar.h> 782u_int 783isa_irq_pending(void) 784{ 785 786 return (0); 787} 788#endif 789 790#define PHYSMAP_SIZE (2 * 8) 791 792struct bios_smap { 793 u_int64_t base; 794 u_int64_t length; 795 u_int32_t type; 796} __packed; 797 798u_int basemem; 799 800/* 801 * Populate the (physmap) array with base/bound pairs describing the 802 * available physical memory in the system, then test this memory and 803 * build the phys_avail array describing the actually-available memory. 804 * 805 * If we cannot accurately determine the physical memory map, then use 806 * value from the 0xE801 call, and failing that, the RTC. 807 * 808 * Total memory size may be set by the kernel environment variable 809 * hw.physmem or the compile-time define MAXMEM. 810 * 811 * XXX first should be vm_paddr_t. 812 */ 813static void 814getmemsize(caddr_t kmdp, u_int64_t first) 815{ 816 int i, physmap_idx, pa_indx; 817 vm_paddr_t pa, physmap[PHYSMAP_SIZE]; 818 pt_entry_t *pte; 819 char *cp; 820 struct bios_smap *smapbase, *smap, *smapend; 821 u_int32_t smapsize; 822 823 bzero(physmap, sizeof(physmap)); 824 basemem = 0; 825 physmap_idx = 0; 826 827 /* 828 * get memory map from INT 15:E820, kindly supplied by the loader. 829 * 830 * subr_module.c says: 831 * "Consumer may safely assume that size value precedes data." 832 * ie: an int32_t immediately precedes smap. 833 */ 834 smapbase = (struct bios_smap *)preload_search_info(kmdp, 835 MODINFO_METADATA | MODINFOMD_SMAP); 836 if (smapbase == NULL) 837 panic("No BIOS smap info from loader!"); 838 839 smapsize = *((u_int32_t *)smapbase - 1); 840 smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize); 841 842 for (smap = smapbase; smap < smapend; smap++) { 843 if (boothowto & RB_VERBOSE) 844 printf("SMAP type=%02x base=%016lx len=%016lx\n", 845 smap->type, smap->base, smap->length); 846 847 if (smap->type != 0x01) 848 continue; 849 850 if (smap->length == 0) 851 continue; 852 853 for (i = 0; i <= physmap_idx; i += 2) { 854 if (smap->base < physmap[i + 1]) { 855 if (boothowto & RB_VERBOSE) 856 printf( 857 "Overlapping or non-montonic memory region, ignoring second region\n"); 858 goto next_run; 859 } 860 } 861 862 if (smap->base == physmap[physmap_idx + 1]) { 863 physmap[physmap_idx + 1] += smap->length; 864next_run: 865 continue; 866 } 867 868 physmap_idx += 2; 869 if (physmap_idx == PHYSMAP_SIZE) { 870 printf( 871 "Too many segments in the physical address map, giving up\n"); 872 break; 873 } 874 physmap[physmap_idx] = smap->base; 875 physmap[physmap_idx + 1] = smap->base + smap->length; 876 } 877 878 /* 879 * Find the 'base memory' segment for SMP 880 */ 881 basemem = 0; 882 for (i = 0; i <= physmap_idx; i += 2) { 883 if (physmap[i] == 0x00000000) { 884 basemem = physmap[i + 1] / 1024; 885 break; 886 } 887 } 888 if (basemem == 0) 889 panic("BIOS smap did not include a basemem segment!"); 890 891#ifdef SMP 892 /* make hole for AP bootstrap code */ 893 physmap[1] = mp_bootaddress(physmap[1] / 1024); 894#endif 895 896 /* 897 * Maxmem isn't the "maximum memory", it's one larger than the 898 * highest page of the physical address space. It should be 899 * called something like "Maxphyspage". We may adjust this 900 * based on ``hw.physmem'' and the results of the memory test. 901 */ 902 Maxmem = atop(physmap[physmap_idx + 1]); 903 904#ifdef MAXMEM 905 Maxmem = MAXMEM / 4; 906#endif 907 908 /* 909 * hw.physmem is a size in bytes; we also allow k, m, and g suffixes 910 * for the appropriate modifiers. This overrides MAXMEM. 911 */ 912 cp = getenv("hw.physmem"); 913 if (cp != NULL) { 914 u_int64_t AllowMem, sanity; 915 char *ep; 916 917 sanity = AllowMem = strtouq(cp, &ep, 0); 918 if ((ep != cp) && (*ep != 0)) { 919 switch(*ep) { 920 case 'g': 921 case 'G': 922 AllowMem <<= 10; 923 case 'm': 924 case 'M': 925 AllowMem <<= 10; 926 case 'k': 927 case 'K': 928 AllowMem <<= 10; 929 break; 930 default: 931 AllowMem = sanity = 0; 932 } 933 if (AllowMem < sanity) 934 AllowMem = 0; 935 } 936 if (AllowMem == 0) 937 printf("Ignoring invalid memory size of '%s'\n", cp); 938 else 939 Maxmem = atop(AllowMem); 940 freeenv(cp); 941 } 942 943 if (atop(physmap[physmap_idx + 1]) != Maxmem && 944 (boothowto & RB_VERBOSE)) 945 printf("Physical memory use set to %ldK\n", Maxmem * 4); 946 947 /* 948 * If Maxmem has been increased beyond what the system has detected, 949 * extend the last memory segment to the new limit. 950 */ 951 if (atop(physmap[physmap_idx + 1]) < Maxmem) 952 physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem); 953 954 /* call pmap initialization to make new kernel address space */ 955 pmap_bootstrap(&first); 956 957 /* 958 * Size up each available chunk of physical memory. 959 */ 960 physmap[0] = PAGE_SIZE; /* mask off page 0 */ 961 pa_indx = 0; 962 phys_avail[pa_indx++] = physmap[0]; 963 phys_avail[pa_indx] = physmap[0]; 964 pte = CMAP1; 965 966 /* 967 * physmap is in bytes, so when converting to page boundaries, 968 * round up the start address and round down the end address. 969 */ 970 for (i = 0; i <= physmap_idx; i += 2) { 971 vm_paddr_t end; 972 973 end = ptoa((vm_paddr_t)Maxmem); 974 if (physmap[i + 1] < end) 975 end = trunc_page(physmap[i + 1]); 976 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) { 977 int tmp, page_bad; 978 int *ptr = (int *)CADDR1; 979 980 /* 981 * block out kernel memory as not available. 982 */ 983 if (pa >= 0x100000 && pa < first) 984 continue; 985 986 page_bad = FALSE; 987 988 /* 989 * map page into kernel: valid, read/write,non-cacheable 990 */ 991 *pte = pa | PG_V | PG_RW | PG_N; 992 invltlb(); 993 994 tmp = *(int *)ptr; 995 /* 996 * Test for alternating 1's and 0's 997 */ 998 *(volatile int *)ptr = 0xaaaaaaaa; 999 if (*(volatile int *)ptr != 0xaaaaaaaa) 1000 page_bad = TRUE; 1001 /* 1002 * Test for alternating 0's and 1's 1003 */ 1004 *(volatile int *)ptr = 0x55555555; 1005 if (*(volatile int *)ptr != 0x55555555) 1006 page_bad = TRUE; 1007 /* 1008 * Test for all 1's 1009 */ 1010 *(volatile int *)ptr = 0xffffffff; 1011 if (*(volatile int *)ptr != 0xffffffff) 1012 page_bad = TRUE; 1013 /* 1014 * Test for all 0's 1015 */ 1016 *(volatile int *)ptr = 0x0; 1017 if (*(volatile int *)ptr != 0x0) 1018 page_bad = TRUE; 1019 /* 1020 * Restore original value. 1021 */ 1022 *(int *)ptr = tmp; 1023 1024 /* 1025 * Adjust array of valid/good pages. 1026 */ 1027 if (page_bad == TRUE) 1028 continue; 1029 /* 1030 * If this good page is a continuation of the 1031 * previous set of good pages, then just increase 1032 * the end pointer. Otherwise start a new chunk. 1033 * Note that "end" points one higher than end, 1034 * making the range >= start and < end. 1035 * If we're also doing a speculative memory 1036 * test and we at or past the end, bump up Maxmem 1037 * so that we keep going. The first bad page 1038 * will terminate the loop. 1039 */ 1040 if (phys_avail[pa_indx] == pa) { 1041 phys_avail[pa_indx] += PAGE_SIZE; 1042 } else { 1043 pa_indx++; 1044 if (pa_indx == PHYS_AVAIL_ARRAY_END) { 1045 printf( 1046 "Too many holes in the physical address space, giving up\n"); 1047 pa_indx--; 1048 break; 1049 } 1050 phys_avail[pa_indx++] = pa; /* start */ 1051 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */ 1052 } 1053 physmem++; 1054 } 1055 } 1056 *pte = 0; 1057 invltlb(); 1058 1059 /* 1060 * XXX 1061 * The last chunk must contain at least one page plus the message 1062 * buffer to avoid complicating other code (message buffer address 1063 * calculation, etc.). 1064 */ 1065 while (phys_avail[pa_indx - 1] + PAGE_SIZE + 1066 round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) { 1067 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]); 1068 phys_avail[pa_indx--] = 0; 1069 phys_avail[pa_indx--] = 0; 1070 } 1071 1072 Maxmem = atop(phys_avail[pa_indx]); 1073 1074 /* Trim off space for the message buffer. */ 1075 phys_avail[pa_indx] -= round_page(MSGBUF_SIZE); 1076 1077 avail_end = phys_avail[pa_indx]; 1078} 1079 1080u_int64_t 1081hammer_time(u_int64_t modulep, u_int64_t physfree) 1082{ 1083 caddr_t kmdp; 1084 int gsel_tss, off, x; 1085 struct pcpu *pc; 1086 u_int64_t msr; 1087 char *env; 1088 1089#ifdef DEV_ISA 1090 /* Preemptively mask the atpics and leave them shut down */ 1091 outb(IO_ICU1 + ICU_IMR_OFFSET, 0xff); 1092 outb(IO_ICU2 + ICU_IMR_OFFSET, 0xff); 1093#else 1094#error "have you forgotten the isa device?"; 1095#endif 1096 1097 proc0.p_uarea = (struct user *)(physfree + KERNBASE); 1098 bzero(proc0.p_uarea, UAREA_PAGES * PAGE_SIZE); 1099 physfree += UAREA_PAGES * PAGE_SIZE; 1100 thread0.td_kstack = physfree + KERNBASE; 1101 bzero((void *)thread0.td_kstack, KSTACK_PAGES * PAGE_SIZE); 1102 physfree += KSTACK_PAGES * PAGE_SIZE; 1103 thread0.td_pcb = (struct pcb *) 1104 (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1; 1105 1106 /* 1107 * This may be done better later if it gets more high level 1108 * components in it. If so just link td->td_proc here. 1109 */ 1110 proc_linkup(&proc0, &ksegrp0, &kse0, &thread0); 1111 1112 preload_metadata = (caddr_t)(uintptr_t)(modulep + KERNBASE); 1113 preload_bootstrap_relocate(KERNBASE); 1114 kmdp = preload_search_by_type("elf kernel"); 1115 if (kmdp == NULL) 1116 kmdp = preload_search_by_type("elf64 kernel"); 1117 boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int); 1118 kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *) + KERNBASE; 1119#ifdef DDB 1120 ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t); 1121 ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t); 1122#endif 1123 1124 /* Init basic tunables, hz etc */ 1125 init_param1(); 1126 1127 /* 1128 * make gdt memory segments 1129 */ 1130 gdt_segs[GPROC0_SEL].ssd_base = (uintptr_t)&common_tss[0]; 1131 1132 for (x = 0; x < NGDT; x++) { 1133 if (x != GPROC0_SEL && x != (GPROC0_SEL + 1)) 1134 ssdtosd(&gdt_segs[x], &gdt[x]); 1135 } 1136 ssdtosyssd(&gdt_segs[GPROC0_SEL], 1137 (struct system_segment_descriptor *)&gdt[GPROC0_SEL]); 1138 1139 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 1140 r_gdt.rd_base = (long) gdt; 1141 lgdt(&r_gdt); 1142 pc = &__pcpu[0]; 1143 1144 wrmsr(MSR_FSBASE, 0); /* User value */ 1145 wrmsr(MSR_GSBASE, (u_int64_t)pc); 1146 wrmsr(MSR_KGSBASE, 0); /* User value while in the kernel */ 1147 1148 pcpu_init(pc, 0, sizeof(struct pcpu)); 1149 PCPU_SET(prvspace, pc); 1150 PCPU_SET(curthread, &thread0); 1151 PCPU_SET(curpcb, thread0.td_pcb); 1152 PCPU_SET(tssp, &common_tss[0]); 1153 1154 /* 1155 * Initialize mutexes. 1156 * 1157 * icu_lock: in order to allow an interrupt to occur in a critical 1158 * section, to set pcpu->ipending (etc...) properly, we 1159 * must be able to get the icu lock, so it can't be 1160 * under witness. 1161 */ 1162 mutex_init(); 1163 mtx_init(&clock_lock, "clk", NULL, MTX_SPIN); 1164 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS); 1165 1166 /* exceptions */ 1167 for (x = 0; x < NIDT; x++) 1168 setidt(x, &IDTVEC(rsvd), SDT_SYSIGT, SEL_KPL, 0); 1169 setidt(IDT_DE, &IDTVEC(div), SDT_SYSIGT, SEL_KPL, 0); 1170 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYSIGT, SEL_KPL, 0); 1171 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYSIGT, SEL_KPL, 0); 1172 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYSIGT, SEL_UPL, 0); 1173 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYSIGT, SEL_KPL, 0); 1174 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYSIGT, SEL_KPL, 0); 1175 setidt(IDT_UD, &IDTVEC(ill), SDT_SYSIGT, SEL_KPL, 0); 1176 setidt(IDT_NM, &IDTVEC(dna), SDT_SYSIGT, SEL_KPL, 0); 1177 setidt(IDT_DF, &IDTVEC(dblfault), SDT_SYSIGT, SEL_KPL, 1); 1178 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYSIGT, SEL_KPL, 0); 1179 setidt(IDT_TS, &IDTVEC(tss), SDT_SYSIGT, SEL_KPL, 0); 1180 setidt(IDT_NP, &IDTVEC(missing), SDT_SYSIGT, SEL_KPL, 0); 1181 setidt(IDT_SS, &IDTVEC(stk), SDT_SYSIGT, SEL_KPL, 0); 1182 setidt(IDT_GP, &IDTVEC(prot), SDT_SYSIGT, SEL_KPL, 0); 1183 setidt(IDT_PF, &IDTVEC(page), SDT_SYSIGT, SEL_KPL, 0); 1184 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYSIGT, SEL_KPL, 0); 1185 setidt(IDT_AC, &IDTVEC(align), SDT_SYSIGT, SEL_KPL, 0); 1186 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYSIGT, SEL_KPL, 0); 1187 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYSIGT, SEL_KPL, 0); 1188 1189 r_idt.rd_limit = sizeof(idt0) - 1; 1190 r_idt.rd_base = (long) idt; 1191 lidt(&r_idt); 1192 1193 /* 1194 * Initialize the console before we print anything out. 1195 */ 1196 cninit(); 1197 1198#ifdef DEV_ATPIC 1199 atpic_startup(); 1200#endif 1201 1202 kdb_init(); 1203 1204#ifdef KDB 1205 if (boothowto & RB_KDB) 1206 kdb_enter("Boot flags requested debugger"); 1207#endif 1208 1209 identify_cpu(); /* Final stage of CPU initialization */ 1210 initializecpu(); /* Initialize CPU registers */ 1211 1212 /* make an initial tss so cpu can get interrupt stack on syscall! */ 1213 common_tss[0].tss_rsp0 = thread0.td_kstack + \ 1214 KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb); 1215 /* Ensure the stack is aligned to 16 bytes */ 1216 common_tss[0].tss_rsp0 &= ~0xFul; 1217 PCPU_SET(rsp0, common_tss[0].tss_rsp0); 1218 1219 /* doublefault stack space, runs on ist1 */ 1220 common_tss[0].tss_ist1 = (long)&dblfault_stack[sizeof(dblfault_stack)]; 1221 1222 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); 1223 ltr(gsel_tss); 1224 1225 /* Set up the fast syscall stuff */ 1226 msr = rdmsr(MSR_EFER) | EFER_SCE; 1227 wrmsr(MSR_EFER, msr); 1228 wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall)); 1229 wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32)); 1230 msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) | 1231 ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48); 1232 wrmsr(MSR_STAR, msr); 1233 wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D); 1234 1235 getmemsize(kmdp, physfree); 1236 init_param2(physmem); 1237 1238 /* now running on new page tables, configured,and u/iom is accessible */ 1239 1240 /* Map the message buffer. */ 1241 for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE) 1242 pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off); 1243 1244 msgbufinit(msgbufp, MSGBUF_SIZE); 1245 fpuinit(); 1246 1247 /* transfer to user mode */ 1248 1249 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL); 1250 _udatasel = GSEL(GUDATA_SEL, SEL_UPL); 1251 _ucode32sel = GSEL(GUCODE32_SEL, SEL_UPL); 1252 1253 /* setup proc 0's pcb */ 1254 thread0.td_pcb->pcb_flags = 0; /* XXXKSE */ 1255 thread0.td_pcb->pcb_cr3 = KPML4phys; 1256 thread0.td_frame = &proc0_tf; 1257 1258 env = getenv("kernelname"); 1259 if (env != NULL) 1260 strlcpy(kernelname, env, sizeof(kernelname)); 1261 1262 /* Location of kernel stack for locore */ 1263 return ((u_int64_t)thread0.td_pcb); 1264} 1265 1266void 1267cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) 1268{ 1269 1270 pcpu->pc_acpi_id = 0xffffffff; 1271} 1272 1273/* 1274 * Construct a PCB from a trapframe. This is called from kdb_trap() where 1275 * we want to start a backtrace from the function that caused us to enter 1276 * the debugger. We have the context in the trapframe, but base the trace 1277 * on the PCB. The PCB doesn't have to be perfect, as long as it contains 1278 * enough for a backtrace. 1279 */ 1280void 1281makectx(struct trapframe *tf, struct pcb *pcb) 1282{ 1283 1284 pcb->pcb_r12 = tf->tf_r12; 1285 pcb->pcb_r13 = tf->tf_r13; 1286 pcb->pcb_r14 = tf->tf_r14; 1287 pcb->pcb_r15 = tf->tf_r15; 1288 pcb->pcb_rbp = tf->tf_rbp; 1289 pcb->pcb_rbx = tf->tf_rbx; 1290 pcb->pcb_rip = tf->tf_rip; 1291 pcb->pcb_rsp = (ISPL(tf->tf_cs)) ? tf->tf_rsp : (long)(tf + 1) - 8; 1292} 1293 1294int 1295ptrace_set_pc(struct thread *td, unsigned long addr) 1296{ 1297 td->td_frame->tf_rip = addr; 1298 return (0); 1299} 1300 1301int 1302ptrace_single_step(struct thread *td) 1303{ 1304 td->td_frame->tf_rflags |= PSL_T; 1305 return (0); 1306} 1307 1308int 1309ptrace_clear_single_step(struct thread *td) 1310{ 1311 td->td_frame->tf_rflags &= ~PSL_T; 1312 return (0); 1313} 1314 1315int 1316fill_regs(struct thread *td, struct reg *regs) 1317{ 1318 struct pcb *pcb; 1319 struct trapframe *tp; 1320 1321 tp = td->td_frame; 1322 regs->r_r15 = tp->tf_r15; 1323 regs->r_r14 = tp->tf_r14; 1324 regs->r_r13 = tp->tf_r13; 1325 regs->r_r12 = tp->tf_r12; 1326 regs->r_r11 = tp->tf_r11; 1327 regs->r_r10 = tp->tf_r10; 1328 regs->r_r9 = tp->tf_r9; 1329 regs->r_r8 = tp->tf_r8; 1330 regs->r_rdi = tp->tf_rdi; 1331 regs->r_rsi = tp->tf_rsi; 1332 regs->r_rbp = tp->tf_rbp; 1333 regs->r_rbx = tp->tf_rbx; 1334 regs->r_rdx = tp->tf_rdx; 1335 regs->r_rcx = tp->tf_rcx; 1336 regs->r_rax = tp->tf_rax; 1337 regs->r_rip = tp->tf_rip; 1338 regs->r_cs = tp->tf_cs; 1339 regs->r_rflags = tp->tf_rflags; 1340 regs->r_rsp = tp->tf_rsp; 1341 regs->r_ss = tp->tf_ss; 1342 pcb = td->td_pcb; 1343 return (0); 1344} 1345 1346int 1347set_regs(struct thread *td, struct reg *regs) 1348{ 1349 struct pcb *pcb; 1350 struct trapframe *tp; 1351 register_t rflags; 1352 1353 tp = td->td_frame; 1354 rflags = regs->r_rflags & 0xffffffff; 1355 if (!EFL_SECURE(rflags, tp->tf_rflags) || !CS_SECURE(regs->r_cs)) 1356 return (EINVAL); 1357 tp->tf_r15 = regs->r_r15; 1358 tp->tf_r14 = regs->r_r14; 1359 tp->tf_r13 = regs->r_r13; 1360 tp->tf_r12 = regs->r_r12; 1361 tp->tf_r11 = regs->r_r11; 1362 tp->tf_r10 = regs->r_r10; 1363 tp->tf_r9 = regs->r_r9; 1364 tp->tf_r8 = regs->r_r8; 1365 tp->tf_rdi = regs->r_rdi; 1366 tp->tf_rsi = regs->r_rsi; 1367 tp->tf_rbp = regs->r_rbp; 1368 tp->tf_rbx = regs->r_rbx; 1369 tp->tf_rdx = regs->r_rdx; 1370 tp->tf_rcx = regs->r_rcx; 1371 tp->tf_rax = regs->r_rax; 1372 tp->tf_rip = regs->r_rip; 1373 tp->tf_cs = regs->r_cs; 1374 tp->tf_rflags = rflags; 1375 tp->tf_rsp = regs->r_rsp; 1376 tp->tf_ss = regs->r_ss; 1377 pcb = td->td_pcb; 1378 return (0); 1379} 1380 1381/* XXX check all this stuff! */ 1382/* externalize from sv_xmm */ 1383static void 1384fill_fpregs_xmm(struct savefpu *sv_xmm, struct fpreg *fpregs) 1385{ 1386 struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env; 1387 struct envxmm *penv_xmm = &sv_xmm->sv_env; 1388 int i; 1389 1390 /* pcb -> fpregs */ 1391 bzero(fpregs, sizeof(*fpregs)); 1392 1393 /* FPU control/status */ 1394 penv_fpreg->en_cw = penv_xmm->en_cw; 1395 penv_fpreg->en_sw = penv_xmm->en_sw; 1396 penv_fpreg->en_tw = penv_xmm->en_tw; 1397 penv_fpreg->en_opcode = penv_xmm->en_opcode; 1398 penv_fpreg->en_rip = penv_xmm->en_rip; 1399 penv_fpreg->en_rdp = penv_xmm->en_rdp; 1400 penv_fpreg->en_mxcsr = penv_xmm->en_mxcsr; 1401 penv_fpreg->en_mxcsr_mask = penv_xmm->en_mxcsr_mask; 1402 1403 /* FPU registers */ 1404 for (i = 0; i < 8; ++i) 1405 bcopy(sv_xmm->sv_fp[i].fp_acc.fp_bytes, fpregs->fpr_acc[i], 10); 1406 1407 /* SSE registers */ 1408 for (i = 0; i < 16; ++i) 1409 bcopy(sv_xmm->sv_xmm[i].xmm_bytes, fpregs->fpr_xacc[i], 16); 1410} 1411 1412/* internalize from fpregs into sv_xmm */ 1413static void 1414set_fpregs_xmm(struct fpreg *fpregs, struct savefpu *sv_xmm) 1415{ 1416 struct envxmm *penv_xmm = &sv_xmm->sv_env; 1417 struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env; 1418 int i; 1419 1420 /* fpregs -> pcb */ 1421 /* FPU control/status */ 1422 penv_xmm->en_cw = penv_fpreg->en_cw; 1423 penv_xmm->en_sw = penv_fpreg->en_sw; 1424 penv_xmm->en_tw = penv_fpreg->en_tw; 1425 penv_xmm->en_opcode = penv_fpreg->en_opcode; 1426 penv_xmm->en_rip = penv_fpreg->en_rip; 1427 penv_xmm->en_rdp = penv_fpreg->en_rdp; 1428 penv_xmm->en_mxcsr = penv_fpreg->en_mxcsr; 1429 penv_xmm->en_mxcsr_mask = penv_fpreg->en_mxcsr_mask; 1430 1431 /* FPU registers */ 1432 for (i = 0; i < 8; ++i) 1433 bcopy(fpregs->fpr_acc[i], sv_xmm->sv_fp[i].fp_acc.fp_bytes, 10); 1434 1435 /* SSE registers */ 1436 for (i = 0; i < 16; ++i) 1437 bcopy(fpregs->fpr_xacc[i], sv_xmm->sv_xmm[i].xmm_bytes, 16); 1438} 1439 1440/* externalize from td->pcb */ 1441int 1442fill_fpregs(struct thread *td, struct fpreg *fpregs) 1443{ 1444 1445 fill_fpregs_xmm(&td->td_pcb->pcb_save, fpregs); 1446 return (0); 1447} 1448 1449/* internalize to td->pcb */ 1450int 1451set_fpregs(struct thread *td, struct fpreg *fpregs) 1452{ 1453 1454 set_fpregs_xmm(fpregs, &td->td_pcb->pcb_save); 1455 return (0); 1456} 1457 1458/* 1459 * Get machine context. 1460 */ 1461int 1462get_mcontext(struct thread *td, mcontext_t *mcp, int flags) 1463{ 1464 struct trapframe *tp; 1465 1466 tp = td->td_frame; 1467 PROC_LOCK(curthread->td_proc); 1468 mcp->mc_onstack = sigonstack(tp->tf_rsp); 1469 PROC_UNLOCK(curthread->td_proc); 1470 mcp->mc_r15 = tp->tf_r15; 1471 mcp->mc_r14 = tp->tf_r14; 1472 mcp->mc_r13 = tp->tf_r13; 1473 mcp->mc_r12 = tp->tf_r12; 1474 mcp->mc_r11 = tp->tf_r11; 1475 mcp->mc_r10 = tp->tf_r10; 1476 mcp->mc_r9 = tp->tf_r9; 1477 mcp->mc_r8 = tp->tf_r8; 1478 mcp->mc_rdi = tp->tf_rdi; 1479 mcp->mc_rsi = tp->tf_rsi; 1480 mcp->mc_rbp = tp->tf_rbp; 1481 mcp->mc_rbx = tp->tf_rbx; 1482 mcp->mc_rcx = tp->tf_rcx; 1483 if (flags & GET_MC_CLEAR_RET) { 1484 mcp->mc_rax = 0; 1485 mcp->mc_rdx = 0; 1486 } else { 1487 mcp->mc_rax = tp->tf_rax; 1488 mcp->mc_rdx = tp->tf_rdx; 1489 } 1490 mcp->mc_rip = tp->tf_rip; 1491 mcp->mc_cs = tp->tf_cs; 1492 mcp->mc_rflags = tp->tf_rflags; 1493 mcp->mc_rsp = tp->tf_rsp; 1494 mcp->mc_ss = tp->tf_ss; 1495 mcp->mc_len = sizeof(*mcp); 1496 get_fpcontext(td, mcp); 1497 return (0); 1498} 1499 1500/* 1501 * Set machine context. 1502 * 1503 * However, we don't set any but the user modifiable flags, and we won't 1504 * touch the cs selector. 1505 */ 1506int 1507set_mcontext(struct thread *td, const mcontext_t *mcp) 1508{ 1509 struct trapframe *tp; 1510 long rflags; 1511 int ret; 1512 1513 tp = td->td_frame; 1514 if (mcp->mc_len != sizeof(*mcp)) 1515 return (EINVAL); 1516 rflags = (mcp->mc_rflags & PSL_USERCHANGE) | 1517 (tp->tf_rflags & ~PSL_USERCHANGE); 1518 ret = set_fpcontext(td, mcp); 1519 if (ret != 0) 1520 return (ret); 1521 tp->tf_r15 = mcp->mc_r15; 1522 tp->tf_r14 = mcp->mc_r14; 1523 tp->tf_r13 = mcp->mc_r13; 1524 tp->tf_r12 = mcp->mc_r12; 1525 tp->tf_r11 = mcp->mc_r11; 1526 tp->tf_r10 = mcp->mc_r10; 1527 tp->tf_r9 = mcp->mc_r9; 1528 tp->tf_r8 = mcp->mc_r8; 1529 tp->tf_rdi = mcp->mc_rdi; 1530 tp->tf_rsi = mcp->mc_rsi; 1531 tp->tf_rbp = mcp->mc_rbp; 1532 tp->tf_rbx = mcp->mc_rbx; 1533 tp->tf_rdx = mcp->mc_rdx; 1534 tp->tf_rcx = mcp->mc_rcx; 1535 tp->tf_rax = mcp->mc_rax; 1536 tp->tf_rip = mcp->mc_rip; 1537 tp->tf_rflags = rflags; 1538 tp->tf_rsp = mcp->mc_rsp; 1539 tp->tf_ss = mcp->mc_ss; 1540 td->td_pcb->pcb_flags |= PCB_FULLCTX; 1541 return (0); 1542} 1543 1544static void 1545get_fpcontext(struct thread *td, mcontext_t *mcp) 1546{ 1547 1548 mcp->mc_ownedfp = fpugetregs(td, (struct savefpu *)&mcp->mc_fpstate); 1549 mcp->mc_fpformat = fpuformat(); 1550} 1551 1552static int 1553set_fpcontext(struct thread *td, const mcontext_t *mcp) 1554{ 1555 1556 if (mcp->mc_fpformat == _MC_FPFMT_NODEV) 1557 return (0); 1558 else if (mcp->mc_fpformat != _MC_FPFMT_XMM) 1559 return (EINVAL); 1560 else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE) 1561 /* We don't care what state is left in the FPU or PCB. */ 1562 fpstate_drop(td); 1563 else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU || 1564 mcp->mc_ownedfp == _MC_FPOWNED_PCB) { 1565 /* 1566 * XXX we violate the dubious requirement that fpusetregs() 1567 * be called with interrupts disabled. 1568 * XXX obsolete on trap-16 systems? 1569 */ 1570 fpusetregs(td, (struct savefpu *)&mcp->mc_fpstate); 1571 } else 1572 return (EINVAL); 1573 return (0); 1574} 1575 1576void 1577fpstate_drop(struct thread *td) 1578{ 1579 register_t s; 1580 1581 s = intr_disable(); 1582 if (PCPU_GET(fpcurthread) == td) 1583 fpudrop(); 1584 /* 1585 * XXX force a full drop of the fpu. The above only drops it if we 1586 * owned it. 1587 * 1588 * XXX I don't much like fpugetregs()'s semantics of doing a full 1589 * drop. Dropping only to the pcb matches fnsave's behaviour. 1590 * We only need to drop to !PCB_INITDONE in sendsig(). But 1591 * sendsig() is the only caller of fpugetregs()... perhaps we just 1592 * have too many layers. 1593 */ 1594 curthread->td_pcb->pcb_flags &= ~PCB_FPUINITDONE; 1595 intr_restore(s); 1596} 1597 1598int 1599fill_dbregs(struct thread *td, struct dbreg *dbregs) 1600{ 1601 struct pcb *pcb; 1602 1603 if (td == NULL) { 1604 dbregs->dr[0] = rdr0(); 1605 dbregs->dr[1] = rdr1(); 1606 dbregs->dr[2] = rdr2(); 1607 dbregs->dr[3] = rdr3(); 1608 dbregs->dr[6] = rdr6(); 1609 dbregs->dr[7] = rdr7(); 1610 } else { 1611 pcb = td->td_pcb; 1612 dbregs->dr[0] = pcb->pcb_dr0; 1613 dbregs->dr[1] = pcb->pcb_dr1; 1614 dbregs->dr[2] = pcb->pcb_dr2; 1615 dbregs->dr[3] = pcb->pcb_dr3; 1616 dbregs->dr[6] = pcb->pcb_dr6; 1617 dbregs->dr[7] = pcb->pcb_dr7; 1618 } 1619 dbregs->dr[4] = 0; 1620 dbregs->dr[5] = 0; 1621 dbregs->dr[8] = 0; 1622 dbregs->dr[9] = 0; 1623 dbregs->dr[10] = 0; 1624 dbregs->dr[11] = 0; 1625 dbregs->dr[12] = 0; 1626 dbregs->dr[13] = 0; 1627 dbregs->dr[14] = 0; 1628 dbregs->dr[15] = 0; 1629 return (0); 1630} 1631 1632int 1633set_dbregs(struct thread *td, struct dbreg *dbregs) 1634{ 1635 struct pcb *pcb; 1636 int i; 1637 u_int64_t mask1, mask2; 1638 1639 if (td == NULL) { 1640 load_dr0(dbregs->dr[0]); 1641 load_dr1(dbregs->dr[1]); 1642 load_dr2(dbregs->dr[2]); 1643 load_dr3(dbregs->dr[3]); 1644 load_dr6(dbregs->dr[6]); 1645 load_dr7(dbregs->dr[7]); 1646 } else { 1647 /* 1648 * Don't let an illegal value for dr7 get set. Specifically, 1649 * check for undefined settings. Setting these bit patterns 1650 * result in undefined behaviour and can lead to an unexpected 1651 * TRCTRAP or a general protection fault right here. 1652 */ 1653 for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 8; 1654 i++, mask1 <<= 2, mask2 <<= 2) 1655 if ((dbregs->dr[7] & mask1) == mask2) 1656 return (EINVAL); 1657 1658 pcb = td->td_pcb; 1659 1660 /* 1661 * Don't let a process set a breakpoint that is not within the 1662 * process's address space. If a process could do this, it 1663 * could halt the system by setting a breakpoint in the kernel 1664 * (if ddb was enabled). Thus, we need to check to make sure 1665 * that no breakpoints are being enabled for addresses outside 1666 * process's address space, unless, perhaps, we were called by 1667 * uid 0. 1668 * 1669 * XXX - what about when the watched area of the user's 1670 * address space is written into from within the kernel 1671 * ... wouldn't that still cause a breakpoint to be generated 1672 * from within kernel mode? 1673 */ 1674 1675 if (suser(td) != 0) { 1676 if (dbregs->dr[7] & 0x3) { 1677 /* dr0 is enabled */ 1678 if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS) 1679 return (EINVAL); 1680 } 1681 if (dbregs->dr[7] & 0x3<<2) { 1682 /* dr1 is enabled */ 1683 if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS) 1684 return (EINVAL); 1685 } 1686 if (dbregs->dr[7] & 0x3<<4) { 1687 /* dr2 is enabled */ 1688 if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS) 1689 return (EINVAL); 1690 } 1691 if (dbregs->dr[7] & 0x3<<6) { 1692 /* dr3 is enabled */ 1693 if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS) 1694 return (EINVAL); 1695 } 1696 } 1697 1698 pcb->pcb_dr0 = dbregs->dr[0]; 1699 pcb->pcb_dr1 = dbregs->dr[1]; 1700 pcb->pcb_dr2 = dbregs->dr[2]; 1701 pcb->pcb_dr3 = dbregs->dr[3]; 1702 pcb->pcb_dr6 = dbregs->dr[6]; 1703 pcb->pcb_dr7 = dbregs->dr[7]; 1704 1705 pcb->pcb_flags |= PCB_DBREGS; 1706 } 1707 1708 return (0); 1709} 1710 1711void 1712reset_dbregs(void) 1713{ 1714 1715 load_dr7(0); /* Turn off the control bits first */ 1716 load_dr0(0); 1717 load_dr1(0); 1718 load_dr2(0); 1719 load_dr3(0); 1720 load_dr6(0); 1721} 1722 1723/* 1724 * Return > 0 if a hardware breakpoint has been hit, and the 1725 * breakpoint was in user space. Return 0, otherwise. 1726 */ 1727int 1728user_dbreg_trap(void) 1729{ 1730 u_int64_t dr7, dr6; /* debug registers dr6 and dr7 */ 1731 u_int64_t bp; /* breakpoint bits extracted from dr6 */ 1732 int nbp; /* number of breakpoints that triggered */ 1733 caddr_t addr[4]; /* breakpoint addresses */ 1734 int i; 1735 1736 dr7 = rdr7(); 1737 if ((dr7 & 0x000000ff) == 0) { 1738 /* 1739 * all GE and LE bits in the dr7 register are zero, 1740 * thus the trap couldn't have been caused by the 1741 * hardware debug registers 1742 */ 1743 return 0; 1744 } 1745 1746 nbp = 0; 1747 dr6 = rdr6(); 1748 bp = dr6 & 0x0000000f; 1749 1750 if (!bp) { 1751 /* 1752 * None of the breakpoint bits are set meaning this 1753 * trap was not caused by any of the debug registers 1754 */ 1755 return 0; 1756 } 1757 1758 /* 1759 * at least one of the breakpoints were hit, check to see 1760 * which ones and if any of them are user space addresses 1761 */ 1762 1763 if (bp & 0x01) { 1764 addr[nbp++] = (caddr_t)rdr0(); 1765 } 1766 if (bp & 0x02) { 1767 addr[nbp++] = (caddr_t)rdr1(); 1768 } 1769 if (bp & 0x04) { 1770 addr[nbp++] = (caddr_t)rdr2(); 1771 } 1772 if (bp & 0x08) { 1773 addr[nbp++] = (caddr_t)rdr3(); 1774 } 1775 1776 for (i=0; i<nbp; i++) { 1777 if (addr[i] < 1778 (caddr_t)VM_MAXUSER_ADDRESS) { 1779 /* 1780 * addr[i] is in user space 1781 */ 1782 return nbp; 1783 } 1784 } 1785 1786 /* 1787 * None of the breakpoints are in user space. 1788 */ 1789 return 0; 1790} 1791 1792#ifdef KDB 1793 1794/* 1795 * Provide inb() and outb() as functions. They are normally only 1796 * available as macros calling inlined functions, thus cannot be 1797 * called from the debugger. 1798 * 1799 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined. 1800 */ 1801 1802#undef inb 1803#undef outb 1804 1805/* silence compiler warnings */ 1806u_char inb(u_int); 1807void outb(u_int, u_char); 1808 1809u_char 1810inb(u_int port) 1811{ 1812 u_char data; 1813 /* 1814 * We use %%dx and not %1 here because i/o is done at %dx and not at 1815 * %edx, while gcc generates inferior code (movw instead of movl) 1816 * if we tell it to load (u_short) port. 1817 */ 1818 __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port)); 1819 return (data); 1820} 1821 1822void 1823outb(u_int port, u_char data) 1824{ 1825 u_char al; 1826 /* 1827 * Use an unnecessary assignment to help gcc's register allocator. 1828 * This make a large difference for gcc-1.40 and a tiny difference 1829 * for gcc-2.6.0. For gcc-1.40, al had to be ``asm("ax")'' for 1830 * best results. gcc-2.6.0 can't handle this. 1831 */ 1832 al = data; 1833 __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port)); 1834} 1835 1836#endif /* KDB */ 1837