/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/amd64/amd64/machdep.c 178471 2008-04-25 05:18:50Z jeff $");

#include "opt_atalk.h"
#include "opt_atpic.h"
#include "opt_compat.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_ipx.h"
#include "opt_isa.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_msgbuf.h"
#include "opt_perfmon.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#ifdef DDB
#ifndef KDB
#error KDB must be enabled in order for DDB to work!
#endif
#endif
#include <ddb/ddb.h>

#include <net/netisr.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/pc/bios.h>
#include <machine/pcb.h>
#include <machine/proc.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/specialreg.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#include <machine/tss.h>
#ifdef SMP
#include <machine/smp.h>
#endif

#ifdef DEV_ATPIC
#include <amd64/isa/icu.h>
#else
#include <machine/apicvar.h>
#endif

#include <isa/isareg.h>
#include <isa/rtc.h>

/* Sanity check for __curthread() */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);

extern u_int64_t hammer_time(u_int64_t, u_int64_t);

extern void printcpuinfo(void);	/* XXX header file */
extern void identify_cpu(void);
extern void panicifcpuunsupported(void);

#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)

static void cpu_startup(void *);
static void get_fpcontext(struct thread *td, mcontext_t *mcp);
static int  set_fpcontext(struct thread *td, const mcontext_t *mcp);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

#ifdef DDB
extern vm_offset_t ksym_start, ksym_end;
#endif

/* Intel ICH registers */
#define	ICH_PMBASE	0x400
#define	ICH_SMI_EN	(ICH_PMBASE + 0x30)	/* parenthesized for safe expansion */

int	_udatasel, _ucodesel, _ucode32sel;

int cold = 1;

long Maxmem = 0;
long realmem = 0;

/*
 * The number of PHYSMAP entries must be one less than the number of
 * PHYSSEG entries because the PHYSMAP entry that spans the largest
 * physical address that is accessible by ISA DMA is split into two
 * PHYSSEG entries.
 */
#define	PHYSMAP_SIZE	(2 * (VM_PHYSSEG_MAX - 1))

vm_paddr_t phys_avail[PHYSMAP_SIZE + 2];
vm_paddr_t dump_avail[PHYSMAP_SIZE + 2];

/* must be 2 less so 0 0 can signal end of chunks */
#define	PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2)
#define	DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2)

struct kva_md_info kmi;

static struct trapframe proc0_tf;
struct region_descriptor r_gdt, r_idt;

struct pcpu __pcpu[MAXCPU];

struct mtx icu_lock;

struct mem_range_softc mem_range_softc;

static void
cpu_startup(void *dummy)
{
	char *sysenv;

	/*
	 * On MacBooks, we need to prevent the legacy USB circuit from
	 * generating an SMI#, because this can cause several problems,
	 * namely: incorrect CPU frequency detection and failure to
	 * start the APs.
	 * We do this by disabling a bit in the SMI_EN (SMI Control and
	 * Enable register) of the Intel ICH LPC Interface Bridge.
	 */
	sysenv = getenv("smbios.system.product");
	if (sysenv != NULL) {
		if (strncmp(sysenv, "MacBook", 7) == 0) {
			if (bootverbose)
				printf("Disabling LEGACY_USB_EN bit on "
				    "Intel ICH.\n");
			outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8);
		}
		freeenv(sysenv);
	}

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif
	printf("usable memory = %ju (%ju MB)\n", ptoa((uintmax_t)physmem),
	    ptoa((uintmax_t)physmem) / 1048576);
	realmem = Maxmem;
	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			vm_paddr_t size;

			size = phys_avail[indx + 1] - phys_avail[indx];
			printf(
			    "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n",
			    (uintmax_t)phys_avail[indx],
			    (uintmax_t)phys_avail[indx + 1] - 1,
			    (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory  = %ju (%ju MB)\n",
	    ptoa((uintmax_t)cnt.v_free_count),
	    ptoa((uintmax_t)cnt.v_free_count) / 1048576);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();

	cpu_setregs();
}
/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
void
sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
{
	struct sigframe sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	char *sp;
	struct trapframe *regs;
	int sig;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	sig = ksi->ksi_signo;
	psp = p->p_sigacts;
	mtx_assert(&psp->ps_mtx, MA_OWNED);
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_rsp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = td->td_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(*regs));
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
	get_fpcontext(td, &sf.sf_uc.uc_mcontext);
	fpstate_drop(td);

	/* Allocate space for the signal handler context. */
	if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sp = td->td_sigstk.ss_sp +
		    td->td_sigstk.ss_size - sizeof(struct sigframe);
#if defined(COMPAT_43)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sp = (char *)regs->tf_rsp - sizeof(struct sigframe) - 128;
	/* Align to 16 bytes. */
	sfp = (struct sigframe *)((unsigned long)sp & ~0xFul);

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	regs->tf_rdi = sig;			/* arg 1 in %rdi */
	regs->tf_rdx = (register_t)&sfp->sf_uc;	/* arg 3 in %rdx */
	if (SIGISMEMBER(psp->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		regs->tf_rsi = (register_t)&sfp->sf_si;	/* arg 2 in %rsi */
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si = ksi->ksi_info;
		sf.sf_si.si_signo = sig; /* maybe a translated signal */
		regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */
	} else {
		/* Old FreeBSD-style arguments. */
		regs->tf_rsi = ksi->ksi_code;	/* arg 2 in %rsi */
		regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */
		sf.sf_ahu.sf_handler = catcher;
	}
	mtx_unlock(&psp->ps_mtx);
	PROC_UNLOCK(p);

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_rsp = (long)sfp;
	regs->tf_rip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
	regs->tf_rflags &= ~(PSL_T | PSL_D);
	regs->tf_cs = _ucodesel;
	PROC_LOCK(p);
	mtx_lock(&psp->ps_mtx);
}
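/*
 * A sketch of the resulting user stack (derived from the code above;
 * not in the original source).  sfp is aligned down to 16 bytes, and
 * the 128 bytes skipped below the interrupted %rsp in the non-sigstack
 * case correspond to the red zone the amd64 ABI reserves below the
 * stack pointer:
 *
 *	old %rsp -> +--------------------+
 *	            |  red zone (128)    |
 *	            +--------------------+
 *	            |  struct sigframe   |  sf_uc ucontext, sf_si siginfo
 *	sfp      -> +--------------------+  16-byte aligned, new %rsp
 *
 * %rip is then pointed at the signal trampoline below PS_STRINGS,
 * which invokes the handler and finally calls sigreturn(2).
 */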
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 *
 * MPSAFE
 */
int
sigreturn(struct thread *td, struct sigreturn_args /* {
	const struct __ucontext *sigcntxp;
} */ *uap)
{
	ucontext_t uc;
	struct proc *p = td->td_proc;
	struct trapframe *regs;
	const ucontext_t *ucp;
	long rflags;
	int cs, error, ret;
	ksiginfo_t ksi;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	rflags = ucp->uc_mcontext.mc_rflags;
	/*
	 * Don't allow users to change privileged or reserved flags.
	 */
	/*
	 * XXX do allow users to change the privileged flag PSL_RF.
	 * The cpu sets PSL_RF in tf_rflags for faults.  Debuggers
	 * should sometimes set it there too.  tf_rflags is kept in
	 * the signal context during signal handling and there is no
	 * other place to remember it, so the PSL_RF bit may be
	 * corrupted by the signal handler without us knowing.
	 * Corruption of the PSL_RF bit at worst causes one more or
	 * one less debugger trap, so allowing it is fairly harmless.
	 */
	if (!EFL_SECURE(rflags & ~PSL_RF, regs->tf_rflags & ~PSL_RF)) {
		printf("sigreturn: rflags = 0x%lx\n", rflags);
		return (EINVAL);
	}

	/*
	 * Don't allow users to load a valid privileged %cs.  Let the
	 * hardware check for invalid selectors, excess privilege in
	 * other selectors, invalid %eip's and invalid %esp's.
	 */
	cs = ucp->uc_mcontext.mc_cs;
	if (!CS_SECURE(cs)) {
		printf("sigreturn: cs = 0x%x\n", cs);
		ksiginfo_init_trap(&ksi);
		ksi.ksi_signo = SIGBUS;
		ksi.ksi_code = BUS_OBJERR;
		ksi.ksi_trapno = T_PROTFLT;
		ksi.ksi_addr = (void *)regs->tf_rip;
		trapsignal(td, &ksi);
		return (EINVAL);
	}

	ret = set_fpcontext(td, &ucp->uc_mcontext);
	if (ret != 0)
		return (ret);
	bcopy(&ucp->uc_mcontext.mc_rdi, regs, sizeof(*regs));

	PROC_LOCK(p);
#if defined(COMPAT_43)
	if (ucp->uc_mcontext.mc_onstack & 1)
		td->td_sigstk.ss_flags |= SS_ONSTACK;
	else
		td->td_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	td->td_sigmask = ucp->uc_sigmask;
	SIG_CANTMASK(td->td_sigmask);
	signotify(td);
	PROC_UNLOCK(p);
	td->td_pcb->pcb_flags |= PCB_FULLCTX;
	return (EJUSTRETURN);
}

#ifdef COMPAT_FREEBSD4
int
freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap)
{

	return sigreturn(td, (struct sigreturn_args *)uap);
}
#endif


/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/* Get current clock frequency for the given cpu id. */
int
cpu_est_clockrate(int cpu_id, uint64_t *rate)
{
	register_t reg;
	uint64_t tsc1, tsc2;

	if (pcpu_find(cpu_id) == NULL || rate == NULL)
		return (EINVAL);

	/* If we're booting, trust the rate calibrated moments ago. */
	if (cold) {
		*rate = tsc_freq;
		return (0);
	}

#ifdef SMP
	/* Schedule ourselves on the indicated cpu. */
	thread_lock(curthread);
	sched_bind(curthread, cpu_id);
	thread_unlock(curthread);
#endif

	/* Calibrate by measuring a short delay. */
	reg = intr_disable();
	tsc1 = rdtsc();
	DELAY(1000);
	tsc2 = rdtsc();
	intr_restore(reg);

#ifdef SMP
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
#endif

	/*
	 * Calculate the difference in readings, convert to a frequency
	 * in Hz, and subtract 0.5% of the total.  Empirical testing has
	 * shown that overhead in DELAY() works out to approximately
	 * this value.
	 */
	tsc2 -= tsc1;
	*rate = tsc2 * 1000 - tsc2 * 5;
	return (0);
}
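/*
 * Arithmetic note (not in the original source): DELAY(1000) spins for
 * about one millisecond, so tsc2 holds roughly freq / 1000 ticks and
 * tsc2 * 1000 is the estimated rate in Hz.  Subtracting tsc2 * 5
 * removes 5/1000 = 0.5% of that product, the measured DELAY() overhead
 * mentioned above.
 */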
/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		__asm__ ("hlt");
}

void (*cpu_idle_hook)(void) = NULL;	/* ACPI idle hook. */

static void
cpu_idle_hlt(int busy)
{
	/*
	 * We must absolutely guarantee that hlt is the next instruction
	 * after sti or we introduce a timing window.
	 */
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else
		__asm __volatile("sti; hlt");
}

static void
cpu_idle_acpi(int busy)
{
	disable_intr();
	if (sched_runnable())
		enable_intr();
	else if (cpu_idle_hook)
		cpu_idle_hook();
	else
		__asm __volatile("sti; hlt");
}

static void
cpu_idle_spin(int busy)
{
	return;
}

void (*cpu_idle_fn)(int) = cpu_idle_acpi;

void
cpu_idle(int busy)
{
#ifdef SMP
	if (mp_grab_cpu_hlt())
		return;
#endif
	cpu_idle_fn(busy);
}

/*
 * mwait cpu power states.  Lower 4 bits are sub-states.
 */
#define	MWAIT_C0	0xf0
#define	MWAIT_C1	0x00
#define	MWAIT_C2	0x10
#define	MWAIT_C3	0x20
#define	MWAIT_C4	0x30

#define	MWAIT_DISABLED	0x0
#define	MWAIT_WOKEN	0x1
#define	MWAIT_WAITING	0x2

static void
cpu_idle_mwait(int busy)
{
	int *mwait;

	mwait = (int *)PCPU_PTR(monitorbuf);
	*mwait = MWAIT_WAITING;
	if (sched_runnable())
		return;
	cpu_monitor(mwait, 0, 0);
	if (*mwait == MWAIT_WAITING)
		cpu_mwait(0, MWAIT_C1);
}

static void
cpu_idle_mwait_hlt(int busy)
{
	int *mwait;

	mwait = (int *)PCPU_PTR(monitorbuf);
	if (busy == 0) {
		*mwait = MWAIT_DISABLED;
		cpu_idle_hlt(busy);
		return;
	}
	*mwait = MWAIT_WAITING;
	if (sched_runnable())
		return;
	cpu_monitor(mwait, 0, 0);
	if (*mwait == MWAIT_WAITING)
		cpu_mwait(0, MWAIT_C1);
}

int
cpu_idle_wakeup(int cpu)
{
	struct pcpu *pcpu;
	int *mwait;

	if (cpu_idle_fn == cpu_idle_spin)
		return (1);
	if (cpu_idle_fn != cpu_idle_mwait && cpu_idle_fn != cpu_idle_mwait_hlt)
		return (0);
	pcpu = pcpu_find(cpu);
	mwait = (int *)pcpu->pc_monitorbuf;
	/*
	 * This doesn't need to be atomic since missing the race will
	 * simply result in unnecessary IPIs.
	 */
	if (cpu_idle_fn == cpu_idle_mwait_hlt && *mwait == MWAIT_DISABLED)
		return (0);
	*mwait = MWAIT_WOKEN;

	return (1);
}
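/*
 * Note (not in the original source): the mwait handshake above avoids
 * wakeup IPIs because cpu_monitor() arms the monitor hardware on the
 * per-CPU monitorbuf word before cpu_mwait() sleeps.  cpu_idle_wakeup()
 * only stores MWAIT_WOKEN into that word; the store touches the
 * monitored cache line and releases the target CPU from mwait.  The
 * recheck of *mwait between cpu_monitor() and cpu_mwait() closes the
 * window where a wakeup arrives after the sched_runnable() test.
 */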
/*
 * Ordered by speed/power consumption.
 */
struct {
	void	*id_fn;
	char	*id_name;
} idle_tbl[] = {
	{ cpu_idle_spin, "spin" },
	{ cpu_idle_mwait, "mwait" },
	{ cpu_idle_mwait_hlt, "mwait_hlt" },
	{ cpu_idle_hlt, "hlt" },
	{ cpu_idle_acpi, "acpi" },
	{ NULL, NULL }
};

static int
idle_sysctl_available(SYSCTL_HANDLER_ARGS)
{
	char *avail, *p;
	int error;
	int i;

	avail = malloc(256, M_TEMP, M_WAITOK);
	p = avail;
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
		p += sprintf(p, "%s, ", idle_tbl[i].id_name);
	}
	error = sysctl_handle_string(oidp, avail, 0, req);
	free(avail, M_TEMP);
	return (error);
}

static int
idle_sysctl(SYSCTL_HANDLER_ARGS)
{
	char buf[16];
	int error;
	char *p;
	int i;

	p = "unknown";
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (idle_tbl[i].id_fn == cpu_idle_fn) {
			p = idle_tbl[i].id_name;
			break;
		}
	}
	strncpy(buf, p, sizeof(buf));
	error = sysctl_handle_string(oidp, buf, sizeof(buf), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	for (i = 0; idle_tbl[i].id_name != NULL; i++) {
		if (strstr(idle_tbl[i].id_name, "mwait") &&
		    (cpu_feature2 & CPUID2_MON) == 0)
			continue;
		if (strcmp(idle_tbl[i].id_name, buf))
			continue;
		cpu_idle_fn = idle_tbl[i].id_fn;
		return (0);
	}
	return (EINVAL);
}

SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD,
    0, 0, idle_sysctl_available, "A", "list of available idle functions");

SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0,
    idle_sysctl, "A", "currently selected idle function");
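/*
 * Usage note (not in the original source): the idle policy can be
 * inspected and changed at runtime via the sysctls defined above, e.g.:
 *
 *	# sysctl machdep.idle_available
 *	# sysctl machdep.idle=hlt
 *
 * Strings containing "mwait" are rejected when the CPU does not
 * advertise CPUID2_MON (the MONITOR/MWAIT feature).
 */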
/*
 * Clear registers on exec
 */
void
exec_setregs(struct thread *td, u_long entry, u_long stack, u_long ps_strings)
{
	struct trapframe *regs = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	critical_enter();
	wrmsr(MSR_FSBASE, 0);
	wrmsr(MSR_KGSBASE, 0);	/* User value while we're in the kernel */
	pcb->pcb_fsbase = 0;
	pcb->pcb_gsbase = 0;
	critical_exit();
	load_ds(_udatasel);
	load_es(_udatasel);
	load_fs(_udatasel);
	load_gs(_udatasel);
	pcb->pcb_ds = _udatasel;
	pcb->pcb_es = _udatasel;
	pcb->pcb_fs = _udatasel;
	pcb->pcb_gs = _udatasel;

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_rip = entry;
	regs->tf_rsp = ((stack - 8) & ~0xFul) + 8;
	regs->tf_rdi = stack;		/* argv */
	regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_cs = _ucodesel;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == PCPU_GET(curpcb)) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Drop the FP state if we hold it, so that the process gets a
	 * clean FP state if it uses the FPU again.
	 */
	fpstate_drop(td);
}

void
cpu_setregs(void)
{
	register_t cr0;

	cr0 = rcr0();
	/*
	 * CR0_MP, CR0_NE and CR0_TS are also set by npx_probe() for the
	 * BSP.  See the comments there about why we set them.
	 */
	cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM;
	load_cr0(cr0);
}

/*
 * Initialize amd64 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

struct user_segment_descriptor gdt[NGDT * MAXCPU];/* global descriptor table */
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */

static char dblfault_stack[PAGE_SIZE] __aligned(16);

struct amd64tss common_tss[MAXCPU];

/* software prototypes -- in more palatable form */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	0x0,			/* segment base address */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0,			/* long */
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GCODE_SEL	1 Code Descriptor for kernel */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	SEL_KPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	1,			/* long */
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GDATA_SEL	2 Data Descriptor for kernel */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_KPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	1,			/* long */
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GUCODE32_SEL	3 32 bit Code Descriptor for user */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0,			/* long */
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GUDATA_SEL	4 32/64 bit Data Descriptor for user */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0,			/* long */
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GUCODE_SEL	5 64 bit Code Descriptor for user */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	1,			/* long */
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GPROC0_SEL	6 Proc 0 Tss Descriptor */
{
	0x0,			/* segment base address */
	sizeof(struct amd64tss)-1,/* length - all address space */
	SDT_SYSTSS,		/* segment type */
	SEL_KPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0,			/* long */
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* Actually, the TSS is a system descriptor which is double size */
{	0x0,			/* segment base address */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0,			/* long */
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GUGS32_SEL	8 32 bit GS Descriptor for user */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0,			/* long */
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
};
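/*
 * Note (not in the original source): in long mode a TSS descriptor is
 * 16 bytes, twice the size of a normal segment descriptor, so
 * GPROC0_SEL occupies two consecutive GDT slots.  The all-zero entry
 * above is the upper half; hammer_time() therefore skips GPROC0_SEL + 1
 * when converting these prototypes with ssdtosd() and converts the pair
 * with ssdtosyssd() instead.
 */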
void
setidt(int idx, inthand_t *func, int typ, int dpl, int ist)
{
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = (uintptr_t)func;
	ip->gd_selector = GSEL(GCODE_SEL, SEL_KPL);
	ip->gd_ist = ist;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((uintptr_t)func) >> 16;
}

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm), IDTVEC(dblfault),
	IDTVEC(fast_syscall), IDTVEC(fast_syscall32);

void
sdtossd(struct user_segment_descriptor *sd,
    struct soft_segment_descriptor *ssd)
{

	ssd->ssd_base  = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type  = sd->sd_type;
	ssd->ssd_dpl   = sd->sd_dpl;
	ssd->ssd_p     = sd->sd_p;
	ssd->ssd_long  = sd->sd_long;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran  = sd->sd_gran;
}

void
ssdtosd(struct soft_segment_descriptor *ssd,
    struct user_segment_descriptor *sd)
{

	sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
	sd->sd_hibase = (ssd->ssd_base >> 24) & 0xff;
	sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
	sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
	sd->sd_type  = ssd->ssd_type;
	sd->sd_dpl   = ssd->ssd_dpl;
	sd->sd_p     = ssd->ssd_p;
	sd->sd_long  = ssd->ssd_long;
	sd->sd_def32 = ssd->ssd_def32;
	sd->sd_gran  = ssd->ssd_gran;
}

void
ssdtosyssd(struct soft_segment_descriptor *ssd,
    struct system_segment_descriptor *sd)
{

	sd->sd_lobase = (ssd->ssd_base) & 0xffffff;
	sd->sd_hibase = (ssd->ssd_base >> 24) & 0xfffffffffful;
	sd->sd_lolimit = (ssd->ssd_limit) & 0xffff;
	sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf;
	sd->sd_type  = ssd->ssd_type;
	sd->sd_dpl   = ssd->ssd_dpl;
	sd->sd_p     = ssd->ssd_p;
	sd->sd_gran  = ssd->ssd_gran;
}
#if !defined(DEV_ATPIC) && defined(DEV_ISA)
#include <isa/isavar.h>
#include <isa/isareg.h>
/*
 * Return a bitmap of the current interrupt requests.  This is 8259-specific
 * and is only suitable for use at probe time.
 * This is only here to pacify sio.  It is NOT FATAL if this doesn't work.
 * It shouldn't be here.  There should probably be an APIC centric
 * implementation in the apic driver code, if at all.
 */
intrmask_t
isa_irq_pending(void)
{
	u_char irr1;
	u_char irr2;

	irr1 = inb(IO_ICU1);
	irr2 = inb(IO_ICU2);
	return ((irr2 << 8) | irr1);
}
#endif

u_int basemem;

/*
 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * If we cannot accurately determine the physical memory map, then use
 * the value from the 0xE801 call, and failing that, the RTC.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 *
 * XXX first should be vm_paddr_t.
 */
static void
getmemsize(caddr_t kmdp, u_int64_t first)
{
	int i, off, physmap_idx, pa_indx, da_indx;
	vm_paddr_t pa, physmap[PHYSMAP_SIZE];
	u_long physmem_tunable;
	pt_entry_t *pte;
	struct bios_smap *smapbase, *smap, *smapend;
	u_int32_t smapsize;
	quad_t dcons_addr, dcons_size;

	bzero(physmap, sizeof(physmap));
	basemem = 0;
	physmap_idx = 0;

	/*
	 * Get the memory map from INT 15:E820, kindly supplied by the loader.
	 *
	 * subr_module.c says:
	 * "Consumer may safely assume that size value precedes data."
	 * i.e. an int32_t immediately precedes smap.
	 */
	smapbase = (struct bios_smap *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_SMAP);
	if (smapbase == NULL)
		panic("No BIOS smap info from loader!");

	smapsize = *((u_int32_t *)smapbase - 1);
	smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize);

	for (smap = smapbase; smap < smapend; smap++) {
		if (boothowto & RB_VERBOSE)
			printf("SMAP type=%02x base=%016lx len=%016lx\n",
			    smap->type, smap->base, smap->length);

		if (smap->type != SMAP_TYPE_MEMORY)
			continue;

		if (smap->length == 0)
			continue;

		for (i = 0; i <= physmap_idx; i += 2) {
			if (smap->base < physmap[i + 1]) {
				if (boothowto & RB_VERBOSE)
					printf(
	"Overlapping or non-monotonic memory region, ignoring second region\n");
				continue;
			}
		}

		if (smap->base == physmap[physmap_idx + 1]) {
			physmap[physmap_idx + 1] += smap->length;
			continue;
		}

		physmap_idx += 2;
		if (physmap_idx == PHYSMAP_SIZE) {
			printf(
		"Too many segments in the physical address map, giving up\n");
			break;
		}
		physmap[physmap_idx] = smap->base;
		physmap[physmap_idx + 1] = smap->base + smap->length;
	}

	/*
	 * Find the 'base memory' segment for SMP
	 */
	basemem = 0;
	for (i = 0; i <= physmap_idx; i += 2) {
		if (physmap[i] == 0x00000000) {
			basemem = physmap[i + 1] / 1024;
			break;
		}
	}
	if (basemem == 0)
		panic("BIOS smap did not include a basemem segment!");

#ifdef SMP
	/* make hole for AP bootstrap code */
	physmap[1] = mp_bootaddress(physmap[1] / 1024);
#endif

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".  We may adjust this
	 * based on ``hw.physmem'' and the results of the memory test.
	 */
	Maxmem = atop(physmap[physmap_idx + 1]);

#ifdef MAXMEM
	Maxmem = MAXMEM / 4;
#endif

	if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable))
		Maxmem = atop(physmem_tunable);

	/*
	 * Don't allow MAXMEM or hw.physmem to extend the amount of memory
	 * in the system.
	 */
	if (Maxmem > atop(physmap[physmap_idx + 1]))
		Maxmem = atop(physmap[physmap_idx + 1]);

	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
	    (boothowto & RB_VERBOSE))
		printf("Physical memory use set to %ldK\n", Maxmem * 4);

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(&first);

	/*
	 * Size up each available chunk of physical memory.
	 */
	physmap[0] = PAGE_SIZE;		/* mask off page 0 */
	pa_indx = 0;
	da_indx = 1;
	phys_avail[pa_indx++] = physmap[0];
	phys_avail[pa_indx] = physmap[0];
	dump_avail[da_indx] = physmap[0];
	pte = CMAP1;

	/*
	 * Get dcons buffer address
	 */
	if (getenv_quad("dcons.addr", &dcons_addr) == 0 ||
	    getenv_quad("dcons.size", &dcons_size) == 0)
		dcons_addr = 0;

	/*
	 * physmap is in bytes, so when converting to page boundaries,
	 * round up the start address and round down the end address.
	 */
	for (i = 0; i <= physmap_idx; i += 2) {
		vm_paddr_t end;

		end = ptoa((vm_paddr_t)Maxmem);
		if (physmap[i + 1] < end)
			end = trunc_page(physmap[i + 1]);
		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
			int tmp, page_bad, full;
			int *ptr = (int *)CADDR1;

			full = FALSE;
			/*
			 * block out kernel memory as not available.
			 */
			if (pa >= 0x100000 && pa < first)
				goto do_dump_avail;

			/*
			 * block out dcons buffer
			 */
			if (dcons_addr > 0
			    && pa >= trunc_page(dcons_addr)
			    && pa < dcons_addr + dcons_size)
				goto do_dump_avail;

			page_bad = FALSE;

			/*
			 * map page into kernel: valid, read/write,non-cacheable
			 */
			*pte = pa | PG_V | PG_RW | PG_N;
			invltlb();

			tmp = *(int *)ptr;
			/*
			 * Test for alternating 1's and 0's
			 */
			*(volatile int *)ptr = 0xaaaaaaaa;
			if (*(volatile int *)ptr != 0xaaaaaaaa)
				page_bad = TRUE;
			/*
			 * Test for alternating 0's and 1's
			 */
			*(volatile int *)ptr = 0x55555555;
			if (*(volatile int *)ptr != 0x55555555)
				page_bad = TRUE;
			/*
			 * Test for all 1's
			 */
			*(volatile int *)ptr = 0xffffffff;
			if (*(volatile int *)ptr != 0xffffffff)
				page_bad = TRUE;
			/*
			 * Test for all 0's
			 */
			*(volatile int *)ptr = 0x0;
			if (*(volatile int *)ptr != 0x0)
				page_bad = TRUE;
			/*
			 * Restore original value.
			 */
			*(int *)ptr = tmp;

			/*
			 * Adjust array of valid/good pages.
			 */
			if (page_bad == TRUE)
				continue;
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer.  Otherwise start a new chunk.
			 * Note that "end" entries point one past the last
			 * byte, making the range >= start and < end.
			 * If we're also doing a speculative memory
			 * test and we're at or past the end, bump up Maxmem
			 * so that we keep going.  The first bad page
			 * will terminate the loop.
			 */
			if (phys_avail[pa_indx] == pa) {
				phys_avail[pa_indx] += PAGE_SIZE;
			} else {
				pa_indx++;
				if (pa_indx == PHYS_AVAIL_ARRAY_END) {
					printf(
		"Too many holes in the physical address space, giving up\n");
					pa_indx--;
					full = TRUE;
					goto do_dump_avail;
				}
				phys_avail[pa_indx++] = pa;	/* start */
				phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
			}
			physmem++;
do_dump_avail:
			if (dump_avail[da_indx] == pa) {
				dump_avail[da_indx] += PAGE_SIZE;
			} else {
				da_indx++;
				if (da_indx == DUMP_AVAIL_ARRAY_END) {
					da_indx--;
					goto do_next;
				}
				dump_avail[da_indx++] = pa;	/* start */
				dump_avail[da_indx] = pa + PAGE_SIZE; /* end */
			}
do_next:
			if (full)
				break;
		}
	}
	*pte = 0;
	invltlb();

	/*
	 * XXX
	 * The last chunk must contain at least one page plus the message
	 * buffer to avoid complicating other code (message buffer address
	 * calculation, etc.).
	 */
	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
	    round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
		phys_avail[pa_indx--] = 0;
		phys_avail[pa_indx--] = 0;
	}

	Maxmem = atop(phys_avail[pa_indx]);

	/* Trim off space for the message buffer. */
	phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);

	/* Map the message buffer. */
	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] +
		    off);
}
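/*
 * Layout note (not in the original source; values illustrative): after
 * getmemsize() returns, phys_avail[] and dump_avail[] hold (start, end)
 * pairs in bytes, e.g.:
 *
 *	phys_avail[0] = 0x1000	first chunk start (page 0 masked off)
 *	phys_avail[1] = ...	first chunk end (exclusive)
 *	...
 *	0, 0			terminating pair
 *
 * This is why PHYS_AVAIL_ARRAY_END leaves two spare entries: a 0/0 pair
 * always terminates the list.
 */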
u_int64_t
hammer_time(u_int64_t modulep, u_int64_t physfree)
{
	caddr_t kmdp;
	int gsel_tss, x;
	struct pcpu *pc;
	u_int64_t msr;
	char *env;

	thread0.td_kstack = physfree + KERNBASE;
	bzero((void *)thread0.td_kstack, KSTACK_PAGES * PAGE_SIZE);
	physfree += KSTACK_PAGES * PAGE_SIZE;
	thread0.td_pcb = (struct pcb *)
	    (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;

	/*
	 * This may be done better later if it gets more high level
	 * components in it. If so just link td->td_proc here.
	 */
	proc_linkup0(&proc0, &thread0);

	preload_metadata = (caddr_t)(uintptr_t)(modulep + KERNBASE);
	preload_bootstrap_relocate(KERNBASE);
	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");
	boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int);
	kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *) + KERNBASE;
#ifdef DDB
	ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t);
	ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t);
#endif

	/* Init basic tunables, hz etc */
	init_param1();

	/*
	 * make gdt memory segments
	 */
	gdt_segs[GPROC0_SEL].ssd_base = (uintptr_t)&common_tss[0];

	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1))
			ssdtosd(&gdt_segs[x], &gdt[x]);
	}
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[GPROC0_SEL]);

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (long) gdt;
	lgdt(&r_gdt);
	pc = &__pcpu[0];

	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)pc);
	wrmsr(MSR_KGSBASE, 0);		/* User value while in the kernel */

	pcpu_init(pc, 0, sizeof(struct pcpu));
	PCPU_SET(prvspace, pc);
	PCPU_SET(curthread, &thread0);
	PCPU_SET(curpcb, thread0.td_pcb);
	PCPU_SET(tssp, &common_tss[0]);

	/*
	 * Initialize mutexes.
	 *
	 * icu_lock: in order to allow an interrupt to occur in a critical
	 *	     section, to set pcpu->ipending (etc...) properly, we
	 *	     must be able to get the icu lock, so it can't be
	 *	     under witness.
	 */
	mutex_init();
	mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS);

	/* exceptions */
	for (x = 0; x < NIDT; x++)
		setidt(x, &IDTVEC(rsvd), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_DE, &IDTVEC(div), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_DB, &IDTVEC(dbg), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYSIGT, SEL_KPL, 1);
	setidt(IDT_BP, &IDTVEC(bpt), SDT_SYSIGT, SEL_UPL, 0);
	setidt(IDT_OF, &IDTVEC(ofl), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_BR, &IDTVEC(bnd), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_UD, &IDTVEC(ill), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_NM, &IDTVEC(dna), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_DF, &IDTVEC(dblfault), SDT_SYSIGT, SEL_KPL, 1);
	setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_TS, &IDTVEC(tss), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_NP, &IDTVEC(missing), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_SS, &IDTVEC(stk), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_GP, &IDTVEC(prot), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_PF, &IDTVEC(page), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_MF, &IDTVEC(fpu), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_AC, &IDTVEC(align), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_MC, &IDTVEC(mchk), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_XF, &IDTVEC(xmm), SDT_SYSIGT, SEL_KPL, 0);

	r_idt.rd_limit = sizeof(idt0) - 1;
	r_idt.rd_base = (long) idt;
	lidt(&r_idt);

	/*
	 * Initialize the i8254 before the console so that console
	 * initialization can use DELAY().
	 */
	i8254_init();

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

#ifdef DEV_ISA
#ifdef DEV_ATPIC
	elcr_probe();
	atpic_startup();
#else
	/* Reset and mask the atpics and leave them shut down. */
	atpic_reset();

	/*
	 * Point the ICU spurious interrupt vectors at the APIC spurious
	 * interrupt handler.
	 */
	setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
	setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0);
#endif
#else
#error "have you forgotten the isa device?";
#endif

	kdb_init();

#ifdef KDB
	if (boothowto & RB_KDB)
		kdb_enter(KDB_WHY_BOOTFLAGS,
		    "Boot flags requested debugger");
#endif

	identify_cpu();		/* Final stage of CPU initialization */
	initializecpu();	/* Initialize CPU registers */

	/* make an initial tss so cpu can get interrupt stack on syscall! */
	common_tss[0].tss_rsp0 = thread0.td_kstack +
	    KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb);
	/* Ensure the stack is aligned to 16 bytes */
	common_tss[0].tss_rsp0 &= ~0xFul;
	PCPU_SET(rsp0, common_tss[0].tss_rsp0);

	/* doublefault stack space, runs on ist1 */
	common_tss[0].tss_ist1 = (long)&dblfault_stack[sizeof(dblfault_stack)];

	/* Set the IO permission bitmap (empty due to tss seg limit) */
	common_tss[0].tss_iobase = sizeof(struct amd64tss);

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	    ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D);

	getmemsize(kmdp, physfree);
	init_param2(physmem);

	/* now running on new page tables, configured, and u/iom is accessible */

	msgbufinit(msgbufp, MSGBUF_SIZE);
	fpuinit();

	/* transfer to user mode */

	_ucodesel = GSEL(GUCODE_SEL, SEL_UPL);
	_udatasel = GSEL(GUDATA_SEL, SEL_UPL);
	_ucode32sel = GSEL(GUCODE32_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	thread0.td_pcb->pcb_flags = 0;
	thread0.td_pcb->pcb_cr3 = KPML4phys;
	thread0.td_frame = &proc0_tf;

	env = getenv("kernelname");
	if (env != NULL)
		strlcpy(kernelname, env, sizeof(kernelname));

	/* Location of kernel stack for locore */
	return ((u_int64_t)thread0.td_pcb);
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{

	pcpu->pc_acpi_id = 0xffffffff;
}

void
spinlock_enter(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0)
		td->td_md.md_saved_flags = intr_disable();
	td->td_md.md_spinlock_count++;
	critical_enter();
}

void
spinlock_exit(void)
{
	struct thread *td;

	td = curthread;
	critical_exit();
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(td->td_md.md_saved_flags);
}

/*
 * Construct a PCB from a trapframe. This is called from kdb_trap() where
 * we want to start a backtrace from the function that caused us to enter
 * the debugger. We have the context in the trapframe, but base the trace
 * on the PCB. The PCB doesn't have to be perfect, as long as it contains
 * enough for a backtrace.
 */
void
makectx(struct trapframe *tf, struct pcb *pcb)
{

	pcb->pcb_r12 = tf->tf_r12;
	pcb->pcb_r13 = tf->tf_r13;
	pcb->pcb_r14 = tf->tf_r14;
	pcb->pcb_r15 = tf->tf_r15;
	pcb->pcb_rbp = tf->tf_rbp;
	pcb->pcb_rbx = tf->tf_rbx;
	pcb->pcb_rip = tf->tf_rip;
	pcb->pcb_rsp = (ISPL(tf->tf_cs)) ? tf->tf_rsp : (long)(tf + 1) - 8;
}

int
ptrace_set_pc(struct thread *td, unsigned long addr)
{
	td->td_frame->tf_rip = addr;
	return (0);
}

int
ptrace_single_step(struct thread *td)
{
	td->td_frame->tf_rflags |= PSL_T;
	return (0);
}

int
ptrace_clear_single_step(struct thread *td)
{
	td->td_frame->tf_rflags &= ~PSL_T;
	return (0);
}

int
fill_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tp;

	tp = td->td_frame;
	regs->r_r15 = tp->tf_r15;
	regs->r_r14 = tp->tf_r14;
	regs->r_r13 = tp->tf_r13;
	regs->r_r12 = tp->tf_r12;
	regs->r_r11 = tp->tf_r11;
	regs->r_r10 = tp->tf_r10;
	regs->r_r9  = tp->tf_r9;
	regs->r_r8  = tp->tf_r8;
	regs->r_rdi = tp->tf_rdi;
	regs->r_rsi = tp->tf_rsi;
	regs->r_rbp = tp->tf_rbp;
	regs->r_rbx = tp->tf_rbx;
	regs->r_rdx = tp->tf_rdx;
	regs->r_rcx = tp->tf_rcx;
	regs->r_rax = tp->tf_rax;
	regs->r_rip = tp->tf_rip;
	regs->r_cs = tp->tf_cs;
	regs->r_rflags = tp->tf_rflags;
	regs->r_rsp = tp->tf_rsp;
	regs->r_ss = tp->tf_ss;
	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct trapframe *tp;
	register_t rflags;

	tp = td->td_frame;
	rflags = regs->r_rflags & 0xffffffff;
	if (!EFL_SECURE(rflags, tp->tf_rflags) || !CS_SECURE(regs->r_cs))
		return (EINVAL);
	tp->tf_r15 = regs->r_r15;
	tp->tf_r14 = regs->r_r14;
	tp->tf_r13 = regs->r_r13;
	tp->tf_r12 = regs->r_r12;
	tp->tf_r11 = regs->r_r11;
	tp->tf_r10 = regs->r_r10;
	tp->tf_r9  = regs->r_r9;
	tp->tf_r8  = regs->r_r8;
	tp->tf_rdi = regs->r_rdi;
	tp->tf_rsi = regs->r_rsi;
	tp->tf_rbp = regs->r_rbp;
	tp->tf_rbx = regs->r_rbx;
	tp->tf_rdx = regs->r_rdx;
	tp->tf_rcx = regs->r_rcx;
	tp->tf_rax = regs->r_rax;
	tp->tf_rip = regs->r_rip;
	tp->tf_cs = regs->r_cs;
	tp->tf_rflags = rflags;
	tp->tf_rsp = regs->r_rsp;
	tp->tf_ss = regs->r_ss;
	td->td_pcb->pcb_flags |= PCB_FULLCTX;
	return (0);
}
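/*
 * Note (not in the original source): set_regs() depends on the same
 * CS_SECURE()/EFL_SECURE() checks used by sigreturn() and
 * set_mcontext(): the new %cs must be a user-privilege selector and
 * only the PSL_USERCHANGE bits of %rflags may differ from the current
 * frame, so a debugger cannot grant a traced process kernel privilege
 * or reserved flag bits.
 */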
/* XXX check all this stuff! */
/* externalize from sv_xmm */
static void
fill_fpregs_xmm(struct savefpu *sv_xmm, struct fpreg *fpregs)
{
	struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* pcb -> fpregs */
	bzero(fpregs, sizeof(*fpregs));

	/* FPU control/status */
	penv_fpreg->en_cw = penv_xmm->en_cw;
	penv_fpreg->en_sw = penv_xmm->en_sw;
	penv_fpreg->en_tw = penv_xmm->en_tw;
	penv_fpreg->en_opcode = penv_xmm->en_opcode;
	penv_fpreg->en_rip = penv_xmm->en_rip;
	penv_fpreg->en_rdp = penv_xmm->en_rdp;
	penv_fpreg->en_mxcsr = penv_xmm->en_mxcsr;
	penv_fpreg->en_mxcsr_mask = penv_xmm->en_mxcsr_mask;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		bcopy(sv_xmm->sv_fp[i].fp_acc.fp_bytes, fpregs->fpr_acc[i], 10);

	/* SSE registers */
	for (i = 0; i < 16; ++i)
		bcopy(sv_xmm->sv_xmm[i].xmm_bytes, fpregs->fpr_xacc[i], 16);
}

/* internalize from fpregs into sv_xmm */
static void
set_fpregs_xmm(struct fpreg *fpregs, struct savefpu *sv_xmm)
{
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
	int i;

	/* fpregs -> pcb */
	/* FPU control/status */
	penv_xmm->en_cw = penv_fpreg->en_cw;
	penv_xmm->en_sw = penv_fpreg->en_sw;
	penv_xmm->en_tw = penv_fpreg->en_tw;
	penv_xmm->en_opcode = penv_fpreg->en_opcode;
	penv_xmm->en_rip = penv_fpreg->en_rip;
	penv_xmm->en_rdp = penv_fpreg->en_rdp;
	penv_xmm->en_mxcsr = penv_fpreg->en_mxcsr;
	penv_xmm->en_mxcsr_mask = penv_fpreg->en_mxcsr_mask & cpu_mxcsr_mask;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		bcopy(fpregs->fpr_acc[i], sv_xmm->sv_fp[i].fp_acc.fp_bytes, 10);

	/* SSE registers */
	for (i = 0; i < 16; ++i)
		bcopy(fpregs->fpr_xacc[i], sv_xmm->sv_xmm[i].xmm_bytes, 16);
}

/* externalize from td->pcb */
int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{

	fill_fpregs_xmm(&td->td_pcb->pcb_save, fpregs);
	return (0);
}

/* internalize to td->pcb */
int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{

	set_fpregs_xmm(fpregs, &td->td_pcb->pcb_save);
	return (0);
}

/*
 * Get machine context.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
{
	struct trapframe *tp;

	tp = td->td_frame;
	PROC_LOCK(curthread->td_proc);
	mcp->mc_onstack = sigonstack(tp->tf_rsp);
	PROC_UNLOCK(curthread->td_proc);
	mcp->mc_r15 = tp->tf_r15;
	mcp->mc_r14 = tp->tf_r14;
	mcp->mc_r13 = tp->tf_r13;
	mcp->mc_r12 = tp->tf_r12;
	mcp->mc_r11 = tp->tf_r11;
	mcp->mc_r10 = tp->tf_r10;
	mcp->mc_r9  = tp->tf_r9;
	mcp->mc_r8  = tp->tf_r8;
	mcp->mc_rdi = tp->tf_rdi;
	mcp->mc_rsi = tp->tf_rsi;
	mcp->mc_rbp = tp->tf_rbp;
	mcp->mc_rbx = tp->tf_rbx;
	mcp->mc_rcx = tp->tf_rcx;
	mcp->mc_rflags = tp->tf_rflags;
	if (flags & GET_MC_CLEAR_RET) {
		mcp->mc_rax = 0;
		mcp->mc_rdx = 0;
		mcp->mc_rflags &= ~PSL_C;
	} else {
		mcp->mc_rax = tp->tf_rax;
		mcp->mc_rdx = tp->tf_rdx;
	}
	mcp->mc_rip = tp->tf_rip;
	mcp->mc_cs = tp->tf_cs;
	mcp->mc_rsp = tp->tf_rsp;
	mcp->mc_ss = tp->tf_ss;
	mcp->mc_len = sizeof(*mcp);
	get_fpcontext(td, mcp);
	return (0);
}

/*
 * Set machine context.
 *
 * However, we don't set any but the user modifiable flags, and we won't
 * touch the cs selector.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mcp)
{
	struct trapframe *tp;
	long rflags;
	int ret;

	tp = td->td_frame;
	if (mcp->mc_len != sizeof(*mcp))
		return (EINVAL);
	rflags = (mcp->mc_rflags & PSL_USERCHANGE) |
	    (tp->tf_rflags & ~PSL_USERCHANGE);
	ret = set_fpcontext(td, mcp);
	if (ret != 0)
		return (ret);
	tp->tf_r15 = mcp->mc_r15;
	tp->tf_r14 = mcp->mc_r14;
	tp->tf_r13 = mcp->mc_r13;
	tp->tf_r12 = mcp->mc_r12;
	tp->tf_r11 = mcp->mc_r11;
	tp->tf_r10 = mcp->mc_r10;
	tp->tf_r9  = mcp->mc_r9;
	tp->tf_r8  = mcp->mc_r8;
	tp->tf_rdi = mcp->mc_rdi;
	tp->tf_rsi = mcp->mc_rsi;
	tp->tf_rbp = mcp->mc_rbp;
	tp->tf_rbx = mcp->mc_rbx;
	tp->tf_rdx = mcp->mc_rdx;
	tp->tf_rcx = mcp->mc_rcx;
	tp->tf_rax = mcp->mc_rax;
	tp->tf_rip = mcp->mc_rip;
	tp->tf_rflags = rflags;
	tp->tf_rsp = mcp->mc_rsp;
	tp->tf_ss = mcp->mc_ss;
	td->td_pcb->pcb_flags |= PCB_FULLCTX;
	return (0);
}

static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{

	mcp->mc_ownedfp = fpugetregs(td, (struct savefpu *)&mcp->mc_fpstate);
	mcp->mc_fpformat = fpuformat();
}

static int
set_fpcontext(struct thread *td, const mcontext_t *mcp)
{
	struct savefpu *fpstate;

	if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
		return (0);
	else if (mcp->mc_fpformat != _MC_FPFMT_XMM)
		return (EINVAL);
	else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
		/* We don't care what state is left in the FPU or PCB. */
		fpstate_drop(td);
	else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
	    mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
		/*
		 * XXX we violate the dubious requirement that fpusetregs()
		 * be called with interrupts disabled.
		 * XXX obsolete on trap-16 systems?
		 */
		fpstate = (struct savefpu *)&mcp->mc_fpstate;
		fpstate->sv_env.en_mxcsr &= cpu_mxcsr_mask;
		fpusetregs(td, fpstate);
	} else
		return (EINVAL);
	return (0);
}

void
fpstate_drop(struct thread *td)
{
	register_t s;

	s = intr_disable();
	if (PCPU_GET(fpcurthread) == td)
		fpudrop();
	/*
	 * XXX force a full drop of the fpu.  The above only drops it if we
	 * owned it.
	 *
	 * XXX I don't much like fpugetregs()'s semantics of doing a full
	 * drop.  Dropping only to the pcb matches fnsave's behaviour.
	 * We only need to drop to !PCB_INITDONE in sendsig().  But
	 * sendsig() is the only caller of fpugetregs()... perhaps we just
	 * have too many layers.
	 */
	curthread->td_pcb->pcb_flags &= ~PCB_FPUINITDONE;
	intr_restore(s);
}

int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;

	if (td == NULL) {
		dbregs->dr[0] = rdr0();
		dbregs->dr[1] = rdr1();
		dbregs->dr[2] = rdr2();
		dbregs->dr[3] = rdr3();
		dbregs->dr[6] = rdr6();
		dbregs->dr[7] = rdr7();
	} else {
		pcb = td->td_pcb;
		dbregs->dr[0] = pcb->pcb_dr0;
		dbregs->dr[1] = pcb->pcb_dr1;
		dbregs->dr[2] = pcb->pcb_dr2;
		dbregs->dr[3] = pcb->pcb_dr3;
		dbregs->dr[6] = pcb->pcb_dr6;
		dbregs->dr[7] = pcb->pcb_dr7;
	}
	dbregs->dr[4] = 0;
	dbregs->dr[5] = 0;
	dbregs->dr[8] = 0;
	dbregs->dr[9] = 0;
	dbregs->dr[10] = 0;
	dbregs->dr[11] = 0;
	dbregs->dr[12] = 0;
	dbregs->dr[13] = 0;
	dbregs->dr[14] = 0;
	dbregs->dr[15] = 0;
	return (0);
}
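/*
 * Note (not in the original source): the DBREG_DR7_* macros used below
 * decode %dr7, which carries a local/global enable bit pair per
 * breakpoint in its low byte and, per breakpoint, 2-bit access type and
 * length fields in bits 16..31.  The checks reject the undefined access
 * type 0x02 and 8-byte lengths for 32-bit code selectors, and require
 * the high 32 bits of %dr6/%dr7 to be zero.
 */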
int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;
	int i;

	if (td == NULL) {
		load_dr0(dbregs->dr[0]);
		load_dr1(dbregs->dr[1]);
		load_dr2(dbregs->dr[2]);
		load_dr3(dbregs->dr[3]);
		load_dr6(dbregs->dr[6]);
		load_dr7(dbregs->dr[7]);
	} else {
		/*
		 * Don't let an illegal value for dr7 get set.  Specifically,
		 * check for undefined settings.  Setting these bit patterns
		 * results in undefined behaviour and can lead to an unexpected
		 * TRCTRAP or a general protection fault right here.
		 * Upper bits of dr6 and dr7 must not be set.
		 */
		for (i = 0; i < 4; i++) {
			if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
				return (EINVAL);
			if (td->td_frame->tf_cs == _ucode32sel &&
			    DBREG_DR7_LEN(dbregs->dr[7], i) == DBREG_DR7_LEN_8)
				return (EINVAL);
		}
		if ((dbregs->dr[6] & 0xffffffff00000000ul) != 0 ||
		    (dbregs->dr[7] & 0xffffffff00000000ul) != 0)
			return (EINVAL);

		pcb = td->td_pcb;

		/*
		 * Don't let a process set a breakpoint that is not within the
		 * process's address space.  If a process could do this, it
		 * could halt the system by setting a breakpoint in the kernel
		 * (if ddb was enabled).  Thus, we need to check to make sure
		 * that no breakpoints are being enabled for addresses outside
		 * the process's address space.
		 *
		 * XXX - what about when the watched area of the user's
		 * address space is written into from within the kernel
		 * ... wouldn't that still cause a breakpoint to be generated
		 * from within kernel mode?
		 */

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
			/* dr0 is enabled */
			if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
			/* dr1 is enabled */
			if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
			/* dr2 is enabled */
			if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
			/* dr3 is enabled */
			if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		pcb->pcb_dr0 = dbregs->dr[0];
		pcb->pcb_dr1 = dbregs->dr[1];
		pcb->pcb_dr2 = dbregs->dr[2];
		pcb->pcb_dr3 = dbregs->dr[3];
		pcb->pcb_dr6 = dbregs->dr[6];
		pcb->pcb_dr7 = dbregs->dr[7];

		pcb->pcb_flags |= PCB_DBREGS;
	}

	return (0);
}

void
reset_dbregs(void)
{

	load_dr7(0);	/* Turn off the control bits first */
	load_dr0(0);
	load_dr1(0);
	load_dr2(0);
	load_dr3(0);
	load_dr6(0);
}

/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0, otherwise.
 */
int
user_dbreg_trap(void)
{
	u_int64_t dr7, dr6;	/* debug registers dr6 and dr7 */
	u_int64_t bp;		/* breakpoint bits extracted from dr6 */
	int nbp;		/* number of breakpoints that triggered */
	caddr_t addr[4];	/* breakpoint addresses */
	int i;

	dr7 = rdr7();
	if ((dr7 & 0x000000ff) == 0) {
		/*
		 * all of the local and global enable bits in dr7 are
		 * zero, thus the trap couldn't have been caused by the
		 * hardware debug registers
		 */
		return (0);
	}

	nbp = 0;
	dr6 = rdr6();
	bp = dr6 & 0x0000000f;

	if (!bp) {
		/*
		 * None of the breakpoint bits are set, meaning this
		 * trap was not caused by any of the debug registers
		 */
		return (0);
	}

	/*
	 * at least one of the breakpoints was hit, check to see
	 * which ones and if any of them are user space addresses
	 */

	if (bp & 0x01) {
		addr[nbp++] = (caddr_t)rdr0();
	}
	if (bp & 0x02) {
		addr[nbp++] = (caddr_t)rdr1();
	}
	if (bp & 0x04) {
		addr[nbp++] = (caddr_t)rdr2();
	}
	if (bp & 0x08) {
		addr[nbp++] = (caddr_t)rdr3();
	}

	for (i = 0; i < nbp; i++) {
		if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
			/*
			 * addr[i] is in user space
			 */
			return (nbp);
		}
	}

	/*
	 * None of the breakpoints are in user space.
	 */
	return (0);
}

#ifdef KDB

/*
 * Provide inb() and outb() as functions.  They are normally only
 * available as macros calling inlined functions, thus cannot be
 * called from the debugger.
 *
 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined.
 */

#undef inb
#undef outb

/* silence compiler warnings */
u_char inb(u_int);
void outb(u_int, u_char);

u_char
inb(u_int port)
{
	u_char	data;
	/*
	 * We use %%dx and not %1 here because i/o is done at %dx and not at
	 * %edx, while gcc generates inferior code (movw instead of movl)
	 * if we tell it to load (u_short) port.
	 */
	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

void
outb(u_int port, u_char data)
{
	u_char	al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

#endif /* KDB */