machdep.c revision 174898
1/*- 2 * Copyright (c) 2003 Peter Wemm. 3 * Copyright (c) 1992 Terrence R. Lambert. 4 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * William Jolitz. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 
37 * 38 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 39 */ 40 41#include <sys/cdefs.h> 42__FBSDID("$FreeBSD: head/sys/amd64/amd64/machdep.c 174898 2007-12-25 17:52:02Z rwatson $"); 43 44#include "opt_atalk.h" 45#include "opt_atpic.h" 46#include "opt_compat.h" 47#include "opt_cpu.h" 48#include "opt_ddb.h" 49#include "opt_inet.h" 50#include "opt_ipx.h" 51#include "opt_isa.h" 52#include "opt_kstack_pages.h" 53#include "opt_maxmem.h" 54#include "opt_msgbuf.h" 55#include "opt_perfmon.h" 56 57#include <sys/param.h> 58#include <sys/proc.h> 59#include <sys/systm.h> 60#include <sys/bio.h> 61#include <sys/buf.h> 62#include <sys/bus.h> 63#include <sys/callout.h> 64#include <sys/clock.h> 65#include <sys/cons.h> 66#include <sys/cpu.h> 67#include <sys/eventhandler.h> 68#include <sys/exec.h> 69#include <sys/imgact.h> 70#include <sys/kdb.h> 71#include <sys/kernel.h> 72#include <sys/ktr.h> 73#include <sys/linker.h> 74#include <sys/lock.h> 75#include <sys/malloc.h> 76#include <sys/memrange.h> 77#include <sys/msgbuf.h> 78#include <sys/mutex.h> 79#include <sys/pcpu.h> 80#include <sys/ptrace.h> 81#include <sys/reboot.h> 82#include <sys/sched.h> 83#include <sys/signalvar.h> 84#include <sys/sysctl.h> 85#include <sys/sysent.h> 86#include <sys/sysproto.h> 87#include <sys/ucontext.h> 88#include <sys/vmmeter.h> 89 90#include <vm/vm.h> 91#include <vm/vm_extern.h> 92#include <vm/vm_kern.h> 93#include <vm/vm_page.h> 94#include <vm/vm_map.h> 95#include <vm/vm_object.h> 96#include <vm/vm_pager.h> 97#include <vm/vm_param.h> 98 99#ifdef DDB 100#ifndef KDB 101#error KDB must be enabled in order for DDB to work! 102#endif 103#endif 104#include <ddb/ddb.h> 105 106#include <net/netisr.h> 107 108#include <machine/clock.h> 109#include <machine/cpu.h> 110#include <machine/cputypes.h> 111#include <machine/intr_machdep.h> 112#include <machine/md_var.h> 113#include <machine/metadata.h> 114#include <machine/pc/bios.h> 115#include <machine/pcb.h> 116#include <machine/proc.h> 117#include <machine/reg.h> 118#include <machine/sigframe.h> 119#include <machine/specialreg.h> 120#ifdef PERFMON 121#include <machine/perfmon.h> 122#endif 123#include <machine/tss.h> 124#ifdef SMP 125#include <machine/smp.h> 126#endif 127 128#ifdef DEV_ATPIC 129#include <amd64/isa/icu.h> 130#else 131#include <machine/apicvar.h> 132#endif 133 134#include <isa/isareg.h> 135#include <isa/rtc.h> 136 137/* Sanity check for __curthread() */ 138CTASSERT(offsetof(struct pcpu, pc_curthread) == 0); 139 140extern u_int64_t hammer_time(u_int64_t, u_int64_t); 141 142extern void printcpuinfo(void); /* XXX header file */ 143extern void identify_cpu(void); 144extern void panicifcpuunsupported(void); 145 146#define CS_SECURE(cs) (ISPL(cs) == SEL_UPL) 147#define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0) 148 149static void cpu_startup(void *); 150static void get_fpcontext(struct thread *td, mcontext_t *mcp); 151static int set_fpcontext(struct thread *td, const mcontext_t *mcp); 152SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL) 153 154#ifdef DDB 155extern vm_offset_t ksym_start, ksym_end; 156#endif 157 158/* Intel ICH registers */ 159#define ICH_PMBASE 0x400 160#define ICH_SMI_EN ICH_PMBASE + 0x30 161 162int _udatasel, _ucodesel, _ucode32sel; 163 164int cold = 1; 165 166long Maxmem = 0; 167long realmem = 0; 168 169/* 170 * The number of PHYSMAP entries must be one less than the number of 171 * PHYSSEG entries because the PHYSMAP entry that spans the largest 172 * physical address that is accessible by ISA DMA is split into two 173 * PHYSSEG entries. 
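 *
 * For example (illustrative numbers), a single E820 region covering
 * 0 - 64MB would occupy one PHYSMAP base/bound pair, but because it
 * straddles the 16MB ISA DMA limit it later becomes two PHYSSEG
 * entries (0 - 16MB and 16MB - 64MB), which is why PHYSMAP_SIZE below
 * is sized for VM_PHYSSEG_MAX - 1 pairs.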
174 */ 175#define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1)) 176 177vm_paddr_t phys_avail[PHYSMAP_SIZE + 2]; 178vm_paddr_t dump_avail[PHYSMAP_SIZE + 2]; 179 180/* must be 2 less so 0 0 can signal end of chunks */ 181#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2) 182#define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2) 183 184struct kva_md_info kmi; 185 186static struct trapframe proc0_tf; 187struct region_descriptor r_gdt, r_idt; 188 189struct pcpu __pcpu[MAXCPU]; 190 191struct mtx icu_lock; 192 193struct mem_range_softc mem_range_softc; 194 195static void 196cpu_startup(dummy) 197 void *dummy; 198{ 199 char *sysenv; 200 201 /* 202 * On MacBooks, we need to disallow the legacy USB circuit to 203 * generate an SMI# because this can cause several problems, 204 * namely: incorrect CPU frequency detection and failure to 205 * start the APs. 206 * We do this by disabling a bit in the SMI_EN (SMI Control and 207 * Enable register) of the Intel ICH LPC Interface Bridge. 208 */ 209 sysenv = getenv("smbios.system.product"); 210 if (sysenv != NULL) { 211 if (strncmp(sysenv, "MacBook", 7) == 0) { 212 if (bootverbose) 213 printf("Disabling LEGACY_USB_EN bit on " 214 "Intel ICH.\n"); 215 outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8); 216 } 217 freeenv(sysenv); 218 } 219 220 /* 221 * Good {morning,afternoon,evening,night}. 222 */ 223 startrtclock(); 224 printcpuinfo(); 225 panicifcpuunsupported(); 226#ifdef PERFMON 227 perfmon_init(); 228#endif 229 printf("usable memory = %ju (%ju MB)\n", ptoa((uintmax_t)physmem), 230 ptoa((uintmax_t)physmem) / 1048576); 231 realmem = Maxmem; 232 /* 233 * Display any holes after the first chunk of extended memory. 234 */ 235 if (bootverbose) { 236 int indx; 237 238 printf("Physical memory chunk(s):\n"); 239 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) { 240 vm_paddr_t size; 241 242 size = phys_avail[indx + 1] - phys_avail[indx]; 243 printf( 244 "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n", 245 (uintmax_t)phys_avail[indx], 246 (uintmax_t)phys_avail[indx + 1] - 1, 247 (uintmax_t)size, (uintmax_t)size / PAGE_SIZE); 248 } 249 } 250 251 vm_ksubmap_init(&kmi); 252 253 printf("avail memory = %ju (%ju MB)\n", 254 ptoa((uintmax_t)cnt.v_free_count), 255 ptoa((uintmax_t)cnt.v_free_count) / 1048576); 256 257 /* 258 * Set up buffers, so they can be used to read disk labels. 259 */ 260 bufinit(); 261 vm_pager_bufferinit(); 262 263 cpu_setregs(); 264} 265 266/* 267 * Send an interrupt to process. 268 * 269 * Stack is set up to allow sigcode stored 270 * at top to call routine, followed by kcall 271 * to sigreturn routine below. After sigreturn 272 * resets the signal mask, the stack, and the 273 * frame pointer, it returns to the user 274 * specified pc, psl. 275 */ 276void 277sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) 278{ 279 struct sigframe sf, *sfp; 280 struct proc *p; 281 struct thread *td; 282 struct sigacts *psp; 283 char *sp; 284 struct trapframe *regs; 285 int sig; 286 int oonstack; 287 288 td = curthread; 289 p = td->td_proc; 290 PROC_LOCK_ASSERT(p, MA_OWNED); 291 sig = ksi->ksi_signo; 292 psp = p->p_sigacts; 293 mtx_assert(&psp->ps_mtx, MA_OWNED); 294 regs = td->td_frame; 295 oonstack = sigonstack(regs->tf_rsp); 296 297 /* Save user context. */ 298 bzero(&sf, sizeof(sf)); 299 sf.sf_uc.uc_sigmask = *mask; 300 sf.sf_uc.uc_stack = td->td_sigstk; 301 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) 302 ? ((oonstack) ? 
SS_ONSTACK : 0) : SS_DISABLE; 303 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0; 304 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(*regs)); 305 sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */ 306 get_fpcontext(td, &sf.sf_uc.uc_mcontext); 307 fpstate_drop(td); 308 309 /* Allocate space for the signal handler context. */ 310 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack && 311 SIGISMEMBER(psp->ps_sigonstack, sig)) { 312 sp = td->td_sigstk.ss_sp + 313 td->td_sigstk.ss_size - sizeof(struct sigframe); 314#if defined(COMPAT_43) 315 td->td_sigstk.ss_flags |= SS_ONSTACK; 316#endif 317 } else 318 sp = (char *)regs->tf_rsp - sizeof(struct sigframe) - 128; 319 /* Align to 16 bytes. */ 320 sfp = (struct sigframe *)((unsigned long)sp & ~0xFul); 321 322 /* Translate the signal if appropriate. */ 323 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize) 324 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; 325 326 /* Build the argument list for the signal handler. */ 327 regs->tf_rdi = sig; /* arg 1 in %rdi */ 328 regs->tf_rdx = (register_t)&sfp->sf_uc; /* arg 3 in %rdx */ 329 if (SIGISMEMBER(psp->ps_siginfo, sig)) { 330 /* Signal handler installed with SA_SIGINFO. */ 331 regs->tf_rsi = (register_t)&sfp->sf_si; /* arg 2 in %rsi */ 332 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; 333 334 /* Fill in POSIX parts */ 335 sf.sf_si = ksi->ksi_info; 336 sf.sf_si.si_signo = sig; /* maybe a translated signal */ 337 regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */ 338 } else { 339 /* Old FreeBSD-style arguments. */ 340 regs->tf_rsi = ksi->ksi_code; /* arg 2 in %rsi */ 341 regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */ 342 sf.sf_ahu.sf_handler = catcher; 343 } 344 mtx_unlock(&psp->ps_mtx); 345 PROC_UNLOCK(p); 346 347 /* 348 * Copy the sigframe out to the user's stack. 349 */ 350 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) { 351#ifdef DEBUG 352 printf("process %ld has trashed its stack\n", (long)p->p_pid); 353#endif 354 PROC_LOCK(p); 355 sigexit(td, SIGILL); 356 } 357 358 regs->tf_rsp = (long)sfp; 359 regs->tf_rip = PS_STRINGS - *(p->p_sysent->sv_szsigcode); 360 regs->tf_rflags &= ~PSL_T; 361 regs->tf_cs = _ucodesel; 362 PROC_LOCK(p); 363 mtx_lock(&psp->ps_mtx); 364} 365 366/* 367 * System call to cleanup state after a signal 368 * has been taken. Reset signal mask and 369 * stack state from context left by sendsig (above). 370 * Return to previous pc and psl as specified by 371 * context left by sendsig. Check carefully to 372 * make sure that the user has not modified the 373 * state to gain improper privileges. 374 * 375 * MPSAFE 376 */ 377int 378sigreturn(td, uap) 379 struct thread *td; 380 struct sigreturn_args /* { 381 const struct __ucontext *sigcntxp; 382 } */ *uap; 383{ 384 ucontext_t uc; 385 struct proc *p = td->td_proc; 386 struct trapframe *regs; 387 const ucontext_t *ucp; 388 long rflags; 389 int cs, error, ret; 390 ksiginfo_t ksi; 391 392 error = copyin(uap->sigcntxp, &uc, sizeof(uc)); 393 if (error != 0) 394 return (error); 395 ucp = &uc; 396 regs = td->td_frame; 397 rflags = ucp->uc_mcontext.mc_rflags; 398 /* 399 * Don't allow users to change privileged or reserved flags. 400 */ 401 /* 402 * XXX do allow users to change the privileged flag PSL_RF. 403 * The cpu sets PSL_RF in tf_rflags for faults. Debuggers 404 * should sometimes set it there too. 
tf_rflags is kept in 405 * the signal context during signal handling and there is no 406 * other place to remember it, so the PSL_RF bit may be 407 * corrupted by the signal handler without us knowing. 408 * Corruption of the PSL_RF bit at worst causes one more or 409 * one less debugger trap, so allowing it is fairly harmless. 410 */ 411 if (!EFL_SECURE(rflags & ~PSL_RF, regs->tf_rflags & ~PSL_RF)) { 412 printf("sigreturn: rflags = 0x%lx\n", rflags); 413 return (EINVAL); 414 } 415 416 /* 417 * Don't allow users to load a valid privileged %cs. Let the 418 * hardware check for invalid selectors, excess privilege in 419 * other selectors, invalid %eip's and invalid %esp's. 420 */ 421 cs = ucp->uc_mcontext.mc_cs; 422 if (!CS_SECURE(cs)) { 423 printf("sigreturn: cs = 0x%x\n", cs); 424 ksiginfo_init_trap(&ksi); 425 ksi.ksi_signo = SIGBUS; 426 ksi.ksi_code = BUS_OBJERR; 427 ksi.ksi_trapno = T_PROTFLT; 428 ksi.ksi_addr = (void *)regs->tf_rip; 429 trapsignal(td, &ksi); 430 return (EINVAL); 431 } 432 433 ret = set_fpcontext(td, &ucp->uc_mcontext); 434 if (ret != 0) 435 return (ret); 436 bcopy(&ucp->uc_mcontext.mc_rdi, regs, sizeof(*regs)); 437 438 PROC_LOCK(p); 439#if defined(COMPAT_43) 440 if (ucp->uc_mcontext.mc_onstack & 1) 441 td->td_sigstk.ss_flags |= SS_ONSTACK; 442 else 443 td->td_sigstk.ss_flags &= ~SS_ONSTACK; 444#endif 445 446 td->td_sigmask = ucp->uc_sigmask; 447 SIG_CANTMASK(td->td_sigmask); 448 signotify(td); 449 PROC_UNLOCK(p); 450 td->td_pcb->pcb_flags |= PCB_FULLCTX; 451 return (EJUSTRETURN); 452} 453 454#ifdef COMPAT_FREEBSD4 455int 456freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap) 457{ 458 459 return sigreturn(td, (struct sigreturn_args *)uap); 460} 461#endif 462 463 464/* 465 * Machine dependent boot() routine 466 * 467 * I haven't seen anything to put here yet 468 * Possibly some stuff might be grafted back here from boot() 469 */ 470void 471cpu_boot(int howto) 472{ 473} 474 475/* Get current clock frequency for the given cpu id. */ 476int 477cpu_est_clockrate(int cpu_id, uint64_t *rate) 478{ 479 register_t reg; 480 uint64_t tsc1, tsc2; 481 482 if (pcpu_find(cpu_id) == NULL || rate == NULL) 483 return (EINVAL); 484 485 /* If we're booting, trust the rate calibrated moments ago. */ 486 if (cold) { 487 *rate = tsc_freq; 488 return (0); 489 } 490 491#ifdef SMP 492 /* Schedule ourselves on the indicated cpu. */ 493 thread_lock(curthread); 494 sched_bind(curthread, cpu_id); 495 thread_unlock(curthread); 496#endif 497 498 /* Calibrate by measuring a short delay. */ 499 reg = intr_disable(); 500 tsc1 = rdtsc(); 501 DELAY(1000); 502 tsc2 = rdtsc(); 503 intr_restore(reg); 504 505#ifdef SMP 506 thread_lock(curthread); 507 sched_unbind(curthread); 508 thread_unlock(curthread); 509#endif 510 511 /* 512 * Calculate the difference in readings, convert to Mhz, and 513 * subtract 0.5% of the total. Empirical testing has shown that 514 * overhead in DELAY() works out to approximately this value. 515 */ 516 tsc2 -= tsc1; 517 *rate = tsc2 * 1000 - tsc2 * 5; 518 return (0); 519} 520 521/* 522 * Shutdown the CPU as much as possible 523 */ 524void 525cpu_halt(void) 526{ 527 for (;;) 528 __asm__ ("hlt"); 529} 530 531/* 532 * Hook to idle the CPU when possible. In the SMP case we default to 533 * off because a halted cpu will not currently pick up a new thread in the 534 * run queue until the next timer tick. 
If turned on this will result in 535 * approximately a 4.2% loss in real time performance in buildworld tests 536 * (but improves user and sys times oddly enough), and saves approximately 537 * 5% in power consumption on an idle machine (tests w/2xCPU 1.1GHz P3). 538 * 539 * XXX we need to have a cpu mask of idle cpus and generate an IPI or 540 * otherwise generate some sort of interrupt to wake up cpus sitting in HLT. 541 * Then we can have our cake and eat it too. 542 * 543 * XXX I'm turning it on for SMP as well by default for now. It seems to 544 * help lock contention somewhat, and this is critical for HTT. -Peter 545 */ 546static int cpu_idle_hlt = 1; 547TUNABLE_INT("machdep.cpu_idle_hlt", &cpu_idle_hlt); 548SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW, 549 &cpu_idle_hlt, 0, "Idle loop HLT enable"); 550 551static void 552cpu_idle_default(void) 553{ 554 /* 555 * we must absolutely guarentee that hlt is the 556 * absolute next instruction after sti or we 557 * introduce a timing window. 558 */ 559 __asm __volatile("sti; hlt"); 560} 561 562/* 563 * Note that we have to be careful here to avoid a race between checking 564 * sched_runnable() and actually halting. If we don't do this, we may waste 565 * the time between calling hlt and the next interrupt even though there 566 * is a runnable process. 567 */ 568void 569cpu_idle(void) 570{ 571 572#ifdef SMP 573 if (mp_grab_cpu_hlt()) 574 return; 575#endif 576 if (cpu_idle_hlt) { 577 disable_intr(); 578 if (sched_runnable()) 579 enable_intr(); 580 else 581 (*cpu_idle_hook)(); 582 } 583} 584 585/* Other subsystems (e.g., ACPI) can hook this later. */ 586void (*cpu_idle_hook)(void) = cpu_idle_default; 587 588/* 589 * Clear registers on exec 590 */ 591void 592exec_setregs(td, entry, stack, ps_strings) 593 struct thread *td; 594 u_long entry; 595 u_long stack; 596 u_long ps_strings; 597{ 598 struct trapframe *regs = td->td_frame; 599 struct pcb *pcb = td->td_pcb; 600 601 critical_enter(); 602 wrmsr(MSR_FSBASE, 0); 603 wrmsr(MSR_KGSBASE, 0); /* User value while we're in the kernel */ 604 pcb->pcb_fsbase = 0; 605 pcb->pcb_gsbase = 0; 606 critical_exit(); 607 load_ds(_udatasel); 608 load_es(_udatasel); 609 load_fs(_udatasel); 610 load_gs(_udatasel); 611 pcb->pcb_ds = _udatasel; 612 pcb->pcb_es = _udatasel; 613 pcb->pcb_fs = _udatasel; 614 pcb->pcb_gs = _udatasel; 615 616 bzero((char *)regs, sizeof(struct trapframe)); 617 regs->tf_rip = entry; 618 regs->tf_rsp = ((stack - 8) & ~0xFul) + 8; 619 regs->tf_rdi = stack; /* argv */ 620 regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T); 621 regs->tf_ss = _udatasel; 622 regs->tf_cs = _ucodesel; 623 624 /* 625 * Reset the hardware debug registers if they were in use. 626 * They won't have any meaning for the newly exec'd process. 627 */ 628 if (pcb->pcb_flags & PCB_DBREGS) { 629 pcb->pcb_dr0 = 0; 630 pcb->pcb_dr1 = 0; 631 pcb->pcb_dr2 = 0; 632 pcb->pcb_dr3 = 0; 633 pcb->pcb_dr6 = 0; 634 pcb->pcb_dr7 = 0; 635 if (pcb == PCPU_GET(curpcb)) { 636 /* 637 * Clear the debug registers on the running 638 * CPU, otherwise they will end up affecting 639 * the next process we switch to. 640 */ 641 reset_dbregs(); 642 } 643 pcb->pcb_flags &= ~PCB_DBREGS; 644 } 645 646 /* 647 * Drop the FP state if we hold it, so that the process gets a 648 * clean FP state if it uses the FPU again. 649 */ 650 fpstate_drop(td); 651} 652 653void 654cpu_setregs(void) 655{ 656 register_t cr0; 657 658 cr0 = rcr0(); 659 /* 660 * CR0_MP, CR0_NE and CR0_TS are also set by npx_probe() for the 661 * BSP. 
See the comments there about why we set them. 662 */ 663 cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM; 664 load_cr0(cr0); 665} 666 667/* 668 * Initialize amd64 and configure to run kernel 669 */ 670 671/* 672 * Initialize segments & interrupt table 673 */ 674 675struct user_segment_descriptor gdt[NGDT * MAXCPU];/* global descriptor table */ 676static struct gate_descriptor idt0[NIDT]; 677struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */ 678 679static char dblfault_stack[PAGE_SIZE] __aligned(16); 680 681struct amd64tss common_tss[MAXCPU]; 682 683/* software prototypes -- in more palatable form */ 684struct soft_segment_descriptor gdt_segs[] = { 685/* GNULL_SEL 0 Null Descriptor */ 686{ 0x0, /* segment base address */ 687 0x0, /* length */ 688 0, /* segment type */ 689 0, /* segment descriptor priority level */ 690 0, /* segment descriptor present */ 691 0, /* long */ 692 0, /* default 32 vs 16 bit size */ 693 0 /* limit granularity (byte/page units)*/ }, 694/* GCODE_SEL 1 Code Descriptor for kernel */ 695{ 0x0, /* segment base address */ 696 0xfffff, /* length - all address space */ 697 SDT_MEMERA, /* segment type */ 698 SEL_KPL, /* segment descriptor priority level */ 699 1, /* segment descriptor present */ 700 1, /* long */ 701 0, /* default 32 vs 16 bit size */ 702 1 /* limit granularity (byte/page units)*/ }, 703/* GDATA_SEL 2 Data Descriptor for kernel */ 704{ 0x0, /* segment base address */ 705 0xfffff, /* length - all address space */ 706 SDT_MEMRWA, /* segment type */ 707 SEL_KPL, /* segment descriptor priority level */ 708 1, /* segment descriptor present */ 709 1, /* long */ 710 0, /* default 32 vs 16 bit size */ 711 1 /* limit granularity (byte/page units)*/ }, 712/* GUCODE32_SEL 3 32 bit Code Descriptor for user */ 713{ 0x0, /* segment base address */ 714 0xfffff, /* length - all address space */ 715 SDT_MEMERA, /* segment type */ 716 SEL_UPL, /* segment descriptor priority level */ 717 1, /* segment descriptor present */ 718 0, /* long */ 719 1, /* default 32 vs 16 bit size */ 720 1 /* limit granularity (byte/page units)*/ }, 721/* GUDATA_SEL 4 32/64 bit Data Descriptor for user */ 722{ 0x0, /* segment base address */ 723 0xfffff, /* length - all address space */ 724 SDT_MEMRWA, /* segment type */ 725 SEL_UPL, /* segment descriptor priority level */ 726 1, /* segment descriptor present */ 727 0, /* long */ 728 1, /* default 32 vs 16 bit size */ 729 1 /* limit granularity (byte/page units)*/ }, 730/* GUCODE_SEL 5 64 bit Code Descriptor for user */ 731{ 0x0, /* segment base address */ 732 0xfffff, /* length - all address space */ 733 SDT_MEMERA, /* segment type */ 734 SEL_UPL, /* segment descriptor priority level */ 735 1, /* segment descriptor present */ 736 1, /* long */ 737 0, /* default 32 vs 16 bit size */ 738 1 /* limit granularity (byte/page units)*/ }, 739/* GPROC0_SEL 6 Proc 0 Tss Descriptor */ 740{ 741 0x0, /* segment base address */ 742 sizeof(struct amd64tss)-1,/* length - all address space */ 743 SDT_SYSTSS, /* segment type */ 744 SEL_KPL, /* segment descriptor priority level */ 745 1, /* segment descriptor present */ 746 0, /* long */ 747 0, /* unused - default 32 vs 16 bit size */ 748 0 /* limit granularity (byte/page units)*/ }, 749/* Actually, the TSS is a system descriptor which is double size */ 750{ 0x0, /* segment base address */ 751 0x0, /* length */ 752 0, /* segment type */ 753 0, /* segment descriptor priority level */ 754 0, /* segment descriptor present */ 755 0, /* long */ 756 0, /* default 32 vs 16 bit size */ 757 0 /* 
limit granularity (byte/page units)*/ }, 758/* GUGS32_SEL 8 32 bit GS Descriptor for user */ 759{ 0x0, /* segment base address */ 760 0xfffff, /* length - all address space */ 761 SDT_MEMRWA, /* segment type */ 762 SEL_UPL, /* segment descriptor priority level */ 763 1, /* segment descriptor present */ 764 0, /* long */ 765 1, /* default 32 vs 16 bit size */ 766 1 /* limit granularity (byte/page units)*/ }, 767}; 768 769void 770setidt(idx, func, typ, dpl, ist) 771 int idx; 772 inthand_t *func; 773 int typ; 774 int dpl; 775 int ist; 776{ 777 struct gate_descriptor *ip; 778 779 ip = idt + idx; 780 ip->gd_looffset = (uintptr_t)func; 781 ip->gd_selector = GSEL(GCODE_SEL, SEL_KPL); 782 ip->gd_ist = ist; 783 ip->gd_xx = 0; 784 ip->gd_type = typ; 785 ip->gd_dpl = dpl; 786 ip->gd_p = 1; 787 ip->gd_hioffset = ((uintptr_t)func)>>16 ; 788} 789 790extern inthand_t 791 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl), 792 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm), 793 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot), 794 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align), 795 IDTVEC(xmm), IDTVEC(dblfault), 796 IDTVEC(fast_syscall), IDTVEC(fast_syscall32); 797 798void 799sdtossd(sd, ssd) 800 struct user_segment_descriptor *sd; 801 struct soft_segment_descriptor *ssd; 802{ 803 804 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase; 805 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit; 806 ssd->ssd_type = sd->sd_type; 807 ssd->ssd_dpl = sd->sd_dpl; 808 ssd->ssd_p = sd->sd_p; 809 ssd->ssd_long = sd->sd_long; 810 ssd->ssd_def32 = sd->sd_def32; 811 ssd->ssd_gran = sd->sd_gran; 812} 813 814void 815ssdtosd(ssd, sd) 816 struct soft_segment_descriptor *ssd; 817 struct user_segment_descriptor *sd; 818{ 819 820 sd->sd_lobase = (ssd->ssd_base) & 0xffffff; 821 sd->sd_hibase = (ssd->ssd_base >> 24) & 0xff; 822 sd->sd_lolimit = (ssd->ssd_limit) & 0xffff; 823 sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf; 824 sd->sd_type = ssd->ssd_type; 825 sd->sd_dpl = ssd->ssd_dpl; 826 sd->sd_p = ssd->ssd_p; 827 sd->sd_long = ssd->ssd_long; 828 sd->sd_def32 = ssd->ssd_def32; 829 sd->sd_gran = ssd->ssd_gran; 830} 831 832void 833ssdtosyssd(ssd, sd) 834 struct soft_segment_descriptor *ssd; 835 struct system_segment_descriptor *sd; 836{ 837 838 sd->sd_lobase = (ssd->ssd_base) & 0xffffff; 839 sd->sd_hibase = (ssd->ssd_base >> 24) & 0xfffffffffful; 840 sd->sd_lolimit = (ssd->ssd_limit) & 0xffff; 841 sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf; 842 sd->sd_type = ssd->ssd_type; 843 sd->sd_dpl = ssd->ssd_dpl; 844 sd->sd_p = ssd->ssd_p; 845 sd->sd_gran = ssd->ssd_gran; 846} 847 848#if !defined(DEV_ATPIC) && defined(DEV_ISA) 849#include <isa/isavar.h> 850u_int 851isa_irq_pending(void) 852{ 853 854 return (0); 855} 856#endif 857 858u_int basemem; 859 860/* 861 * Populate the (physmap) array with base/bound pairs describing the 862 * available physical memory in the system, then test this memory and 863 * build the phys_avail array describing the actually-available memory. 864 * 865 * If we cannot accurately determine the physical memory map, then use 866 * value from the 0xE801 call, and failing that, the RTC. 867 * 868 * Total memory size may be set by the kernel environment variable 869 * hw.physmem or the compile-time define MAXMEM. 870 * 871 * XXX first should be vm_paddr_t. 
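 *
 * physmap[] holds byte addresses in base/end pairs: physmap[2i] is the
 * start and physmap[2i + 1] the end of region i.  E.g. (illustrative
 * values) a machine with 639KB of base memory and extended memory up
 * to 2GB would yield { 0x0, 0x9fc00, 0x100000, 0x80000000 }.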
872 */ 873static void 874getmemsize(caddr_t kmdp, u_int64_t first) 875{ 876 int i, off, physmap_idx, pa_indx, da_indx; 877 vm_paddr_t pa, physmap[PHYSMAP_SIZE]; 878 u_long physmem_tunable; 879 pt_entry_t *pte; 880 struct bios_smap *smapbase, *smap, *smapend; 881 u_int32_t smapsize; 882 quad_t dcons_addr, dcons_size; 883 884 bzero(physmap, sizeof(physmap)); 885 basemem = 0; 886 physmap_idx = 0; 887 888 /* 889 * get memory map from INT 15:E820, kindly supplied by the loader. 890 * 891 * subr_module.c says: 892 * "Consumer may safely assume that size value precedes data." 893 * ie: an int32_t immediately precedes smap. 894 */ 895 smapbase = (struct bios_smap *)preload_search_info(kmdp, 896 MODINFO_METADATA | MODINFOMD_SMAP); 897 if (smapbase == NULL) 898 panic("No BIOS smap info from loader!"); 899 900 smapsize = *((u_int32_t *)smapbase - 1); 901 smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize); 902 903 for (smap = smapbase; smap < smapend; smap++) { 904 if (boothowto & RB_VERBOSE) 905 printf("SMAP type=%02x base=%016lx len=%016lx\n", 906 smap->type, smap->base, smap->length); 907 908 if (smap->type != SMAP_TYPE_MEMORY) 909 continue; 910 911 if (smap->length == 0) 912 continue; 913 914 for (i = 0; i <= physmap_idx; i += 2) { 915 if (smap->base < physmap[i + 1]) { 916 if (boothowto & RB_VERBOSE) 917 printf( 918 "Overlapping or non-monotonic memory region, ignoring second region\n"); 919 continue; 920 } 921 } 922 923 if (smap->base == physmap[physmap_idx + 1]) { 924 physmap[physmap_idx + 1] += smap->length; 925 continue; 926 } 927 928 physmap_idx += 2; 929 if (physmap_idx == PHYSMAP_SIZE) { 930 printf( 931 "Too many segments in the physical address map, giving up\n"); 932 break; 933 } 934 physmap[physmap_idx] = smap->base; 935 physmap[physmap_idx + 1] = smap->base + smap->length; 936 } 937 938 /* 939 * Find the 'base memory' segment for SMP 940 */ 941 basemem = 0; 942 for (i = 0; i <= physmap_idx; i += 2) { 943 if (physmap[i] == 0x00000000) { 944 basemem = physmap[i + 1] / 1024; 945 break; 946 } 947 } 948 if (basemem == 0) 949 panic("BIOS smap did not include a basemem segment!"); 950 951#ifdef SMP 952 /* make hole for AP bootstrap code */ 953 physmap[1] = mp_bootaddress(physmap[1] / 1024); 954#endif 955 956 /* 957 * Maxmem isn't the "maximum memory", it's one larger than the 958 * highest page of the physical address space. It should be 959 * called something like "Maxphyspage". We may adjust this 960 * based on ``hw.physmem'' and the results of the memory test. 961 */ 962 Maxmem = atop(physmap[physmap_idx + 1]); 963 964#ifdef MAXMEM 965 Maxmem = MAXMEM / 4; 966#endif 967 968 if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable)) 969 Maxmem = atop(physmem_tunable); 970 971 /* 972 * Don't allow MAXMEM or hw.physmem to extend the amount of memory 973 * in the system. 974 */ 975 if (Maxmem > atop(physmap[physmap_idx + 1])) 976 Maxmem = atop(physmap[physmap_idx + 1]); 977 978 if (atop(physmap[physmap_idx + 1]) != Maxmem && 979 (boothowto & RB_VERBOSE)) 980 printf("Physical memory use set to %ldK\n", Maxmem * 4); 981 982 /* call pmap initialization to make new kernel address space */ 983 pmap_bootstrap(&first); 984 985 /* 986 * Size up each available chunk of physical memory. 
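 *
 * Each page of every physmap[] chunk is temporarily mapped via
 * CMAP1/CADDR1 and probed with the bit patterns below.  Pages already
 * claimed by the kernel image or the dcons buffer are skipped for
 * phys_avail[] but still recorded in dump_avail[]; pages that fail the
 * read/write test are dropped from both arrays.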
987 */ 988 physmap[0] = PAGE_SIZE; /* mask off page 0 */ 989 pa_indx = 0; 990 da_indx = 1; 991 phys_avail[pa_indx++] = physmap[0]; 992 phys_avail[pa_indx] = physmap[0]; 993 dump_avail[da_indx] = physmap[0]; 994 pte = CMAP1; 995 996 /* 997 * Get dcons buffer address 998 */ 999 if (getenv_quad("dcons.addr", &dcons_addr) == 0 || 1000 getenv_quad("dcons.size", &dcons_size) == 0) 1001 dcons_addr = 0; 1002 1003 /* 1004 * physmap is in bytes, so when converting to page boundaries, 1005 * round up the start address and round down the end address. 1006 */ 1007 for (i = 0; i <= physmap_idx; i += 2) { 1008 vm_paddr_t end; 1009 1010 end = ptoa((vm_paddr_t)Maxmem); 1011 if (physmap[i + 1] < end) 1012 end = trunc_page(physmap[i + 1]); 1013 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) { 1014 int tmp, page_bad, full; 1015 int *ptr = (int *)CADDR1; 1016 1017 full = FALSE; 1018 /* 1019 * block out kernel memory as not available. 1020 */ 1021 if (pa >= 0x100000 && pa < first) 1022 goto do_dump_avail; 1023 1024 /* 1025 * block out dcons buffer 1026 */ 1027 if (dcons_addr > 0 1028 && pa >= trunc_page(dcons_addr) 1029 && pa < dcons_addr + dcons_size) 1030 goto do_dump_avail; 1031 1032 page_bad = FALSE; 1033 1034 /* 1035 * map page into kernel: valid, read/write,non-cacheable 1036 */ 1037 *pte = pa | PG_V | PG_RW | PG_N; 1038 invltlb(); 1039 1040 tmp = *(int *)ptr; 1041 /* 1042 * Test for alternating 1's and 0's 1043 */ 1044 *(volatile int *)ptr = 0xaaaaaaaa; 1045 if (*(volatile int *)ptr != 0xaaaaaaaa) 1046 page_bad = TRUE; 1047 /* 1048 * Test for alternating 0's and 1's 1049 */ 1050 *(volatile int *)ptr = 0x55555555; 1051 if (*(volatile int *)ptr != 0x55555555) 1052 page_bad = TRUE; 1053 /* 1054 * Test for all 1's 1055 */ 1056 *(volatile int *)ptr = 0xffffffff; 1057 if (*(volatile int *)ptr != 0xffffffff) 1058 page_bad = TRUE; 1059 /* 1060 * Test for all 0's 1061 */ 1062 *(volatile int *)ptr = 0x0; 1063 if (*(volatile int *)ptr != 0x0) 1064 page_bad = TRUE; 1065 /* 1066 * Restore original value. 1067 */ 1068 *(int *)ptr = tmp; 1069 1070 /* 1071 * Adjust array of valid/good pages. 1072 */ 1073 if (page_bad == TRUE) 1074 continue; 1075 /* 1076 * If this good page is a continuation of the 1077 * previous set of good pages, then just increase 1078 * the end pointer. Otherwise start a new chunk. 1079 * Note that "end" points one higher than end, 1080 * making the range >= start and < end. 1081 * If we're also doing a speculative memory 1082 * test and we at or past the end, bump up Maxmem 1083 * so that we keep going. The first bad page 1084 * will terminate the loop. 
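 *
 * phys_avail[] uses the same base/end pair encoding as physmap[], e.g.
 * (illustrative) { 0x1000, 0x9f000, 0x100000, 0x7fff0000, 0, 0 }
 * describes two usable ranges; the trailing pair of zeroes terminates
 * the list, which is why PHYS_AVAIL_ARRAY_END leaves two slots free.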
1085 */ 1086 if (phys_avail[pa_indx] == pa) { 1087 phys_avail[pa_indx] += PAGE_SIZE; 1088 } else { 1089 pa_indx++; 1090 if (pa_indx == PHYS_AVAIL_ARRAY_END) { 1091 printf( 1092 "Too many holes in the physical address space, giving up\n"); 1093 pa_indx--; 1094 full = TRUE; 1095 goto do_dump_avail; 1096 } 1097 phys_avail[pa_indx++] = pa; /* start */ 1098 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */ 1099 } 1100 physmem++; 1101do_dump_avail: 1102 if (dump_avail[da_indx] == pa) { 1103 dump_avail[da_indx] += PAGE_SIZE; 1104 } else { 1105 da_indx++; 1106 if (da_indx == DUMP_AVAIL_ARRAY_END) { 1107 da_indx--; 1108 goto do_next; 1109 } 1110 dump_avail[da_indx++] = pa; /* start */ 1111 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */ 1112 } 1113do_next: 1114 if (full) 1115 break; 1116 } 1117 } 1118 *pte = 0; 1119 invltlb(); 1120 1121 /* 1122 * XXX 1123 * The last chunk must contain at least one page plus the message 1124 * buffer to avoid complicating other code (message buffer address 1125 * calculation, etc.). 1126 */ 1127 while (phys_avail[pa_indx - 1] + PAGE_SIZE + 1128 round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) { 1129 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]); 1130 phys_avail[pa_indx--] = 0; 1131 phys_avail[pa_indx--] = 0; 1132 } 1133 1134 Maxmem = atop(phys_avail[pa_indx]); 1135 1136 /* Trim off space for the message buffer. */ 1137 phys_avail[pa_indx] -= round_page(MSGBUF_SIZE); 1138 1139 /* Map the message buffer. */ 1140 for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE) 1141 pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] + 1142 off); 1143} 1144 1145u_int64_t 1146hammer_time(u_int64_t modulep, u_int64_t physfree) 1147{ 1148 caddr_t kmdp; 1149 int gsel_tss, x; 1150 struct pcpu *pc; 1151 u_int64_t msr; 1152 char *env; 1153 1154 thread0.td_kstack = physfree + KERNBASE; 1155 bzero((void *)thread0.td_kstack, KSTACK_PAGES * PAGE_SIZE); 1156 physfree += KSTACK_PAGES * PAGE_SIZE; 1157 thread0.td_pcb = (struct pcb *) 1158 (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1; 1159 1160 /* 1161 * This may be done better later if it gets more high level 1162 * components in it. If so just link td->td_proc here. 
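 *
 * At this point the layout is: thread0's kernel stack occupies
 * KSTACK_PAGES pages starting at physfree + KERNBASE, and its pcb
 * lives in the last sizeof(struct pcb) bytes at the top of that
 * stack; that pcb address is also what hammer_time() finally returns
 * to locore.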
1163 */ 1164 proc_linkup0(&proc0, &thread0); 1165 1166 preload_metadata = (caddr_t)(uintptr_t)(modulep + KERNBASE); 1167 preload_bootstrap_relocate(KERNBASE); 1168 kmdp = preload_search_by_type("elf kernel"); 1169 if (kmdp == NULL) 1170 kmdp = preload_search_by_type("elf64 kernel"); 1171 boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int); 1172 kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *) + KERNBASE; 1173#ifdef DDB 1174 ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t); 1175 ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t); 1176#endif 1177 1178 /* Init basic tunables, hz etc */ 1179 init_param1(); 1180 1181 /* 1182 * make gdt memory segments 1183 */ 1184 gdt_segs[GPROC0_SEL].ssd_base = (uintptr_t)&common_tss[0]; 1185 1186 for (x = 0; x < NGDT; x++) { 1187 if (x != GPROC0_SEL && x != (GPROC0_SEL + 1)) 1188 ssdtosd(&gdt_segs[x], &gdt[x]); 1189 } 1190 ssdtosyssd(&gdt_segs[GPROC0_SEL], 1191 (struct system_segment_descriptor *)&gdt[GPROC0_SEL]); 1192 1193 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 1194 r_gdt.rd_base = (long) gdt; 1195 lgdt(&r_gdt); 1196 pc = &__pcpu[0]; 1197 1198 wrmsr(MSR_FSBASE, 0); /* User value */ 1199 wrmsr(MSR_GSBASE, (u_int64_t)pc); 1200 wrmsr(MSR_KGSBASE, 0); /* User value while in the kernel */ 1201 1202 pcpu_init(pc, 0, sizeof(struct pcpu)); 1203 PCPU_SET(prvspace, pc); 1204 PCPU_SET(curthread, &thread0); 1205 PCPU_SET(curpcb, thread0.td_pcb); 1206 PCPU_SET(tssp, &common_tss[0]); 1207 1208 /* 1209 * Initialize mutexes. 1210 * 1211 * icu_lock: in order to allow an interrupt to occur in a critical 1212 * section, to set pcpu->ipending (etc...) properly, we 1213 * must be able to get the icu lock, so it can't be 1214 * under witness. 1215 */ 1216 mutex_init(); 1217 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS); 1218 1219 /* exceptions */ 1220 for (x = 0; x < NIDT; x++) 1221 setidt(x, &IDTVEC(rsvd), SDT_SYSIGT, SEL_KPL, 0); 1222 setidt(IDT_DE, &IDTVEC(div), SDT_SYSIGT, SEL_KPL, 0); 1223 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYSIGT, SEL_KPL, 0); 1224 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYSIGT, SEL_KPL, 1); 1225 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYSIGT, SEL_UPL, 0); 1226 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYSIGT, SEL_KPL, 0); 1227 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYSIGT, SEL_KPL, 0); 1228 setidt(IDT_UD, &IDTVEC(ill), SDT_SYSIGT, SEL_KPL, 0); 1229 setidt(IDT_NM, &IDTVEC(dna), SDT_SYSIGT, SEL_KPL, 0); 1230 setidt(IDT_DF, &IDTVEC(dblfault), SDT_SYSIGT, SEL_KPL, 1); 1231 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYSIGT, SEL_KPL, 0); 1232 setidt(IDT_TS, &IDTVEC(tss), SDT_SYSIGT, SEL_KPL, 0); 1233 setidt(IDT_NP, &IDTVEC(missing), SDT_SYSIGT, SEL_KPL, 0); 1234 setidt(IDT_SS, &IDTVEC(stk), SDT_SYSIGT, SEL_KPL, 0); 1235 setidt(IDT_GP, &IDTVEC(prot), SDT_SYSIGT, SEL_KPL, 0); 1236 setidt(IDT_PF, &IDTVEC(page), SDT_SYSIGT, SEL_KPL, 0); 1237 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYSIGT, SEL_KPL, 0); 1238 setidt(IDT_AC, &IDTVEC(align), SDT_SYSIGT, SEL_KPL, 0); 1239 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYSIGT, SEL_KPL, 0); 1240 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYSIGT, SEL_KPL, 0); 1241 1242 r_idt.rd_limit = sizeof(idt0) - 1; 1243 r_idt.rd_base = (long) idt; 1244 lidt(&r_idt); 1245 1246 /* 1247 * Initialize the i8254 before the console so that console 1248 * initialization can use DELAY(). 1249 */ 1250 i8254_init(); 1251 1252 /* 1253 * Initialize the console before we print anything out. 1254 */ 1255 cninit(); 1256 1257#ifdef DEV_ISA 1258#ifdef DEV_ATPIC 1259 elcr_probe(); 1260 atpic_startup(); 1261#else 1262 /* Reset and mask the atpics and leave them shut down. 
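 *
 * The 8259s take no part in interrupt delivery once the local APIC is
 * in charge, but the BIOS or loader may have left them programmed and
 * unmasked, so they are reinitialized and fully masked here to keep
 * stray legacy interrupts from being delivered.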
*/ 1263 atpic_reset(); 1264 1265 /* 1266 * Point the ICU spurious interrupt vectors at the APIC spurious 1267 * interrupt handler. 1268 */ 1269 setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0); 1270 setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0); 1271#endif 1272#else 1273#error "have you forgotten the isa device?"; 1274#endif 1275 1276 kdb_init(); 1277 1278#ifdef KDB 1279 if (boothowto & RB_KDB) 1280 kdb_enter(KDB_WHY_BOOTFLAGS, 1281 "Boot flags requested debugger"); 1282#endif 1283 1284 identify_cpu(); /* Final stage of CPU initialization */ 1285 initializecpu(); /* Initialize CPU registers */ 1286 1287 /* make an initial tss so cpu can get interrupt stack on syscall! */ 1288 common_tss[0].tss_rsp0 = thread0.td_kstack + \ 1289 KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb); 1290 /* Ensure the stack is aligned to 16 bytes */ 1291 common_tss[0].tss_rsp0 &= ~0xFul; 1292 PCPU_SET(rsp0, common_tss[0].tss_rsp0); 1293 1294 /* doublefault stack space, runs on ist1 */ 1295 common_tss[0].tss_ist1 = (long)&dblfault_stack[sizeof(dblfault_stack)]; 1296 1297 /* Set the IO permission bitmap (empty due to tss seg limit) */ 1298 common_tss[0].tss_iobase = sizeof(struct amd64tss); 1299 1300 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); 1301 ltr(gsel_tss); 1302 1303 /* Set up the fast syscall stuff */ 1304 msr = rdmsr(MSR_EFER) | EFER_SCE; 1305 wrmsr(MSR_EFER, msr); 1306 wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall)); 1307 wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32)); 1308 msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) | 1309 ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48); 1310 wrmsr(MSR_STAR, msr); 1311 wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D); 1312 1313 getmemsize(kmdp, physfree); 1314 init_param2(physmem); 1315 1316 /* now running on new page tables, configured,and u/iom is accessible */ 1317 1318 msgbufinit(msgbufp, MSGBUF_SIZE); 1319 fpuinit(); 1320 1321 /* transfer to user mode */ 1322 1323 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL); 1324 _udatasel = GSEL(GUDATA_SEL, SEL_UPL); 1325 _ucode32sel = GSEL(GUCODE32_SEL, SEL_UPL); 1326 1327 /* setup proc 0's pcb */ 1328 thread0.td_pcb->pcb_flags = 0; /* XXXKSE */ 1329 thread0.td_pcb->pcb_cr3 = KPML4phys; 1330 thread0.td_frame = &proc0_tf; 1331 1332 env = getenv("kernelname"); 1333 if (env != NULL) 1334 strlcpy(kernelname, env, sizeof(kernelname)); 1335 1336 /* Location of kernel stack for locore */ 1337 return ((u_int64_t)thread0.td_pcb); 1338} 1339 1340void 1341cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) 1342{ 1343 1344 pcpu->pc_acpi_id = 0xffffffff; 1345} 1346 1347void 1348spinlock_enter(void) 1349{ 1350 struct thread *td; 1351 1352 td = curthread; 1353 if (td->td_md.md_spinlock_count == 0) 1354 td->td_md.md_saved_flags = intr_disable(); 1355 td->td_md.md_spinlock_count++; 1356 critical_enter(); 1357} 1358 1359void 1360spinlock_exit(void) 1361{ 1362 struct thread *td; 1363 1364 td = curthread; 1365 critical_exit(); 1366 td->td_md.md_spinlock_count--; 1367 if (td->td_md.md_spinlock_count == 0) 1368 intr_restore(td->td_md.md_saved_flags); 1369} 1370 1371/* 1372 * Construct a PCB from a trapframe. This is called from kdb_trap() where 1373 * we want to start a backtrace from the function that caused us to enter 1374 * the debugger. We have the context in the trapframe, but base the trace 1375 * on the PCB. The PCB doesn't have to be perfect, as long as it contains 1376 * enough for a backtrace. 
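 *
 * Only the callee-saved registers plus %rip and %rsp are copied below;
 * that is all a frame-pointer based unwind needs, so the scratch
 * registers held in the trapframe can be ignored.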
1377 */ 1378void 1379makectx(struct trapframe *tf, struct pcb *pcb) 1380{ 1381 1382 pcb->pcb_r12 = tf->tf_r12; 1383 pcb->pcb_r13 = tf->tf_r13; 1384 pcb->pcb_r14 = tf->tf_r14; 1385 pcb->pcb_r15 = tf->tf_r15; 1386 pcb->pcb_rbp = tf->tf_rbp; 1387 pcb->pcb_rbx = tf->tf_rbx; 1388 pcb->pcb_rip = tf->tf_rip; 1389 pcb->pcb_rsp = (ISPL(tf->tf_cs)) ? tf->tf_rsp : (long)(tf + 1) - 8; 1390} 1391 1392int 1393ptrace_set_pc(struct thread *td, unsigned long addr) 1394{ 1395 td->td_frame->tf_rip = addr; 1396 return (0); 1397} 1398 1399int 1400ptrace_single_step(struct thread *td) 1401{ 1402 td->td_frame->tf_rflags |= PSL_T; 1403 return (0); 1404} 1405 1406int 1407ptrace_clear_single_step(struct thread *td) 1408{ 1409 td->td_frame->tf_rflags &= ~PSL_T; 1410 return (0); 1411} 1412 1413int 1414fill_regs(struct thread *td, struct reg *regs) 1415{ 1416 struct trapframe *tp; 1417 1418 tp = td->td_frame; 1419 regs->r_r15 = tp->tf_r15; 1420 regs->r_r14 = tp->tf_r14; 1421 regs->r_r13 = tp->tf_r13; 1422 regs->r_r12 = tp->tf_r12; 1423 regs->r_r11 = tp->tf_r11; 1424 regs->r_r10 = tp->tf_r10; 1425 regs->r_r9 = tp->tf_r9; 1426 regs->r_r8 = tp->tf_r8; 1427 regs->r_rdi = tp->tf_rdi; 1428 regs->r_rsi = tp->tf_rsi; 1429 regs->r_rbp = tp->tf_rbp; 1430 regs->r_rbx = tp->tf_rbx; 1431 regs->r_rdx = tp->tf_rdx; 1432 regs->r_rcx = tp->tf_rcx; 1433 regs->r_rax = tp->tf_rax; 1434 regs->r_rip = tp->tf_rip; 1435 regs->r_cs = tp->tf_cs; 1436 regs->r_rflags = tp->tf_rflags; 1437 regs->r_rsp = tp->tf_rsp; 1438 regs->r_ss = tp->tf_ss; 1439 return (0); 1440} 1441 1442int 1443set_regs(struct thread *td, struct reg *regs) 1444{ 1445 struct trapframe *tp; 1446 register_t rflags; 1447 1448 tp = td->td_frame; 1449 rflags = regs->r_rflags & 0xffffffff; 1450 if (!EFL_SECURE(rflags, tp->tf_rflags) || !CS_SECURE(regs->r_cs)) 1451 return (EINVAL); 1452 tp->tf_r15 = regs->r_r15; 1453 tp->tf_r14 = regs->r_r14; 1454 tp->tf_r13 = regs->r_r13; 1455 tp->tf_r12 = regs->r_r12; 1456 tp->tf_r11 = regs->r_r11; 1457 tp->tf_r10 = regs->r_r10; 1458 tp->tf_r9 = regs->r_r9; 1459 tp->tf_r8 = regs->r_r8; 1460 tp->tf_rdi = regs->r_rdi; 1461 tp->tf_rsi = regs->r_rsi; 1462 tp->tf_rbp = regs->r_rbp; 1463 tp->tf_rbx = regs->r_rbx; 1464 tp->tf_rdx = regs->r_rdx; 1465 tp->tf_rcx = regs->r_rcx; 1466 tp->tf_rax = regs->r_rax; 1467 tp->tf_rip = regs->r_rip; 1468 tp->tf_cs = regs->r_cs; 1469 tp->tf_rflags = rflags; 1470 tp->tf_rsp = regs->r_rsp; 1471 tp->tf_ss = regs->r_ss; 1472 td->td_pcb->pcb_flags |= PCB_FULLCTX; 1473 return (0); 1474} 1475 1476/* XXX check all this stuff! 
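 *
 * The conversions below shuffle between the FXSAVE image kept in the
 * pcb (struct savefpu) and the flat struct fpreg exposed through
 * ptrace(PT_GETFPREGS)/PT_SETFPREGS: control and status words are
 * copied field by field, followed by the eight 10-byte x87 registers
 * and the sixteen 16-byte XMM registers.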
*/ 1477/* externalize from sv_xmm */ 1478static void 1479fill_fpregs_xmm(struct savefpu *sv_xmm, struct fpreg *fpregs) 1480{ 1481 struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env; 1482 struct envxmm *penv_xmm = &sv_xmm->sv_env; 1483 int i; 1484 1485 /* pcb -> fpregs */ 1486 bzero(fpregs, sizeof(*fpregs)); 1487 1488 /* FPU control/status */ 1489 penv_fpreg->en_cw = penv_xmm->en_cw; 1490 penv_fpreg->en_sw = penv_xmm->en_sw; 1491 penv_fpreg->en_tw = penv_xmm->en_tw; 1492 penv_fpreg->en_opcode = penv_xmm->en_opcode; 1493 penv_fpreg->en_rip = penv_xmm->en_rip; 1494 penv_fpreg->en_rdp = penv_xmm->en_rdp; 1495 penv_fpreg->en_mxcsr = penv_xmm->en_mxcsr; 1496 penv_fpreg->en_mxcsr_mask = penv_xmm->en_mxcsr_mask; 1497 1498 /* FPU registers */ 1499 for (i = 0; i < 8; ++i) 1500 bcopy(sv_xmm->sv_fp[i].fp_acc.fp_bytes, fpregs->fpr_acc[i], 10); 1501 1502 /* SSE registers */ 1503 for (i = 0; i < 16; ++i) 1504 bcopy(sv_xmm->sv_xmm[i].xmm_bytes, fpregs->fpr_xacc[i], 16); 1505} 1506 1507/* internalize from fpregs into sv_xmm */ 1508static void 1509set_fpregs_xmm(struct fpreg *fpregs, struct savefpu *sv_xmm) 1510{ 1511 struct envxmm *penv_xmm = &sv_xmm->sv_env; 1512 struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env; 1513 int i; 1514 1515 /* fpregs -> pcb */ 1516 /* FPU control/status */ 1517 penv_xmm->en_cw = penv_fpreg->en_cw; 1518 penv_xmm->en_sw = penv_fpreg->en_sw; 1519 penv_xmm->en_tw = penv_fpreg->en_tw; 1520 penv_xmm->en_opcode = penv_fpreg->en_opcode; 1521 penv_xmm->en_rip = penv_fpreg->en_rip; 1522 penv_xmm->en_rdp = penv_fpreg->en_rdp; 1523 penv_xmm->en_mxcsr = penv_fpreg->en_mxcsr; 1524 penv_xmm->en_mxcsr_mask = penv_fpreg->en_mxcsr_mask & cpu_mxcsr_mask; 1525 1526 /* FPU registers */ 1527 for (i = 0; i < 8; ++i) 1528 bcopy(fpregs->fpr_acc[i], sv_xmm->sv_fp[i].fp_acc.fp_bytes, 10); 1529 1530 /* SSE registers */ 1531 for (i = 0; i < 16; ++i) 1532 bcopy(fpregs->fpr_xacc[i], sv_xmm->sv_xmm[i].xmm_bytes, 16); 1533} 1534 1535/* externalize from td->pcb */ 1536int 1537fill_fpregs(struct thread *td, struct fpreg *fpregs) 1538{ 1539 1540 fill_fpregs_xmm(&td->td_pcb->pcb_save, fpregs); 1541 return (0); 1542} 1543 1544/* internalize to td->pcb */ 1545int 1546set_fpregs(struct thread *td, struct fpreg *fpregs) 1547{ 1548 1549 set_fpregs_xmm(fpregs, &td->td_pcb->pcb_save); 1550 return (0); 1551} 1552 1553/* 1554 * Get machine context. 1555 */ 1556int 1557get_mcontext(struct thread *td, mcontext_t *mcp, int flags) 1558{ 1559 struct trapframe *tp; 1560 1561 tp = td->td_frame; 1562 PROC_LOCK(curthread->td_proc); 1563 mcp->mc_onstack = sigonstack(tp->tf_rsp); 1564 PROC_UNLOCK(curthread->td_proc); 1565 mcp->mc_r15 = tp->tf_r15; 1566 mcp->mc_r14 = tp->tf_r14; 1567 mcp->mc_r13 = tp->tf_r13; 1568 mcp->mc_r12 = tp->tf_r12; 1569 mcp->mc_r11 = tp->tf_r11; 1570 mcp->mc_r10 = tp->tf_r10; 1571 mcp->mc_r9 = tp->tf_r9; 1572 mcp->mc_r8 = tp->tf_r8; 1573 mcp->mc_rdi = tp->tf_rdi; 1574 mcp->mc_rsi = tp->tf_rsi; 1575 mcp->mc_rbp = tp->tf_rbp; 1576 mcp->mc_rbx = tp->tf_rbx; 1577 mcp->mc_rcx = tp->tf_rcx; 1578 mcp->mc_rflags = tp->tf_rflags; 1579 if (flags & GET_MC_CLEAR_RET) { 1580 mcp->mc_rax = 0; 1581 mcp->mc_rdx = 0; 1582 mcp->mc_rflags &= ~PSL_C; 1583 } else { 1584 mcp->mc_rax = tp->tf_rax; 1585 mcp->mc_rdx = tp->tf_rdx; 1586 } 1587 mcp->mc_rip = tp->tf_rip; 1588 mcp->mc_cs = tp->tf_cs; 1589 mcp->mc_rsp = tp->tf_rsp; 1590 mcp->mc_ss = tp->tf_ss; 1591 mcp->mc_len = sizeof(*mcp); 1592 get_fpcontext(td, mcp); 1593 return (0); 1594} 1595 1596/* 1597 * Set machine context. 
1598 * 1599 * However, we don't set any but the user modifiable flags, and we won't 1600 * touch the cs selector. 1601 */ 1602int 1603set_mcontext(struct thread *td, const mcontext_t *mcp) 1604{ 1605 struct trapframe *tp; 1606 long rflags; 1607 int ret; 1608 1609 tp = td->td_frame; 1610 if (mcp->mc_len != sizeof(*mcp)) 1611 return (EINVAL); 1612 rflags = (mcp->mc_rflags & PSL_USERCHANGE) | 1613 (tp->tf_rflags & ~PSL_USERCHANGE); 1614 ret = set_fpcontext(td, mcp); 1615 if (ret != 0) 1616 return (ret); 1617 tp->tf_r15 = mcp->mc_r15; 1618 tp->tf_r14 = mcp->mc_r14; 1619 tp->tf_r13 = mcp->mc_r13; 1620 tp->tf_r12 = mcp->mc_r12; 1621 tp->tf_r11 = mcp->mc_r11; 1622 tp->tf_r10 = mcp->mc_r10; 1623 tp->tf_r9 = mcp->mc_r9; 1624 tp->tf_r8 = mcp->mc_r8; 1625 tp->tf_rdi = mcp->mc_rdi; 1626 tp->tf_rsi = mcp->mc_rsi; 1627 tp->tf_rbp = mcp->mc_rbp; 1628 tp->tf_rbx = mcp->mc_rbx; 1629 tp->tf_rdx = mcp->mc_rdx; 1630 tp->tf_rcx = mcp->mc_rcx; 1631 tp->tf_rax = mcp->mc_rax; 1632 tp->tf_rip = mcp->mc_rip; 1633 tp->tf_rflags = rflags; 1634 tp->tf_rsp = mcp->mc_rsp; 1635 tp->tf_ss = mcp->mc_ss; 1636 td->td_pcb->pcb_flags |= PCB_FULLCTX; 1637 return (0); 1638} 1639 1640static void 1641get_fpcontext(struct thread *td, mcontext_t *mcp) 1642{ 1643 1644 mcp->mc_ownedfp = fpugetregs(td, (struct savefpu *)&mcp->mc_fpstate); 1645 mcp->mc_fpformat = fpuformat(); 1646} 1647 1648static int 1649set_fpcontext(struct thread *td, const mcontext_t *mcp) 1650{ 1651 struct savefpu *fpstate; 1652 1653 if (mcp->mc_fpformat == _MC_FPFMT_NODEV) 1654 return (0); 1655 else if (mcp->mc_fpformat != _MC_FPFMT_XMM) 1656 return (EINVAL); 1657 else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE) 1658 /* We don't care what state is left in the FPU or PCB. */ 1659 fpstate_drop(td); 1660 else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU || 1661 mcp->mc_ownedfp == _MC_FPOWNED_PCB) { 1662 /* 1663 * XXX we violate the dubious requirement that fpusetregs() 1664 * be called with interrupts disabled. 1665 * XXX obsolete on trap-16 systems? 1666 */ 1667 fpstate = (struct savefpu *)&mcp->mc_fpstate; 1668 fpstate->sv_env.en_mxcsr &= cpu_mxcsr_mask; 1669 fpusetregs(td, fpstate); 1670 } else 1671 return (EINVAL); 1672 return (0); 1673} 1674 1675void 1676fpstate_drop(struct thread *td) 1677{ 1678 register_t s; 1679 1680 s = intr_disable(); 1681 if (PCPU_GET(fpcurthread) == td) 1682 fpudrop(); 1683 /* 1684 * XXX force a full drop of the fpu. The above only drops it if we 1685 * owned it. 1686 * 1687 * XXX I don't much like fpugetregs()'s semantics of doing a full 1688 * drop. Dropping only to the pcb matches fnsave's behaviour. 1689 * We only need to drop to !PCB_INITDONE in sendsig(). But 1690 * sendsig() is the only caller of fpugetregs()... perhaps we just 1691 * have too many layers. 
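 *
 * Net effect, roughly: after fpstate_drop() the thread's next FPU
 * instruction takes a device-not-available trap and starts from a
 * freshly initialized FPU state rather than whatever was captured in
 * the signal frame.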
1692 */ 1693 curthread->td_pcb->pcb_flags &= ~PCB_FPUINITDONE; 1694 intr_restore(s); 1695} 1696 1697int 1698fill_dbregs(struct thread *td, struct dbreg *dbregs) 1699{ 1700 struct pcb *pcb; 1701 1702 if (td == NULL) { 1703 dbregs->dr[0] = rdr0(); 1704 dbregs->dr[1] = rdr1(); 1705 dbregs->dr[2] = rdr2(); 1706 dbregs->dr[3] = rdr3(); 1707 dbregs->dr[6] = rdr6(); 1708 dbregs->dr[7] = rdr7(); 1709 } else { 1710 pcb = td->td_pcb; 1711 dbregs->dr[0] = pcb->pcb_dr0; 1712 dbregs->dr[1] = pcb->pcb_dr1; 1713 dbregs->dr[2] = pcb->pcb_dr2; 1714 dbregs->dr[3] = pcb->pcb_dr3; 1715 dbregs->dr[6] = pcb->pcb_dr6; 1716 dbregs->dr[7] = pcb->pcb_dr7; 1717 } 1718 dbregs->dr[4] = 0; 1719 dbregs->dr[5] = 0; 1720 dbregs->dr[8] = 0; 1721 dbregs->dr[9] = 0; 1722 dbregs->dr[10] = 0; 1723 dbregs->dr[11] = 0; 1724 dbregs->dr[12] = 0; 1725 dbregs->dr[13] = 0; 1726 dbregs->dr[14] = 0; 1727 dbregs->dr[15] = 0; 1728 return (0); 1729} 1730 1731int 1732set_dbregs(struct thread *td, struct dbreg *dbregs) 1733{ 1734 struct pcb *pcb; 1735 int i; 1736 1737 if (td == NULL) { 1738 load_dr0(dbregs->dr[0]); 1739 load_dr1(dbregs->dr[1]); 1740 load_dr2(dbregs->dr[2]); 1741 load_dr3(dbregs->dr[3]); 1742 load_dr6(dbregs->dr[6]); 1743 load_dr7(dbregs->dr[7]); 1744 } else { 1745 /* 1746 * Don't let an illegal value for dr7 get set. Specifically, 1747 * check for undefined settings. Setting these bit patterns 1748 * result in undefined behaviour and can lead to an unexpected 1749 * TRCTRAP or a general protection fault right here. 1750 * Upper bits of dr6 and dr7 must not be set 1751 */ 1752 for (i = 0; i < 4; i++) { 1753 if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02) 1754 return (EINVAL); 1755 if (td->td_frame->tf_cs == _ucode32sel && 1756 DBREG_DR7_LEN(dbregs->dr[7], i) == DBREG_DR7_LEN_8) 1757 return (EINVAL); 1758 } 1759 if ((dbregs->dr[6] & 0xffffffff00000000ul) != 0 || 1760 (dbregs->dr[7] & 0xffffffff00000000ul) != 0) 1761 return (EINVAL); 1762 1763 pcb = td->td_pcb; 1764 1765 /* 1766 * Don't let a process set a breakpoint that is not within the 1767 * process's address space. If a process could do this, it 1768 * could halt the system by setting a breakpoint in the kernel 1769 * (if ddb was enabled). Thus, we need to check to make sure 1770 * that no breakpoints are being enabled for addresses outside 1771 * process's address space. 1772 * 1773 * XXX - what about when the watched area of the user's 1774 * address space is written into from within the kernel 1775 * ... wouldn't that still cause a breakpoint to be generated 1776 * from within kernel mode? 
1777 */ 1778 1779 if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) { 1780 /* dr0 is enabled */ 1781 if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS) 1782 return (EINVAL); 1783 } 1784 if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) { 1785 /* dr1 is enabled */ 1786 if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS) 1787 return (EINVAL); 1788 } 1789 if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) { 1790 /* dr2 is enabled */ 1791 if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS) 1792 return (EINVAL); 1793 } 1794 if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) { 1795 /* dr3 is enabled */ 1796 if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS) 1797 return (EINVAL); 1798 } 1799 1800 pcb->pcb_dr0 = dbregs->dr[0]; 1801 pcb->pcb_dr1 = dbregs->dr[1]; 1802 pcb->pcb_dr2 = dbregs->dr[2]; 1803 pcb->pcb_dr3 = dbregs->dr[3]; 1804 pcb->pcb_dr6 = dbregs->dr[6]; 1805 pcb->pcb_dr7 = dbregs->dr[7]; 1806 1807 pcb->pcb_flags |= PCB_DBREGS; 1808 } 1809 1810 return (0); 1811} 1812 1813void 1814reset_dbregs(void) 1815{ 1816 1817 load_dr7(0); /* Turn off the control bits first */ 1818 load_dr0(0); 1819 load_dr1(0); 1820 load_dr2(0); 1821 load_dr3(0); 1822 load_dr6(0); 1823} 1824 1825/* 1826 * Return > 0 if a hardware breakpoint has been hit, and the 1827 * breakpoint was in user space. Return 0, otherwise. 1828 */ 1829int 1830user_dbreg_trap(void) 1831{ 1832 u_int64_t dr7, dr6; /* debug registers dr6 and dr7 */ 1833 u_int64_t bp; /* breakpoint bits extracted from dr6 */ 1834 int nbp; /* number of breakpoints that triggered */ 1835 caddr_t addr[4]; /* breakpoint addresses */ 1836 int i; 1837 1838 dr7 = rdr7(); 1839 if ((dr7 & 0x000000ff) == 0) { 1840 /* 1841 * all GE and LE bits in the dr7 register are zero, 1842 * thus the trap couldn't have been caused by the 1843 * hardware debug registers 1844 */ 1845 return 0; 1846 } 1847 1848 nbp = 0; 1849 dr6 = rdr6(); 1850 bp = dr6 & 0x0000000f; 1851 1852 if (!bp) { 1853 /* 1854 * None of the breakpoint bits are set meaning this 1855 * trap was not caused by any of the debug registers 1856 */ 1857 return 0; 1858 } 1859 1860 /* 1861 * at least one of the breakpoints were hit, check to see 1862 * which ones and if any of them are user space addresses 1863 */ 1864 1865 if (bp & 0x01) { 1866 addr[nbp++] = (caddr_t)rdr0(); 1867 } 1868 if (bp & 0x02) { 1869 addr[nbp++] = (caddr_t)rdr1(); 1870 } 1871 if (bp & 0x04) { 1872 addr[nbp++] = (caddr_t)rdr2(); 1873 } 1874 if (bp & 0x08) { 1875 addr[nbp++] = (caddr_t)rdr3(); 1876 } 1877 1878 for (i = 0; i < nbp; i++) { 1879 if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) { 1880 /* 1881 * addr[i] is in user space 1882 */ 1883 return nbp; 1884 } 1885 } 1886 1887 /* 1888 * None of the breakpoints are in user space. 1889 */ 1890 return 0; 1891} 1892 1893#ifdef KDB 1894 1895/* 1896 * Provide inb() and outb() as functions. They are normally only 1897 * available as macros calling inlined functions, thus cannot be 1898 * called from the debugger. 1899 * 1900 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined. 1901 */ 1902 1903#undef inb 1904#undef outb 1905 1906/* silence compiler warnings */ 1907u_char inb(u_int); 1908void outb(u_int, u_char); 1909 1910u_char 1911inb(u_int port) 1912{ 1913 u_char data; 1914 /* 1915 * We use %%dx and not %1 here because i/o is done at %dx and not at 1916 * %edx, while gcc generates inferior code (movw instead of movl) 1917 * if we tell it to load (u_short) port. 
 */
	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

void
outb(u_int port, u_char data)
{
	u_char al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

#endif /* KDB */