machdep.c revision 283479
/*-
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/amd64/amd64/machdep.c 283479 2015-05-24 17:56:02Z dchagin $");

#include "opt_atpic.h"
#include "opt_compat.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_isa.h"
#include "opt_kstack_pages.h"
#include "opt_maxmem.h"
#include "opt_mp_watchdog.h"
#include "opt_perfmon.h"
#include "opt_platform.h"
#include "opt_sched.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/callout.h>
#include <sys/cons.h>
#include <sys/cpu.h>
#include <sys/efi.h>
#include <sys/eventhandler.h>
#include <sys/exec.h>
#include <sys/imgact.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/memrange.h>
#include <sys/msgbuf.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/ptrace.h>
#include <sys/reboot.h>
#include <sys/rwlock.h>
#include <sys/sched.h>
#include <sys/signalvar.h>
#ifdef SMP
#include <sys/smp.h>
#endif
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/sysproto.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vm_param.h>

#ifdef DDB
#ifndef KDB
#error KDB must be enabled in order for DDB to work!
#endif
#include <ddb/ddb.h>
#include <ddb/db_sym.h>
#endif

#include <net/netisr.h>

#include <machine/clock.h>
#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/intr_machdep.h>
#include <x86/mca.h>
#include <machine/md_var.h>
#include <machine/metadata.h>
#include <machine/mp_watchdog.h>
#include <machine/pc/bios.h>
#include <machine/pcb.h>
#include <machine/proc.h>
#include <machine/reg.h>
#include <machine/sigframe.h>
#include <machine/specialreg.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#include <machine/tss.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#ifdef FDT
#include <x86/fdt.h>
#endif

#ifdef DEV_ATPIC
#include <x86/isa/icu.h>
#else
#include <x86/apicvar.h>
#endif

#include <isa/isareg.h>
#include <isa/rtc.h>
#include <x86/init.h>

/* Sanity check for __curthread() */
CTASSERT(offsetof(struct pcpu, pc_curthread) == 0);

extern u_int64_t hammer_time(u_int64_t, u_int64_t);

#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)

static void cpu_startup(void *);
static void get_fpcontext(struct thread *td, mcontext_t *mcp,
    char *xfpusave, size_t xfpusave_len);
static int  set_fpcontext(struct thread *td, mcontext_t *mcp,
    char *xfpustate, size_t xfpustate_len);
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL);

/* Preload data parse function */
static caddr_t native_parse_preload_data(u_int64_t);

/* Native function to fetch and parse the e820 map */
static void native_parse_memmap(caddr_t, vm_paddr_t *, int *);

/* Default init_ops implementation.
*/ 171struct init_ops init_ops = { 172 .parse_preload_data = native_parse_preload_data, 173 .early_clock_source_init = i8254_init, 174 .early_delay = i8254_delay, 175 .parse_memmap = native_parse_memmap, 176#ifdef SMP 177 .mp_bootaddress = mp_bootaddress, 178 .start_all_aps = native_start_all_aps, 179#endif 180 .msi_init = msi_init, 181}; 182 183/* 184 * The file "conf/ldscript.amd64" defines the symbol "kernphys". Its value is 185 * the physical address at which the kernel is loaded. 186 */ 187extern char kernphys[]; 188 189struct msgbuf *msgbufp; 190 191/* Intel ICH registers */ 192#define ICH_PMBASE 0x400 193#define ICH_SMI_EN ICH_PMBASE + 0x30 194 195int _udatasel, _ucodesel, _ucode32sel, _ufssel, _ugssel; 196 197int cold = 1; 198 199long Maxmem = 0; 200long realmem = 0; 201 202/* 203 * The number of PHYSMAP entries must be one less than the number of 204 * PHYSSEG entries because the PHYSMAP entry that spans the largest 205 * physical address that is accessible by ISA DMA is split into two 206 * PHYSSEG entries. 207 */ 208#define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1)) 209 210vm_paddr_t phys_avail[PHYSMAP_SIZE + 2]; 211vm_paddr_t dump_avail[PHYSMAP_SIZE + 2]; 212 213/* must be 2 less so 0 0 can signal end of chunks */ 214#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2) 215#define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2) 216 217struct kva_md_info kmi; 218 219static struct trapframe proc0_tf; 220struct region_descriptor r_gdt, r_idt; 221 222struct pcpu __pcpu[MAXCPU]; 223 224struct mtx icu_lock; 225 226struct mem_range_softc mem_range_softc; 227 228struct mtx dt_lock; /* lock for GDT and LDT */ 229 230void (*vmm_resume_p)(void); 231 232static void 233cpu_startup(dummy) 234 void *dummy; 235{ 236 uintmax_t memsize; 237 char *sysenv; 238 239 /* 240 * On MacBooks, we need to disallow the legacy USB circuit to 241 * generate an SMI# because this can cause several problems, 242 * namely: incorrect CPU frequency detection and failure to 243 * start the APs. 244 * We do this by disabling a bit in the SMI_EN (SMI Control and 245 * Enable register) of the Intel ICH LPC Interface Bridge. 246 */ 247 sysenv = kern_getenv("smbios.system.product"); 248 if (sysenv != NULL) { 249 if (strncmp(sysenv, "MacBook1,1", 10) == 0 || 250 strncmp(sysenv, "MacBook3,1", 10) == 0 || 251 strncmp(sysenv, "MacBook4,1", 10) == 0 || 252 strncmp(sysenv, "MacBookPro1,1", 13) == 0 || 253 strncmp(sysenv, "MacBookPro1,2", 13) == 0 || 254 strncmp(sysenv, "MacBookPro3,1", 13) == 0 || 255 strncmp(sysenv, "MacBookPro4,1", 13) == 0 || 256 strncmp(sysenv, "Macmini1,1", 10) == 0) { 257 if (bootverbose) 258 printf("Disabling LEGACY_USB_EN bit on " 259 "Intel ICH.\n"); 260 outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8); 261 } 262 freeenv(sysenv); 263 } 264 265 /* 266 * Good {morning,afternoon,evening,night}. 267 */ 268 startrtclock(); 269 printcpuinfo(); 270 panicifcpuunsupported(); 271#ifdef PERFMON 272 perfmon_init(); 273#endif 274 275 /* 276 * Display physical memory if SMBIOS reports reasonable amount. 277 */ 278 memsize = 0; 279 sysenv = kern_getenv("smbios.memory.enabled"); 280 if (sysenv != NULL) { 281 memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10; 282 freeenv(sysenv); 283 } 284 if (memsize < ptoa((uintmax_t)vm_cnt.v_free_count)) 285 memsize = ptoa((uintmax_t)Maxmem); 286 printf("real memory = %ju (%ju MB)\n", memsize, memsize >> 20); 287 realmem = atop(memsize); 288 289 /* 290 * Display any holes after the first chunk of extended memory. 
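 * Illustrative sample of the bootverbose output produced by the loop below
 * (the addresses are assumed values, not taken from this file): a chunk
 * running from 0x1000 to 0x9f000 would print as
 *	0x0000000000001000 - 0x000000000009efff, 647168 bytes (158 pages)
 * since the second column is the last byte of the chunk, not its end.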
291 */ 292 if (bootverbose) { 293 int indx; 294 295 printf("Physical memory chunk(s):\n"); 296 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) { 297 vm_paddr_t size; 298 299 size = phys_avail[indx + 1] - phys_avail[indx]; 300 printf( 301 "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n", 302 (uintmax_t)phys_avail[indx], 303 (uintmax_t)phys_avail[indx + 1] - 1, 304 (uintmax_t)size, (uintmax_t)size / PAGE_SIZE); 305 } 306 } 307 308 vm_ksubmap_init(&kmi); 309 310 printf("avail memory = %ju (%ju MB)\n", 311 ptoa((uintmax_t)vm_cnt.v_free_count), 312 ptoa((uintmax_t)vm_cnt.v_free_count) / 1048576); 313 314 /* 315 * Set up buffers, so they can be used to read disk labels. 316 */ 317 bufinit(); 318 vm_pager_bufferinit(); 319 320 cpu_setregs(); 321} 322 323/* 324 * Send an interrupt to process. 325 * 326 * Stack is set up to allow sigcode stored 327 * at top to call routine, followed by call 328 * to sigreturn routine below. After sigreturn 329 * resets the signal mask, the stack, and the 330 * frame pointer, it returns to the user 331 * specified pc, psl. 332 */ 333void 334sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) 335{ 336 struct sigframe sf, *sfp; 337 struct pcb *pcb; 338 struct proc *p; 339 struct thread *td; 340 struct sigacts *psp; 341 char *sp; 342 struct trapframe *regs; 343 char *xfpusave; 344 size_t xfpusave_len; 345 int sig; 346 int oonstack; 347 348 td = curthread; 349 pcb = td->td_pcb; 350 p = td->td_proc; 351 PROC_LOCK_ASSERT(p, MA_OWNED); 352 sig = ksi->ksi_signo; 353 psp = p->p_sigacts; 354 mtx_assert(&psp->ps_mtx, MA_OWNED); 355 regs = td->td_frame; 356 oonstack = sigonstack(regs->tf_rsp); 357 358 if (cpu_max_ext_state_size > sizeof(struct savefpu) && use_xsave) { 359 xfpusave_len = cpu_max_ext_state_size - sizeof(struct savefpu); 360 xfpusave = __builtin_alloca(xfpusave_len); 361 } else { 362 xfpusave_len = 0; 363 xfpusave = NULL; 364 } 365 366 /* Save user context. */ 367 bzero(&sf, sizeof(sf)); 368 sf.sf_uc.uc_sigmask = *mask; 369 sf.sf_uc.uc_stack = td->td_sigstk; 370 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) 371 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE; 372 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0; 373 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(*regs)); 374 sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */ 375 get_fpcontext(td, &sf.sf_uc.uc_mcontext, xfpusave, xfpusave_len); 376 fpstate_drop(td); 377 sf.sf_uc.uc_mcontext.mc_fsbase = pcb->pcb_fsbase; 378 sf.sf_uc.uc_mcontext.mc_gsbase = pcb->pcb_gsbase; 379 bzero(sf.sf_uc.uc_mcontext.mc_spare, 380 sizeof(sf.sf_uc.uc_mcontext.mc_spare)); 381 bzero(sf.sf_uc.__spare__, sizeof(sf.sf_uc.__spare__)); 382 383 /* Allocate space for the signal handler context. */ 384 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack && 385 SIGISMEMBER(psp->ps_sigonstack, sig)) { 386 sp = td->td_sigstk.ss_sp + td->td_sigstk.ss_size; 387#if defined(COMPAT_43) 388 td->td_sigstk.ss_flags |= SS_ONSTACK; 389#endif 390 } else 391 sp = (char *)regs->tf_rsp - 128; 392 if (xfpusave != NULL) { 393 sp -= xfpusave_len; 394 sp = (char *)((unsigned long)sp & ~0x3Ful); 395 sf.sf_uc.uc_mcontext.mc_xfpustate = (register_t)sp; 396 } 397 sp -= sizeof(struct sigframe); 398 /* Align to 16 bytes. */ 399 sfp = (struct sigframe *)((unsigned long)sp & ~0xFul); 400 401 /* Build the argument list for the signal handler. 
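 * Illustrative userland counterpart (an assumption for exposition, not part
 * of this file): a handler installed with SA_SIGINFO receives exactly the
 * three values staged below in %rdi, %rsi and %rdx, per the amd64 calling
 * convention:
 *
 *	static void
 *	handler(int sig, siginfo_t *si, void *ucp)
 *	{
 *		... sig from tf_rdi, si from tf_rsi, ucp from tf_rdx ...
 *	}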
*/ 402 regs->tf_rdi = sig; /* arg 1 in %rdi */ 403 regs->tf_rdx = (register_t)&sfp->sf_uc; /* arg 3 in %rdx */ 404 bzero(&sf.sf_si, sizeof(sf.sf_si)); 405 if (SIGISMEMBER(psp->ps_siginfo, sig)) { 406 /* Signal handler installed with SA_SIGINFO. */ 407 regs->tf_rsi = (register_t)&sfp->sf_si; /* arg 2 in %rsi */ 408 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; 409 410 /* Fill in POSIX parts */ 411 sf.sf_si = ksi->ksi_info; 412 sf.sf_si.si_signo = sig; /* maybe a translated signal */ 413 regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */ 414 } else { 415 /* Old FreeBSD-style arguments. */ 416 regs->tf_rsi = ksi->ksi_code; /* arg 2 in %rsi */ 417 regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */ 418 sf.sf_ahu.sf_handler = catcher; 419 } 420 mtx_unlock(&psp->ps_mtx); 421 PROC_UNLOCK(p); 422 423 /* 424 * Copy the sigframe out to the user's stack. 425 */ 426 if (copyout(&sf, sfp, sizeof(*sfp)) != 0 || 427 (xfpusave != NULL && copyout(xfpusave, 428 (void *)sf.sf_uc.uc_mcontext.mc_xfpustate, xfpusave_len) 429 != 0)) { 430#ifdef DEBUG 431 printf("process %ld has trashed its stack\n", (long)p->p_pid); 432#endif 433 PROC_LOCK(p); 434 sigexit(td, SIGILL); 435 } 436 437 regs->tf_rsp = (long)sfp; 438 regs->tf_rip = p->p_sysent->sv_sigcode_base; 439 regs->tf_rflags &= ~(PSL_T | PSL_D); 440 regs->tf_cs = _ucodesel; 441 regs->tf_ds = _udatasel; 442 regs->tf_ss = _udatasel; 443 regs->tf_es = _udatasel; 444 regs->tf_fs = _ufssel; 445 regs->tf_gs = _ugssel; 446 regs->tf_flags = TF_HASSEGS; 447 set_pcb_flags(pcb, PCB_FULL_IRET); 448 PROC_LOCK(p); 449 mtx_lock(&psp->ps_mtx); 450} 451 452/* 453 * System call to cleanup state after a signal 454 * has been taken. Reset signal mask and 455 * stack state from context left by sendsig (above). 456 * Return to previous pc and psl as specified by 457 * context left by sendsig. Check carefully to 458 * make sure that the user has not modified the 459 * state to gain improper privileges. 460 * 461 * MPSAFE 462 */ 463int 464sys_sigreturn(td, uap) 465 struct thread *td; 466 struct sigreturn_args /* { 467 const struct __ucontext *sigcntxp; 468 } */ *uap; 469{ 470 ucontext_t uc; 471 struct pcb *pcb; 472 struct proc *p; 473 struct trapframe *regs; 474 ucontext_t *ucp; 475 char *xfpustate; 476 size_t xfpustate_len; 477 long rflags; 478 int cs, error, ret; 479 ksiginfo_t ksi; 480 481 pcb = td->td_pcb; 482 p = td->td_proc; 483 484 error = copyin(uap->sigcntxp, &uc, sizeof(uc)); 485 if (error != 0) { 486 uprintf("pid %d (%s): sigreturn copyin failed\n", 487 p->p_pid, td->td_name); 488 return (error); 489 } 490 ucp = &uc; 491 if ((ucp->uc_mcontext.mc_flags & ~_MC_FLAG_MASK) != 0) { 492 uprintf("pid %d (%s): sigreturn mc_flags %x\n", p->p_pid, 493 td->td_name, ucp->uc_mcontext.mc_flags); 494 return (EINVAL); 495 } 496 regs = td->td_frame; 497 rflags = ucp->uc_mcontext.mc_rflags; 498 /* 499 * Don't allow users to change privileged or reserved flags. 500 */ 501 if (!EFL_SECURE(rflags, regs->tf_rflags)) { 502 uprintf("pid %d (%s): sigreturn rflags = 0x%lx\n", p->p_pid, 503 td->td_name, rflags); 504 return (EINVAL); 505 } 506 507 /* 508 * Don't allow users to load a valid privileged %cs. Let the 509 * hardware check for invalid selectors, excess privilege in 510 * other selectors, invalid %eip's and invalid %esp's. 
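 * As a worked illustration of the two checks (selector values derived from
 * the gdt_segs[] layout later in this file, shown only as an example):
 * CS_SECURE() accepts a selector whose RPL bits equal SEL_UPL, so
 * _ucodesel = GSEL(GUCODE_SEL, SEL_UPL) = (8 << 3) | 3 = 0x43 passes while
 * the kernel's 0x20 is rejected; EFL_SECURE() accepts a new rflags value
 * only when every bit outside PSL_USERCHANGE matches the current trapframe.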
511 */ 512 cs = ucp->uc_mcontext.mc_cs; 513 if (!CS_SECURE(cs)) { 514 uprintf("pid %d (%s): sigreturn cs = 0x%x\n", p->p_pid, 515 td->td_name, cs); 516 ksiginfo_init_trap(&ksi); 517 ksi.ksi_signo = SIGBUS; 518 ksi.ksi_code = BUS_OBJERR; 519 ksi.ksi_trapno = T_PROTFLT; 520 ksi.ksi_addr = (void *)regs->tf_rip; 521 trapsignal(td, &ksi); 522 return (EINVAL); 523 } 524 525 if ((uc.uc_mcontext.mc_flags & _MC_HASFPXSTATE) != 0) { 526 xfpustate_len = uc.uc_mcontext.mc_xfpustate_len; 527 if (xfpustate_len > cpu_max_ext_state_size - 528 sizeof(struct savefpu)) { 529 uprintf("pid %d (%s): sigreturn xfpusave_len = 0x%zx\n", 530 p->p_pid, td->td_name, xfpustate_len); 531 return (EINVAL); 532 } 533 xfpustate = __builtin_alloca(xfpustate_len); 534 error = copyin((const void *)uc.uc_mcontext.mc_xfpustate, 535 xfpustate, xfpustate_len); 536 if (error != 0) { 537 uprintf( 538 "pid %d (%s): sigreturn copying xfpustate failed\n", 539 p->p_pid, td->td_name); 540 return (error); 541 } 542 } else { 543 xfpustate = NULL; 544 xfpustate_len = 0; 545 } 546 ret = set_fpcontext(td, &ucp->uc_mcontext, xfpustate, xfpustate_len); 547 if (ret != 0) { 548 uprintf("pid %d (%s): sigreturn set_fpcontext err %d\n", 549 p->p_pid, td->td_name, ret); 550 return (ret); 551 } 552 bcopy(&ucp->uc_mcontext.mc_rdi, regs, sizeof(*regs)); 553 pcb->pcb_fsbase = ucp->uc_mcontext.mc_fsbase; 554 pcb->pcb_gsbase = ucp->uc_mcontext.mc_gsbase; 555 556#if defined(COMPAT_43) 557 if (ucp->uc_mcontext.mc_onstack & 1) 558 td->td_sigstk.ss_flags |= SS_ONSTACK; 559 else 560 td->td_sigstk.ss_flags &= ~SS_ONSTACK; 561#endif 562 563 kern_sigprocmask(td, SIG_SETMASK, &ucp->uc_sigmask, NULL, 0); 564 set_pcb_flags(pcb, PCB_FULL_IRET); 565 return (EJUSTRETURN); 566} 567 568#ifdef COMPAT_FREEBSD4 569int 570freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap) 571{ 572 573 return sys_sigreturn(td, (struct sigreturn_args *)uap); 574} 575#endif 576 577/* 578 * Reset registers to default values on exec. 579 */ 580void 581exec_setregs(struct thread *td, struct image_params *imgp, u_long stack) 582{ 583 struct trapframe *regs = td->td_frame; 584 struct pcb *pcb = td->td_pcb; 585 586 mtx_lock(&dt_lock); 587 if (td->td_proc->p_md.md_ldt != NULL) 588 user_ldt_free(td); 589 else 590 mtx_unlock(&dt_lock); 591 592 pcb->pcb_fsbase = 0; 593 pcb->pcb_gsbase = 0; 594 clear_pcb_flags(pcb, PCB_32BIT); 595 pcb->pcb_initial_fpucw = __INITIAL_FPUCW__; 596 set_pcb_flags(pcb, PCB_FULL_IRET); 597 598 bzero((char *)regs, sizeof(struct trapframe)); 599 regs->tf_rip = imgp->entry_addr; 600 regs->tf_rsp = ((stack - 8) & ~0xFul) + 8; 601 regs->tf_rdi = stack; /* argv */ 602 regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T); 603 regs->tf_ss = _udatasel; 604 regs->tf_cs = _ucodesel; 605 regs->tf_ds = _udatasel; 606 regs->tf_es = _udatasel; 607 regs->tf_fs = _ufssel; 608 regs->tf_gs = _ugssel; 609 regs->tf_flags = TF_HASSEGS; 610 td->td_retval[1] = 0; 611 612 /* 613 * Reset the hardware debug registers if they were in use. 614 * They won't have any meaning for the newly exec'd process. 615 */ 616 if (pcb->pcb_flags & PCB_DBREGS) { 617 pcb->pcb_dr0 = 0; 618 pcb->pcb_dr1 = 0; 619 pcb->pcb_dr2 = 0; 620 pcb->pcb_dr3 = 0; 621 pcb->pcb_dr6 = 0; 622 pcb->pcb_dr7 = 0; 623 if (pcb == curpcb) { 624 /* 625 * Clear the debug registers on the running 626 * CPU, otherwise they will end up affecting 627 * the next process we switch to. 
628 */ 629 reset_dbregs(); 630 } 631 clear_pcb_flags(pcb, PCB_DBREGS); 632 } 633 634 /* 635 * Drop the FP state if we hold it, so that the process gets a 636 * clean FP state if it uses the FPU again. 637 */ 638 fpstate_drop(td); 639} 640 641void 642cpu_setregs(void) 643{ 644 register_t cr0; 645 646 cr0 = rcr0(); 647 /* 648 * CR0_MP, CR0_NE and CR0_TS are also set by npx_probe() for the 649 * BSP. See the comments there about why we set them. 650 */ 651 cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM; 652 load_cr0(cr0); 653} 654 655/* 656 * Initialize amd64 and configure to run kernel 657 */ 658 659/* 660 * Initialize segments & interrupt table 661 */ 662 663struct user_segment_descriptor gdt[NGDT * MAXCPU];/* global descriptor tables */ 664static struct gate_descriptor idt0[NIDT]; 665struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */ 666 667static char dblfault_stack[PAGE_SIZE] __aligned(16); 668 669static char nmi0_stack[PAGE_SIZE] __aligned(16); 670CTASSERT(sizeof(struct nmi_pcpu) == 16); 671 672struct amd64tss common_tss[MAXCPU]; 673 674/* 675 * Software prototypes -- in more palatable form. 676 * 677 * Keep GUFS32, GUGS32, GUCODE32 and GUDATA at the same 678 * slots as corresponding segments for i386 kernel. 679 */ 680struct soft_segment_descriptor gdt_segs[] = { 681/* GNULL_SEL 0 Null Descriptor */ 682{ .ssd_base = 0x0, 683 .ssd_limit = 0x0, 684 .ssd_type = 0, 685 .ssd_dpl = 0, 686 .ssd_p = 0, 687 .ssd_long = 0, 688 .ssd_def32 = 0, 689 .ssd_gran = 0 }, 690/* GNULL2_SEL 1 Null Descriptor */ 691{ .ssd_base = 0x0, 692 .ssd_limit = 0x0, 693 .ssd_type = 0, 694 .ssd_dpl = 0, 695 .ssd_p = 0, 696 .ssd_long = 0, 697 .ssd_def32 = 0, 698 .ssd_gran = 0 }, 699/* GUFS32_SEL 2 32 bit %gs Descriptor for user */ 700{ .ssd_base = 0x0, 701 .ssd_limit = 0xfffff, 702 .ssd_type = SDT_MEMRWA, 703 .ssd_dpl = SEL_UPL, 704 .ssd_p = 1, 705 .ssd_long = 0, 706 .ssd_def32 = 1, 707 .ssd_gran = 1 }, 708/* GUGS32_SEL 3 32 bit %fs Descriptor for user */ 709{ .ssd_base = 0x0, 710 .ssd_limit = 0xfffff, 711 .ssd_type = SDT_MEMRWA, 712 .ssd_dpl = SEL_UPL, 713 .ssd_p = 1, 714 .ssd_long = 0, 715 .ssd_def32 = 1, 716 .ssd_gran = 1 }, 717/* GCODE_SEL 4 Code Descriptor for kernel */ 718{ .ssd_base = 0x0, 719 .ssd_limit = 0xfffff, 720 .ssd_type = SDT_MEMERA, 721 .ssd_dpl = SEL_KPL, 722 .ssd_p = 1, 723 .ssd_long = 1, 724 .ssd_def32 = 0, 725 .ssd_gran = 1 }, 726/* GDATA_SEL 5 Data Descriptor for kernel */ 727{ .ssd_base = 0x0, 728 .ssd_limit = 0xfffff, 729 .ssd_type = SDT_MEMRWA, 730 .ssd_dpl = SEL_KPL, 731 .ssd_p = 1, 732 .ssd_long = 1, 733 .ssd_def32 = 0, 734 .ssd_gran = 1 }, 735/* GUCODE32_SEL 6 32 bit Code Descriptor for user */ 736{ .ssd_base = 0x0, 737 .ssd_limit = 0xfffff, 738 .ssd_type = SDT_MEMERA, 739 .ssd_dpl = SEL_UPL, 740 .ssd_p = 1, 741 .ssd_long = 0, 742 .ssd_def32 = 1, 743 .ssd_gran = 1 }, 744/* GUDATA_SEL 7 32/64 bit Data Descriptor for user */ 745{ .ssd_base = 0x0, 746 .ssd_limit = 0xfffff, 747 .ssd_type = SDT_MEMRWA, 748 .ssd_dpl = SEL_UPL, 749 .ssd_p = 1, 750 .ssd_long = 0, 751 .ssd_def32 = 1, 752 .ssd_gran = 1 }, 753/* GUCODE_SEL 8 64 bit Code Descriptor for user */ 754{ .ssd_base = 0x0, 755 .ssd_limit = 0xfffff, 756 .ssd_type = SDT_MEMERA, 757 .ssd_dpl = SEL_UPL, 758 .ssd_p = 1, 759 .ssd_long = 1, 760 .ssd_def32 = 0, 761 .ssd_gran = 1 }, 762/* GPROC0_SEL 9 Proc 0 Tss Descriptor */ 763{ .ssd_base = 0x0, 764 .ssd_limit = sizeof(struct amd64tss) + IOPERM_BITMAP_SIZE - 1, 765 .ssd_type = SDT_SYSTSS, 766 .ssd_dpl = SEL_KPL, 767 .ssd_p = 1, 768 .ssd_long = 0, 769 .ssd_def32 = 0, 770 
.ssd_gran = 0 }, 771/* Actually, the TSS is a system descriptor which is double size */ 772{ .ssd_base = 0x0, 773 .ssd_limit = 0x0, 774 .ssd_type = 0, 775 .ssd_dpl = 0, 776 .ssd_p = 0, 777 .ssd_long = 0, 778 .ssd_def32 = 0, 779 .ssd_gran = 0 }, 780/* GUSERLDT_SEL 11 LDT Descriptor */ 781{ .ssd_base = 0x0, 782 .ssd_limit = 0x0, 783 .ssd_type = 0, 784 .ssd_dpl = 0, 785 .ssd_p = 0, 786 .ssd_long = 0, 787 .ssd_def32 = 0, 788 .ssd_gran = 0 }, 789/* GUSERLDT_SEL 12 LDT Descriptor, double size */ 790{ .ssd_base = 0x0, 791 .ssd_limit = 0x0, 792 .ssd_type = 0, 793 .ssd_dpl = 0, 794 .ssd_p = 0, 795 .ssd_long = 0, 796 .ssd_def32 = 0, 797 .ssd_gran = 0 }, 798}; 799 800void 801setidt(idx, func, typ, dpl, ist) 802 int idx; 803 inthand_t *func; 804 int typ; 805 int dpl; 806 int ist; 807{ 808 struct gate_descriptor *ip; 809 810 ip = idt + idx; 811 ip->gd_looffset = (uintptr_t)func; 812 ip->gd_selector = GSEL(GCODE_SEL, SEL_KPL); 813 ip->gd_ist = ist; 814 ip->gd_xx = 0; 815 ip->gd_type = typ; 816 ip->gd_dpl = dpl; 817 ip->gd_p = 1; 818 ip->gd_hioffset = ((uintptr_t)func)>>16 ; 819} 820 821extern inthand_t 822 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl), 823 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm), 824 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot), 825 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align), 826 IDTVEC(xmm), IDTVEC(dblfault), 827#ifdef KDTRACE_HOOKS 828 IDTVEC(dtrace_ret), 829#endif 830#ifdef XENHVM 831 IDTVEC(xen_intr_upcall), 832#endif 833 IDTVEC(fast_syscall), IDTVEC(fast_syscall32); 834 835#ifdef DDB 836/* 837 * Display the index and function name of any IDT entries that don't use 838 * the default 'rsvd' entry point. 839 */ 840DB_SHOW_COMMAND(idt, db_show_idt) 841{ 842 struct gate_descriptor *ip; 843 int idx; 844 uintptr_t func; 845 846 ip = idt; 847 for (idx = 0; idx < NIDT && !db_pager_quit; idx++) { 848 func = ((long)ip->gd_hioffset << 16 | ip->gd_looffset); 849 if (func != (uintptr_t)&IDTVEC(rsvd)) { 850 db_printf("%3d\t", idx); 851 db_printsym(func, DB_STGY_PROC); 852 db_printf("\n"); 853 } 854 ip++; 855 } 856} 857 858/* Show privileged registers. 
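 * Illustrative usage (assuming a kernel with DDB compiled in): both commands
 * are reached from the debugger prompt, e.g.
 *	db> show idt
 *	db> show sysregs
 * the former listing only vectors routed somewhere other than IDTVEC(rsvd).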
*/ 859DB_SHOW_COMMAND(sysregs, db_show_sysregs) 860{ 861 struct { 862 uint16_t limit; 863 uint64_t base; 864 } __packed idtr, gdtr; 865 uint16_t ldt, tr; 866 867 __asm __volatile("sidt %0" : "=m" (idtr)); 868 db_printf("idtr\t0x%016lx/%04x\n", 869 (u_long)idtr.base, (u_int)idtr.limit); 870 __asm __volatile("sgdt %0" : "=m" (gdtr)); 871 db_printf("gdtr\t0x%016lx/%04x\n", 872 (u_long)gdtr.base, (u_int)gdtr.limit); 873 __asm __volatile("sldt %0" : "=r" (ldt)); 874 db_printf("ldtr\t0x%04x\n", ldt); 875 __asm __volatile("str %0" : "=r" (tr)); 876 db_printf("tr\t0x%04x\n", tr); 877 db_printf("cr0\t0x%016lx\n", rcr0()); 878 db_printf("cr2\t0x%016lx\n", rcr2()); 879 db_printf("cr3\t0x%016lx\n", rcr3()); 880 db_printf("cr4\t0x%016lx\n", rcr4()); 881 db_printf("EFER\t%016lx\n", rdmsr(MSR_EFER)); 882 db_printf("FEATURES_CTL\t%016lx\n", rdmsr(MSR_IA32_FEATURE_CONTROL)); 883 db_printf("DEBUG_CTL\t%016lx\n", rdmsr(MSR_DEBUGCTLMSR)); 884 db_printf("PAT\t%016lx\n", rdmsr(MSR_PAT)); 885 db_printf("GSBASE\t%016lx\n", rdmsr(MSR_GSBASE)); 886} 887#endif 888 889void 890sdtossd(sd, ssd) 891 struct user_segment_descriptor *sd; 892 struct soft_segment_descriptor *ssd; 893{ 894 895 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase; 896 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit; 897 ssd->ssd_type = sd->sd_type; 898 ssd->ssd_dpl = sd->sd_dpl; 899 ssd->ssd_p = sd->sd_p; 900 ssd->ssd_long = sd->sd_long; 901 ssd->ssd_def32 = sd->sd_def32; 902 ssd->ssd_gran = sd->sd_gran; 903} 904 905void 906ssdtosd(ssd, sd) 907 struct soft_segment_descriptor *ssd; 908 struct user_segment_descriptor *sd; 909{ 910 911 sd->sd_lobase = (ssd->ssd_base) & 0xffffff; 912 sd->sd_hibase = (ssd->ssd_base >> 24) & 0xff; 913 sd->sd_lolimit = (ssd->ssd_limit) & 0xffff; 914 sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf; 915 sd->sd_type = ssd->ssd_type; 916 sd->sd_dpl = ssd->ssd_dpl; 917 sd->sd_p = ssd->ssd_p; 918 sd->sd_long = ssd->ssd_long; 919 sd->sd_def32 = ssd->ssd_def32; 920 sd->sd_gran = ssd->ssd_gran; 921} 922 923void 924ssdtosyssd(ssd, sd) 925 struct soft_segment_descriptor *ssd; 926 struct system_segment_descriptor *sd; 927{ 928 929 sd->sd_lobase = (ssd->ssd_base) & 0xffffff; 930 sd->sd_hibase = (ssd->ssd_base >> 24) & 0xfffffffffful; 931 sd->sd_lolimit = (ssd->ssd_limit) & 0xffff; 932 sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf; 933 sd->sd_type = ssd->ssd_type; 934 sd->sd_dpl = ssd->ssd_dpl; 935 sd->sd_p = ssd->ssd_p; 936 sd->sd_gran = ssd->ssd_gran; 937} 938 939#if !defined(DEV_ATPIC) && defined(DEV_ISA) 940#include <isa/isavar.h> 941#include <isa/isareg.h> 942/* 943 * Return a bitmap of the current interrupt requests. This is 8259-specific 944 * and is only suitable for use at probe time. 945 * This is only here to pacify sio. It is NOT FATAL if this doesn't work. 946 * It shouldn't be here. There should probably be an APIC centric 947 * implementation in the apic driver code, if at all. 948 */ 949intrmask_t 950isa_irq_pending(void) 951{ 952 u_char irr1; 953 u_char irr2; 954 955 irr1 = inb(IO_ICU1); 956 irr2 = inb(IO_ICU2); 957 return ((irr2 << 8) | irr1); 958} 959#endif 960 961u_int basemem; 962 963static int 964add_physmap_entry(uint64_t base, uint64_t length, vm_paddr_t *physmap, 965 int *physmap_idxp) 966{ 967 int i, insert_idx, physmap_idx; 968 969 physmap_idx = *physmap_idxp; 970 971 if (length == 0) 972 return (1); 973 974 /* 975 * Find insertion point while checking for overlap. Start off by 976 * assuming the new entry will be added to the end. 977 * 978 * NB: physmap_idx points to the next free slot. 
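 * Worked example with assumed values: starting from an empty map, adding
 * [0, 0x9f000) and then [0x100000, 0xbfff0000) leaves
 *	physmap[] = { 0x0, 0x9f000, 0x100000, 0xbfff0000 }
 * with physmap_idx == 4 (the next free slot); a later add of
 * [0xbfff0000, 0xc0000000) matches the append case and simply grows
 * physmap[3] to 0xc0000000 without consuming a new pair.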
979 */ 980 insert_idx = physmap_idx; 981 for (i = 0; i <= physmap_idx; i += 2) { 982 if (base < physmap[i + 1]) { 983 if (base + length <= physmap[i]) { 984 insert_idx = i; 985 break; 986 } 987 if (boothowto & RB_VERBOSE) 988 printf( 989 "Overlapping memory regions, ignoring second region\n"); 990 return (1); 991 } 992 } 993 994 /* See if we can prepend to the next entry. */ 995 if (insert_idx <= physmap_idx && base + length == physmap[insert_idx]) { 996 physmap[insert_idx] = base; 997 return (1); 998 } 999 1000 /* See if we can append to the previous entry. */ 1001 if (insert_idx > 0 && base == physmap[insert_idx - 1]) { 1002 physmap[insert_idx - 1] += length; 1003 return (1); 1004 } 1005 1006 physmap_idx += 2; 1007 *physmap_idxp = physmap_idx; 1008 if (physmap_idx == PHYSMAP_SIZE) { 1009 printf( 1010 "Too many segments in the physical address map, giving up\n"); 1011 return (0); 1012 } 1013 1014 /* 1015 * Move the last 'N' entries down to make room for the new 1016 * entry if needed. 1017 */ 1018 for (i = (physmap_idx - 2); i > insert_idx; i -= 2) { 1019 physmap[i] = physmap[i - 2]; 1020 physmap[i + 1] = physmap[i - 1]; 1021 } 1022 1023 /* Insert the new entry. */ 1024 physmap[insert_idx] = base; 1025 physmap[insert_idx + 1] = base + length; 1026 return (1); 1027} 1028 1029void 1030bios_add_smap_entries(struct bios_smap *smapbase, u_int32_t smapsize, 1031 vm_paddr_t *physmap, int *physmap_idx) 1032{ 1033 struct bios_smap *smap, *smapend; 1034 1035 smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize); 1036 1037 for (smap = smapbase; smap < smapend; smap++) { 1038 if (boothowto & RB_VERBOSE) 1039 printf("SMAP type=%02x base=%016lx len=%016lx\n", 1040 smap->type, smap->base, smap->length); 1041 1042 if (smap->type != SMAP_TYPE_MEMORY) 1043 continue; 1044 1045 if (!add_physmap_entry(smap->base, smap->length, physmap, 1046 physmap_idx)) 1047 break; 1048 } 1049} 1050 1051#define efi_next_descriptor(ptr, size) \ 1052 ((struct efi_md *)(((uint8_t *) ptr) + size)) 1053 1054static void 1055add_efi_map_entries(struct efi_map_header *efihdr, vm_paddr_t *physmap, 1056 int *physmap_idx) 1057{ 1058 struct efi_md *map, *p; 1059 const char *type; 1060 size_t efisz; 1061 int ndesc, i; 1062 1063 static const char *types[] = { 1064 "Reserved", 1065 "LoaderCode", 1066 "LoaderData", 1067 "BootServicesCode", 1068 "BootServicesData", 1069 "RuntimeServicesCode", 1070 "RuntimeServicesData", 1071 "ConventionalMemory", 1072 "UnusableMemory", 1073 "ACPIReclaimMemory", 1074 "ACPIMemoryNVS", 1075 "MemoryMappedIO", 1076 "MemoryMappedIOPortSpace", 1077 "PalCode" 1078 }; 1079 1080 /* 1081 * Memory map data provided by UEFI via the GetMemoryMap 1082 * Boot Services API. 
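 * Note (general UEFI behaviour, not specific to this file): descriptors are
 * walked with efi_next_descriptor() using the firmware-reported
 * descriptor_size rather than sizeof(struct efi_md), because firmware may
 * use a larger stride; ndesc below is likewise memory_size / descriptor_size.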
1083 */ 1084 efisz = (sizeof(struct efi_map_header) + 0xf) & ~0xf; 1085 map = (struct efi_md *)((uint8_t *)efihdr + efisz); 1086 1087 if (efihdr->descriptor_size == 0) 1088 return; 1089 ndesc = efihdr->memory_size / efihdr->descriptor_size; 1090 1091 if (boothowto & RB_VERBOSE) 1092 printf("%23s %12s %12s %8s %4s\n", 1093 "Type", "Physical", "Virtual", "#Pages", "Attr"); 1094 1095 for (i = 0, p = map; i < ndesc; i++, 1096 p = efi_next_descriptor(p, efihdr->descriptor_size)) { 1097 if (boothowto & RB_VERBOSE) { 1098 if (p->md_type <= EFI_MD_TYPE_PALCODE) 1099 type = types[p->md_type]; 1100 else 1101 type = "<INVALID>"; 1102 printf("%23s %012lx %12p %08lx ", type, p->md_phys, 1103 p->md_virt, p->md_pages); 1104 if (p->md_attr & EFI_MD_ATTR_UC) 1105 printf("UC "); 1106 if (p->md_attr & EFI_MD_ATTR_WC) 1107 printf("WC "); 1108 if (p->md_attr & EFI_MD_ATTR_WT) 1109 printf("WT "); 1110 if (p->md_attr & EFI_MD_ATTR_WB) 1111 printf("WB "); 1112 if (p->md_attr & EFI_MD_ATTR_UCE) 1113 printf("UCE "); 1114 if (p->md_attr & EFI_MD_ATTR_WP) 1115 printf("WP "); 1116 if (p->md_attr & EFI_MD_ATTR_RP) 1117 printf("RP "); 1118 if (p->md_attr & EFI_MD_ATTR_XP) 1119 printf("XP "); 1120 if (p->md_attr & EFI_MD_ATTR_RT) 1121 printf("RUNTIME"); 1122 printf("\n"); 1123 } 1124 1125 switch (p->md_type) { 1126 case EFI_MD_TYPE_CODE: 1127 case EFI_MD_TYPE_DATA: 1128 case EFI_MD_TYPE_BS_CODE: 1129 case EFI_MD_TYPE_BS_DATA: 1130 case EFI_MD_TYPE_FREE: 1131 /* 1132 * We're allowed to use any entry with these types. 1133 */ 1134 break; 1135 default: 1136 continue; 1137 } 1138 1139 if (!add_physmap_entry(p->md_phys, (p->md_pages * PAGE_SIZE), 1140 physmap, physmap_idx)) 1141 break; 1142 } 1143} 1144 1145static char bootmethod[16] = ""; 1146SYSCTL_STRING(_machdep, OID_AUTO, bootmethod, CTLFLAG_RD, bootmethod, 0, 1147 "System firmware boot method"); 1148 1149static void 1150native_parse_memmap(caddr_t kmdp, vm_paddr_t *physmap, int *physmap_idx) 1151{ 1152 struct bios_smap *smap; 1153 struct efi_map_header *efihdr; 1154 u_int32_t size; 1155 1156 /* 1157 * Memory map from INT 15:E820. 1158 * 1159 * subr_module.c says: 1160 * "Consumer may safely assume that size value precedes data." 1161 * ie: an int32_t immediately precedes smap. 1162 */ 1163 1164 efihdr = (struct efi_map_header *)preload_search_info(kmdp, 1165 MODINFO_METADATA | MODINFOMD_EFI_MAP); 1166 smap = (struct bios_smap *)preload_search_info(kmdp, 1167 MODINFO_METADATA | MODINFOMD_SMAP); 1168 if (efihdr == NULL && smap == NULL) 1169 panic("No BIOS smap or EFI map info from loader!"); 1170 1171 if (efihdr != NULL) { 1172 add_efi_map_entries(efihdr, physmap, physmap_idx); 1173 strlcpy(bootmethod, "UEFI", sizeof(bootmethod)); 1174 } else { 1175 size = *((u_int32_t *)smap - 1); 1176 bios_add_smap_entries(smap, size, physmap, physmap_idx); 1177 strlcpy(bootmethod, "BIOS", sizeof(bootmethod)); 1178 } 1179} 1180 1181#define PAGES_PER_GB (1024 * 1024 * 1024 / PAGE_SIZE) 1182 1183/* 1184 * Populate the (physmap) array with base/bound pairs describing the 1185 * available physical memory in the system, then test this memory and 1186 * build the phys_avail array describing the actually-available memory. 1187 * 1188 * Total memory size may be set by the kernel environment variable 1189 * hw.physmem or the compile-time define MAXMEM. 1190 * 1191 * XXX first should be vm_paddr_t. 
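 * Illustrative usage (an assumption, not taken from this file): the memory
 * actually used can be capped from the loader, e.g. in /boot/loader.conf:
 *	hw.physmem="4G"
 * the tunable fetch below then clamps Maxmem, and a value larger than what
 * the memory map reports is ignored.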
1192 */ 1193static void 1194getmemsize(caddr_t kmdp, u_int64_t first) 1195{ 1196 int i, physmap_idx, pa_indx, da_indx; 1197 vm_paddr_t pa, physmap[PHYSMAP_SIZE]; 1198 u_long physmem_start, physmem_tunable, memtest; 1199 pt_entry_t *pte; 1200 quad_t dcons_addr, dcons_size; 1201 int page_counter; 1202 1203 bzero(physmap, sizeof(physmap)); 1204 physmap_idx = 0; 1205 1206 init_ops.parse_memmap(kmdp, physmap, &physmap_idx); 1207 physmap_idx -= 2; 1208 1209 /* 1210 * Find the 'base memory' segment for SMP 1211 */ 1212 basemem = 0; 1213 for (i = 0; i <= physmap_idx; i += 2) { 1214 if (physmap[i] <= 0xA0000) { 1215 basemem = physmap[i + 1] / 1024; 1216 break; 1217 } 1218 } 1219 if (basemem == 0 || basemem > 640) { 1220 if (bootverbose) 1221 printf( 1222 "Memory map doesn't contain a basemem segment, faking it"); 1223 basemem = 640; 1224 } 1225 1226 /* 1227 * Make hole for "AP -> long mode" bootstrap code. The 1228 * mp_bootaddress vector is only available when the kernel 1229 * is configured to support APs and APs for the system start 1230 * in 32bit mode (e.g. SMP bare metal). 1231 */ 1232 if (init_ops.mp_bootaddress) { 1233 if (physmap[1] >= 0x100000000) 1234 panic( 1235 "Basemem segment is not suitable for AP bootstrap code!"); 1236 physmap[1] = init_ops.mp_bootaddress(physmap[1] / 1024); 1237 } 1238 1239 /* 1240 * Maxmem isn't the "maximum memory", it's one larger than the 1241 * highest page of the physical address space. It should be 1242 * called something like "Maxphyspage". We may adjust this 1243 * based on ``hw.physmem'' and the results of the memory test. 1244 */ 1245 Maxmem = atop(physmap[physmap_idx + 1]); 1246 1247#ifdef MAXMEM 1248 Maxmem = MAXMEM / 4; 1249#endif 1250 1251 if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable)) 1252 Maxmem = atop(physmem_tunable); 1253 1254 /* 1255 * The boot memory test is disabled by default, as it takes a 1256 * significant amount of time on large-memory systems, and is 1257 * unfriendly to virtual machines as it unnecessarily touches all 1258 * pages. 1259 * 1260 * A general name is used as the code may be extended to support 1261 * additional tests beyond the current "page present" test. 1262 */ 1263 memtest = 0; 1264 TUNABLE_ULONG_FETCH("hw.memtest.tests", &memtest); 1265 1266 /* 1267 * Don't allow MAXMEM or hw.physmem to extend the amount of memory 1268 * in the system. 1269 */ 1270 if (Maxmem > atop(physmap[physmap_idx + 1])) 1271 Maxmem = atop(physmap[physmap_idx + 1]); 1272 1273 if (atop(physmap[physmap_idx + 1]) != Maxmem && 1274 (boothowto & RB_VERBOSE)) 1275 printf("Physical memory use set to %ldK\n", Maxmem * 4); 1276 1277 /* call pmap initialization to make new kernel address space */ 1278 pmap_bootstrap(&first); 1279 1280 /* 1281 * Size up each available chunk of physical memory. 1282 * 1283 * XXX Some BIOSes corrupt low 64KB between suspend and resume. 1284 * By default, mask off the first 16 pages unless we appear to be 1285 * running in a VM. 1286 */ 1287 physmem_start = (vm_guest > VM_GUEST_NO ? 
1 : 16) << PAGE_SHIFT; 1288 TUNABLE_ULONG_FETCH("hw.physmem.start", &physmem_start); 1289 if (physmap[0] < physmem_start) { 1290 if (physmem_start < PAGE_SIZE) 1291 physmap[0] = PAGE_SIZE; 1292 else if (physmem_start >= physmap[1]) 1293 physmap[0] = round_page(physmap[1] - PAGE_SIZE); 1294 else 1295 physmap[0] = round_page(physmem_start); 1296 } 1297 pa_indx = 0; 1298 da_indx = 1; 1299 phys_avail[pa_indx++] = physmap[0]; 1300 phys_avail[pa_indx] = physmap[0]; 1301 dump_avail[da_indx] = physmap[0]; 1302 pte = CMAP1; 1303 1304 /* 1305 * Get dcons buffer address 1306 */ 1307 if (getenv_quad("dcons.addr", &dcons_addr) == 0 || 1308 getenv_quad("dcons.size", &dcons_size) == 0) 1309 dcons_addr = 0; 1310 1311 /* 1312 * physmap is in bytes, so when converting to page boundaries, 1313 * round up the start address and round down the end address. 1314 */ 1315 page_counter = 0; 1316 if (memtest != 0) 1317 printf("Testing system memory"); 1318 for (i = 0; i <= physmap_idx; i += 2) { 1319 vm_paddr_t end; 1320 1321 end = ptoa((vm_paddr_t)Maxmem); 1322 if (physmap[i + 1] < end) 1323 end = trunc_page(physmap[i + 1]); 1324 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) { 1325 int tmp, page_bad, full; 1326 int *ptr = (int *)CADDR1; 1327 1328 full = FALSE; 1329 /* 1330 * block out kernel memory as not available. 1331 */ 1332 if (pa >= (vm_paddr_t)kernphys && pa < first) 1333 goto do_dump_avail; 1334 1335 /* 1336 * block out dcons buffer 1337 */ 1338 if (dcons_addr > 0 1339 && pa >= trunc_page(dcons_addr) 1340 && pa < dcons_addr + dcons_size) 1341 goto do_dump_avail; 1342 1343 page_bad = FALSE; 1344 if (memtest == 0) 1345 goto skip_memtest; 1346 1347 /* 1348 * Print a "." every GB to show we're making 1349 * progress. 1350 */ 1351 page_counter++; 1352 if ((page_counter % PAGES_PER_GB) == 0) 1353 printf("."); 1354 1355 /* 1356 * map page into kernel: valid, read/write,non-cacheable 1357 */ 1358 *pte = pa | PG_V | PG_RW | PG_NC_PWT | PG_NC_PCD; 1359 invltlb(); 1360 1361 tmp = *(int *)ptr; 1362 /* 1363 * Test for alternating 1's and 0's 1364 */ 1365 *(volatile int *)ptr = 0xaaaaaaaa; 1366 if (*(volatile int *)ptr != 0xaaaaaaaa) 1367 page_bad = TRUE; 1368 /* 1369 * Test for alternating 0's and 1's 1370 */ 1371 *(volatile int *)ptr = 0x55555555; 1372 if (*(volatile int *)ptr != 0x55555555) 1373 page_bad = TRUE; 1374 /* 1375 * Test for all 1's 1376 */ 1377 *(volatile int *)ptr = 0xffffffff; 1378 if (*(volatile int *)ptr != 0xffffffff) 1379 page_bad = TRUE; 1380 /* 1381 * Test for all 0's 1382 */ 1383 *(volatile int *)ptr = 0x0; 1384 if (*(volatile int *)ptr != 0x0) 1385 page_bad = TRUE; 1386 /* 1387 * Restore original value. 1388 */ 1389 *(int *)ptr = tmp; 1390 1391skip_memtest: 1392 /* 1393 * Adjust array of valid/good pages. 1394 */ 1395 if (page_bad == TRUE) 1396 continue; 1397 /* 1398 * If this good page is a continuation of the 1399 * previous set of good pages, then just increase 1400 * the end pointer. Otherwise start a new chunk. 1401 * Note that "end" points one higher than end, 1402 * making the range >= start and < end. 1403 * If we're also doing a speculative memory 1404 * test and we at or past the end, bump up Maxmem 1405 * so that we keep going. The first bad page 1406 * will terminate the loop. 
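 * Illustrative usage (assumption): the pattern test above runs only when
 * the loader tunable is set, e.g. hw.memtest.tests=1 in /boot/loader.conf;
 * with it left at 0 every page reaches this point untested and only the
 * kernel image and dcons ranges were skipped via do_dump_avail above.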
1407 */ 1408 if (phys_avail[pa_indx] == pa) { 1409 phys_avail[pa_indx] += PAGE_SIZE; 1410 } else { 1411 pa_indx++; 1412 if (pa_indx == PHYS_AVAIL_ARRAY_END) { 1413 printf( 1414 "Too many holes in the physical address space, giving up\n"); 1415 pa_indx--; 1416 full = TRUE; 1417 goto do_dump_avail; 1418 } 1419 phys_avail[pa_indx++] = pa; /* start */ 1420 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */ 1421 } 1422 physmem++; 1423do_dump_avail: 1424 if (dump_avail[da_indx] == pa) { 1425 dump_avail[da_indx] += PAGE_SIZE; 1426 } else { 1427 da_indx++; 1428 if (da_indx == DUMP_AVAIL_ARRAY_END) { 1429 da_indx--; 1430 goto do_next; 1431 } 1432 dump_avail[da_indx++] = pa; /* start */ 1433 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */ 1434 } 1435do_next: 1436 if (full) 1437 break; 1438 } 1439 } 1440 *pte = 0; 1441 invltlb(); 1442 if (memtest != 0) 1443 printf("\n"); 1444 1445 /* 1446 * XXX 1447 * The last chunk must contain at least one page plus the message 1448 * buffer to avoid complicating other code (message buffer address 1449 * calculation, etc.). 1450 */ 1451 while (phys_avail[pa_indx - 1] + PAGE_SIZE + 1452 round_page(msgbufsize) >= phys_avail[pa_indx]) { 1453 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]); 1454 phys_avail[pa_indx--] = 0; 1455 phys_avail[pa_indx--] = 0; 1456 } 1457 1458 Maxmem = atop(phys_avail[pa_indx]); 1459 1460 /* Trim off space for the message buffer. */ 1461 phys_avail[pa_indx] -= round_page(msgbufsize); 1462 1463 /* Map the message buffer. */ 1464 msgbufp = (struct msgbuf *)PHYS_TO_DMAP(phys_avail[pa_indx]); 1465} 1466 1467static caddr_t 1468native_parse_preload_data(u_int64_t modulep) 1469{ 1470 caddr_t kmdp; 1471#ifdef DDB 1472 vm_offset_t ksym_start; 1473 vm_offset_t ksym_end; 1474#endif 1475 1476 preload_metadata = (caddr_t)(uintptr_t)(modulep + KERNBASE); 1477 preload_bootstrap_relocate(KERNBASE); 1478 kmdp = preload_search_by_type("elf kernel"); 1479 if (kmdp == NULL) 1480 kmdp = preload_search_by_type("elf64 kernel"); 1481 boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int); 1482 kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *) + KERNBASE; 1483#ifdef DDB 1484 ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t); 1485 ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t); 1486 db_fetch_ksymtab(ksym_start, ksym_end); 1487#endif 1488 1489 return (kmdp); 1490} 1491 1492u_int64_t 1493hammer_time(u_int64_t modulep, u_int64_t physfree) 1494{ 1495 caddr_t kmdp; 1496 int gsel_tss, x; 1497 struct pcpu *pc; 1498 struct nmi_pcpu *np; 1499 struct xstate_hdr *xhdr; 1500 u_int64_t msr; 1501 char *env; 1502 size_t kstack0_sz; 1503 1504 thread0.td_kstack = physfree + KERNBASE; 1505 thread0.td_kstack_pages = KSTACK_PAGES; 1506 kstack0_sz = thread0.td_kstack_pages * PAGE_SIZE; 1507 bzero((void *)thread0.td_kstack, kstack0_sz); 1508 physfree += kstack0_sz; 1509 1510 /* 1511 * This may be done better later if it gets more high level 1512 * components in it. If so just link td->td_proc here. 
1513 */ 1514 proc_linkup0(&proc0, &thread0); 1515 1516 kmdp = init_ops.parse_preload_data(modulep); 1517 1518 /* Init basic tunables, hz etc */ 1519 init_param1(); 1520 1521 /* 1522 * make gdt memory segments 1523 */ 1524 for (x = 0; x < NGDT; x++) { 1525 if (x != GPROC0_SEL && x != (GPROC0_SEL + 1) && 1526 x != GUSERLDT_SEL && x != (GUSERLDT_SEL) + 1) 1527 ssdtosd(&gdt_segs[x], &gdt[x]); 1528 } 1529 gdt_segs[GPROC0_SEL].ssd_base = (uintptr_t)&common_tss[0]; 1530 ssdtosyssd(&gdt_segs[GPROC0_SEL], 1531 (struct system_segment_descriptor *)&gdt[GPROC0_SEL]); 1532 1533 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 1534 r_gdt.rd_base = (long) gdt; 1535 lgdt(&r_gdt); 1536 pc = &__pcpu[0]; 1537 1538 wrmsr(MSR_FSBASE, 0); /* User value */ 1539 wrmsr(MSR_GSBASE, (u_int64_t)pc); 1540 wrmsr(MSR_KGSBASE, 0); /* User value while in the kernel */ 1541 1542 pcpu_init(pc, 0, sizeof(struct pcpu)); 1543 dpcpu_init((void *)(physfree + KERNBASE), 0); 1544 physfree += DPCPU_SIZE; 1545 PCPU_SET(prvspace, pc); 1546 PCPU_SET(curthread, &thread0); 1547 PCPU_SET(tssp, &common_tss[0]); 1548 PCPU_SET(commontssp, &common_tss[0]); 1549 PCPU_SET(tss, (struct system_segment_descriptor *)&gdt[GPROC0_SEL]); 1550 PCPU_SET(ldt, (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL]); 1551 PCPU_SET(fs32p, &gdt[GUFS32_SEL]); 1552 PCPU_SET(gs32p, &gdt[GUGS32_SEL]); 1553 1554 /* 1555 * Initialize mutexes. 1556 * 1557 * icu_lock: in order to allow an interrupt to occur in a critical 1558 * section, to set pcpu->ipending (etc...) properly, we 1559 * must be able to get the icu lock, so it can't be 1560 * under witness. 1561 */ 1562 mutex_init(); 1563 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS); 1564 mtx_init(&dt_lock, "descriptor tables", NULL, MTX_DEF); 1565 1566 /* exceptions */ 1567 for (x = 0; x < NIDT; x++) 1568 setidt(x, &IDTVEC(rsvd), SDT_SYSIGT, SEL_KPL, 0); 1569 setidt(IDT_DE, &IDTVEC(div), SDT_SYSIGT, SEL_KPL, 0); 1570 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYSIGT, SEL_KPL, 0); 1571 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYSIGT, SEL_KPL, 2); 1572 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYSIGT, SEL_UPL, 0); 1573 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYSIGT, SEL_KPL, 0); 1574 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYSIGT, SEL_KPL, 0); 1575 setidt(IDT_UD, &IDTVEC(ill), SDT_SYSIGT, SEL_KPL, 0); 1576 setidt(IDT_NM, &IDTVEC(dna), SDT_SYSIGT, SEL_KPL, 0); 1577 setidt(IDT_DF, &IDTVEC(dblfault), SDT_SYSIGT, SEL_KPL, 1); 1578 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYSIGT, SEL_KPL, 0); 1579 setidt(IDT_TS, &IDTVEC(tss), SDT_SYSIGT, SEL_KPL, 0); 1580 setidt(IDT_NP, &IDTVEC(missing), SDT_SYSIGT, SEL_KPL, 0); 1581 setidt(IDT_SS, &IDTVEC(stk), SDT_SYSIGT, SEL_KPL, 0); 1582 setidt(IDT_GP, &IDTVEC(prot), SDT_SYSIGT, SEL_KPL, 0); 1583 setidt(IDT_PF, &IDTVEC(page), SDT_SYSIGT, SEL_KPL, 0); 1584 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYSIGT, SEL_KPL, 0); 1585 setidt(IDT_AC, &IDTVEC(align), SDT_SYSIGT, SEL_KPL, 0); 1586 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYSIGT, SEL_KPL, 0); 1587 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYSIGT, SEL_KPL, 0); 1588#ifdef KDTRACE_HOOKS 1589 setidt(IDT_DTRACE_RET, &IDTVEC(dtrace_ret), SDT_SYSIGT, SEL_UPL, 0); 1590#endif 1591#ifdef XENHVM 1592 setidt(IDT_EVTCHN, &IDTVEC(xen_intr_upcall), SDT_SYSIGT, SEL_UPL, 0); 1593#endif 1594 1595 r_idt.rd_limit = sizeof(idt0) - 1; 1596 r_idt.rd_base = (long) idt; 1597 lidt(&r_idt); 1598 1599 /* 1600 * Initialize the clock before the console so that console 1601 * initialization can use DELAY(). 
1602 */ 1603 clock_init(); 1604 1605 /* 1606 * Use vt(4) by default for UEFI boot (during the sc(4)/vt(4) 1607 * transition). 1608 */ 1609 if (kmdp != NULL && preload_search_info(kmdp, 1610 MODINFO_METADATA | MODINFOMD_EFI_MAP) != NULL) 1611 vty_set_preferred(VTY_VT); 1612 1613 /* 1614 * Initialize the console before we print anything out. 1615 */ 1616 cninit(); 1617 1618#ifdef DEV_ISA 1619#ifdef DEV_ATPIC 1620 elcr_probe(); 1621 atpic_startup(); 1622#else 1623 /* Reset and mask the atpics and leave them shut down. */ 1624 atpic_reset(); 1625 1626 /* 1627 * Point the ICU spurious interrupt vectors at the APIC spurious 1628 * interrupt handler. 1629 */ 1630 setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0); 1631 setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0); 1632#endif 1633#else 1634#error "have you forgotten the isa device?"; 1635#endif 1636 1637 kdb_init(); 1638 1639#ifdef KDB 1640 if (boothowto & RB_KDB) 1641 kdb_enter(KDB_WHY_BOOTFLAGS, 1642 "Boot flags requested debugger"); 1643#endif 1644 1645 identify_cpu(); /* Final stage of CPU initialization */ 1646 initializecpu(); /* Initialize CPU registers */ 1647 initializecpucache(); 1648 1649 /* doublefault stack space, runs on ist1 */ 1650 common_tss[0].tss_ist1 = (long)&dblfault_stack[sizeof(dblfault_stack)]; 1651 1652 /* 1653 * NMI stack, runs on ist2. The pcpu pointer is stored just 1654 * above the start of the ist2 stack. 1655 */ 1656 np = ((struct nmi_pcpu *) &nmi0_stack[sizeof(nmi0_stack)]) - 1; 1657 np->np_pcpu = (register_t) pc; 1658 common_tss[0].tss_ist2 = (long) np; 1659 1660 /* Set the IO permission bitmap (empty due to tss seg limit) */ 1661 common_tss[0].tss_iobase = sizeof(struct amd64tss) + IOPERM_BITMAP_SIZE; 1662 1663 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); 1664 ltr(gsel_tss); 1665 1666 /* Set up the fast syscall stuff */ 1667 msr = rdmsr(MSR_EFER) | EFER_SCE; 1668 wrmsr(MSR_EFER, msr); 1669 wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall)); 1670 wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32)); 1671 msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) | 1672 ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48); 1673 wrmsr(MSR_STAR, msr); 1674 wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D); 1675 1676 getmemsize(kmdp, physfree); 1677 init_param2(physmem); 1678 1679 /* now running on new page tables, configured,and u/iom is accessible */ 1680 1681 msgbufinit(msgbufp, msgbufsize); 1682 fpuinit(); 1683 1684 /* 1685 * Set up thread0 pcb after fpuinit calculated pcb + fpu save 1686 * area size. Zero out the extended state header in fpu save 1687 * area. 1688 */ 1689 thread0.td_pcb = get_pcb_td(&thread0); 1690 bzero(get_pcb_user_save_td(&thread0), cpu_max_ext_state_size); 1691 if (use_xsave) { 1692 xhdr = (struct xstate_hdr *)(get_pcb_user_save_td(&thread0) + 1693 1); 1694 xhdr->xstate_bv = xsave_mask; 1695 } 1696 /* make an initial tss so cpu can get interrupt stack on syscall! 
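 * Note on the fast-syscall MSRs programmed just above (standard SYSCALL/
 * SYSRET behaviour, stated here only as an illustration): SYSCALL loads
 * %cs/%ss from STAR[47:32], i.e. GCODE_SEL/GDATA_SEL, while 64-bit SYSRET
 * returns with %cs = STAR[63:48] + 16 and %ss = STAR[63:48] + 8, which is
 * why GUCODE32_SEL, GUDATA_SEL and GUCODE_SEL must occupy consecutive GDT
 * slots in gdt_segs[] above.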
*/ 1697 common_tss[0].tss_rsp0 = (vm_offset_t)thread0.td_pcb; 1698 /* Ensure the stack is aligned to 16 bytes */ 1699 common_tss[0].tss_rsp0 &= ~0xFul; 1700 PCPU_SET(rsp0, common_tss[0].tss_rsp0); 1701 PCPU_SET(curpcb, thread0.td_pcb); 1702 1703 /* transfer to user mode */ 1704 1705 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL); 1706 _udatasel = GSEL(GUDATA_SEL, SEL_UPL); 1707 _ucode32sel = GSEL(GUCODE32_SEL, SEL_UPL); 1708 _ufssel = GSEL(GUFS32_SEL, SEL_UPL); 1709 _ugssel = GSEL(GUGS32_SEL, SEL_UPL); 1710 1711 load_ds(_udatasel); 1712 load_es(_udatasel); 1713 load_fs(_ufssel); 1714 1715 /* setup proc 0's pcb */ 1716 thread0.td_pcb->pcb_flags = 0; 1717 thread0.td_frame = &proc0_tf; 1718 1719 env = kern_getenv("kernelname"); 1720 if (env != NULL) 1721 strlcpy(kernelname, env, sizeof(kernelname)); 1722 1723 cpu_probe_amdc1e(); 1724 1725#ifdef FDT 1726 x86_init_fdt(); 1727#endif 1728 1729 /* Location of kernel stack for locore */ 1730 return ((u_int64_t)thread0.td_pcb); 1731} 1732 1733void 1734cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) 1735{ 1736 1737 pcpu->pc_acpi_id = 0xffffffff; 1738} 1739 1740static int 1741smap_sysctl_handler(SYSCTL_HANDLER_ARGS) 1742{ 1743 struct bios_smap *smapbase; 1744 struct bios_smap_xattr smap; 1745 caddr_t kmdp; 1746 uint32_t *smapattr; 1747 int count, error, i; 1748 1749 /* Retrieve the system memory map from the loader. */ 1750 kmdp = preload_search_by_type("elf kernel"); 1751 if (kmdp == NULL) 1752 kmdp = preload_search_by_type("elf64 kernel"); 1753 smapbase = (struct bios_smap *)preload_search_info(kmdp, 1754 MODINFO_METADATA | MODINFOMD_SMAP); 1755 if (smapbase == NULL) 1756 return (0); 1757 smapattr = (uint32_t *)preload_search_info(kmdp, 1758 MODINFO_METADATA | MODINFOMD_SMAP_XATTR); 1759 count = *((uint32_t *)smapbase - 1) / sizeof(*smapbase); 1760 error = 0; 1761 for (i = 0; i < count; i++) { 1762 smap.base = smapbase[i].base; 1763 smap.length = smapbase[i].length; 1764 smap.type = smapbase[i].type; 1765 if (smapattr != NULL) 1766 smap.xattr = smapattr[i]; 1767 else 1768 smap.xattr = 0; 1769 error = SYSCTL_OUT(req, &smap, sizeof(smap)); 1770 } 1771 return (error); 1772} 1773SYSCTL_PROC(_machdep, OID_AUTO, smap, CTLTYPE_OPAQUE|CTLFLAG_RD, NULL, 0, 1774 smap_sysctl_handler, "S,bios_smap_xattr", "Raw BIOS SMAP data"); 1775 1776static int 1777efi_map_sysctl_handler(SYSCTL_HANDLER_ARGS) 1778{ 1779 struct efi_map_header *efihdr; 1780 caddr_t kmdp; 1781 uint32_t efisize; 1782 1783 kmdp = preload_search_by_type("elf kernel"); 1784 if (kmdp == NULL) 1785 kmdp = preload_search_by_type("elf64 kernel"); 1786 efihdr = (struct efi_map_header *)preload_search_info(kmdp, 1787 MODINFO_METADATA | MODINFOMD_EFI_MAP); 1788 if (efihdr == NULL) 1789 return (0); 1790 efisize = *((uint32_t *)efihdr - 1); 1791 return (SYSCTL_OUT(req, efihdr, efisize)); 1792} 1793SYSCTL_PROC(_machdep, OID_AUTO, efi_map, CTLTYPE_OPAQUE|CTLFLAG_RD, NULL, 0, 1794 efi_map_sysctl_handler, "S,efi_map_header", "Raw EFI Memory Map"); 1795 1796void 1797spinlock_enter(void) 1798{ 1799 struct thread *td; 1800 register_t flags; 1801 1802 td = curthread; 1803 if (td->td_md.md_spinlock_count == 0) { 1804 flags = intr_disable(); 1805 td->td_md.md_spinlock_count = 1; 1806 td->td_md.md_saved_flags = flags; 1807 } else 1808 td->td_md.md_spinlock_count++; 1809 critical_enter(); 1810} 1811 1812void 1813spinlock_exit(void) 1814{ 1815 struct thread *td; 1816 register_t flags; 1817 1818 td = curthread; 1819 critical_exit(); 1820 flags = td->td_md.md_saved_flags; 1821 td->td_md.md_spinlock_count--; 1822 if 
(td->td_md.md_spinlock_count == 0) 1823 intr_restore(flags); 1824} 1825 1826/* 1827 * Construct a PCB from a trapframe. This is called from kdb_trap() where 1828 * we want to start a backtrace from the function that caused us to enter 1829 * the debugger. We have the context in the trapframe, but base the trace 1830 * on the PCB. The PCB doesn't have to be perfect, as long as it contains 1831 * enough for a backtrace. 1832 */ 1833void 1834makectx(struct trapframe *tf, struct pcb *pcb) 1835{ 1836 1837 pcb->pcb_r12 = tf->tf_r12; 1838 pcb->pcb_r13 = tf->tf_r13; 1839 pcb->pcb_r14 = tf->tf_r14; 1840 pcb->pcb_r15 = tf->tf_r15; 1841 pcb->pcb_rbp = tf->tf_rbp; 1842 pcb->pcb_rbx = tf->tf_rbx; 1843 pcb->pcb_rip = tf->tf_rip; 1844 pcb->pcb_rsp = tf->tf_rsp; 1845} 1846 1847int 1848ptrace_set_pc(struct thread *td, unsigned long addr) 1849{ 1850 1851 td->td_frame->tf_rip = addr; 1852 set_pcb_flags(td->td_pcb, PCB_FULL_IRET); 1853 return (0); 1854} 1855 1856int 1857ptrace_single_step(struct thread *td) 1858{ 1859 td->td_frame->tf_rflags |= PSL_T; 1860 return (0); 1861} 1862 1863int 1864ptrace_clear_single_step(struct thread *td) 1865{ 1866 td->td_frame->tf_rflags &= ~PSL_T; 1867 return (0); 1868} 1869 1870int 1871fill_regs(struct thread *td, struct reg *regs) 1872{ 1873 struct trapframe *tp; 1874 1875 tp = td->td_frame; 1876 return (fill_frame_regs(tp, regs)); 1877} 1878 1879int 1880fill_frame_regs(struct trapframe *tp, struct reg *regs) 1881{ 1882 regs->r_r15 = tp->tf_r15; 1883 regs->r_r14 = tp->tf_r14; 1884 regs->r_r13 = tp->tf_r13; 1885 regs->r_r12 = tp->tf_r12; 1886 regs->r_r11 = tp->tf_r11; 1887 regs->r_r10 = tp->tf_r10; 1888 regs->r_r9 = tp->tf_r9; 1889 regs->r_r8 = tp->tf_r8; 1890 regs->r_rdi = tp->tf_rdi; 1891 regs->r_rsi = tp->tf_rsi; 1892 regs->r_rbp = tp->tf_rbp; 1893 regs->r_rbx = tp->tf_rbx; 1894 regs->r_rdx = tp->tf_rdx; 1895 regs->r_rcx = tp->tf_rcx; 1896 regs->r_rax = tp->tf_rax; 1897 regs->r_rip = tp->tf_rip; 1898 regs->r_cs = tp->tf_cs; 1899 regs->r_rflags = tp->tf_rflags; 1900 regs->r_rsp = tp->tf_rsp; 1901 regs->r_ss = tp->tf_ss; 1902 if (tp->tf_flags & TF_HASSEGS) { 1903 regs->r_ds = tp->tf_ds; 1904 regs->r_es = tp->tf_es; 1905 regs->r_fs = tp->tf_fs; 1906 regs->r_gs = tp->tf_gs; 1907 } else { 1908 regs->r_ds = 0; 1909 regs->r_es = 0; 1910 regs->r_fs = 0; 1911 regs->r_gs = 0; 1912 } 1913 return (0); 1914} 1915 1916int 1917set_regs(struct thread *td, struct reg *regs) 1918{ 1919 struct trapframe *tp; 1920 register_t rflags; 1921 1922 tp = td->td_frame; 1923 rflags = regs->r_rflags & 0xffffffff; 1924 if (!EFL_SECURE(rflags, tp->tf_rflags) || !CS_SECURE(regs->r_cs)) 1925 return (EINVAL); 1926 tp->tf_r15 = regs->r_r15; 1927 tp->tf_r14 = regs->r_r14; 1928 tp->tf_r13 = regs->r_r13; 1929 tp->tf_r12 = regs->r_r12; 1930 tp->tf_r11 = regs->r_r11; 1931 tp->tf_r10 = regs->r_r10; 1932 tp->tf_r9 = regs->r_r9; 1933 tp->tf_r8 = regs->r_r8; 1934 tp->tf_rdi = regs->r_rdi; 1935 tp->tf_rsi = regs->r_rsi; 1936 tp->tf_rbp = regs->r_rbp; 1937 tp->tf_rbx = regs->r_rbx; 1938 tp->tf_rdx = regs->r_rdx; 1939 tp->tf_rcx = regs->r_rcx; 1940 tp->tf_rax = regs->r_rax; 1941 tp->tf_rip = regs->r_rip; 1942 tp->tf_cs = regs->r_cs; 1943 tp->tf_rflags = rflags; 1944 tp->tf_rsp = regs->r_rsp; 1945 tp->tf_ss = regs->r_ss; 1946 if (0) { /* XXXKIB */ 1947 tp->tf_ds = regs->r_ds; 1948 tp->tf_es = regs->r_es; 1949 tp->tf_fs = regs->r_fs; 1950 tp->tf_gs = regs->r_gs; 1951 tp->tf_flags = TF_HASSEGS; 1952 } 1953 set_pcb_flags(td->td_pcb, PCB_FULL_IRET); 1954 return (0); 1955} 1956 1957/* XXX check all this stuff! 
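 * Illustrative userland counterpart (an assumption, not part of this file):
 * the fill/set routines above and the fpreg conversions below back the
 * ptrace(2) register requests, e.g.
 *	struct reg r;
 *	ptrace(PT_GETREGS, pid, (caddr_t)&r, 0);
 *	r.r_rip = new_pc;
 *	ptrace(PT_SETREGS, pid, (caddr_t)&r, 0);
 * which land in fill_regs()/set_regs(), while PT_GETFPREGS and
 * PT_SETFPREGS use fill_fpregs()/set_fpregs().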

/* XXX check all this stuff! */
/* externalize from sv_xmm */
static void
fill_fpregs_xmm(struct savefpu *sv_xmm, struct fpreg *fpregs)
{
	struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* pcb -> fpregs */
	bzero(fpregs, sizeof(*fpregs));

	/* FPU control/status */
	penv_fpreg->en_cw = penv_xmm->en_cw;
	penv_fpreg->en_sw = penv_xmm->en_sw;
	penv_fpreg->en_tw = penv_xmm->en_tw;
	penv_fpreg->en_opcode = penv_xmm->en_opcode;
	penv_fpreg->en_rip = penv_xmm->en_rip;
	penv_fpreg->en_rdp = penv_xmm->en_rdp;
	penv_fpreg->en_mxcsr = penv_xmm->en_mxcsr;
	penv_fpreg->en_mxcsr_mask = penv_xmm->en_mxcsr_mask;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		bcopy(sv_xmm->sv_fp[i].fp_acc.fp_bytes, fpregs->fpr_acc[i], 10);

	/* SSE registers */
	for (i = 0; i < 16; ++i)
		bcopy(sv_xmm->sv_xmm[i].xmm_bytes, fpregs->fpr_xacc[i], 16);
}

/* internalize from fpregs into sv_xmm */
static void
set_fpregs_xmm(struct fpreg *fpregs, struct savefpu *sv_xmm)
{
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env;
	int i;

	/* fpregs -> pcb */
	/* FPU control/status */
	penv_xmm->en_cw = penv_fpreg->en_cw;
	penv_xmm->en_sw = penv_fpreg->en_sw;
	penv_xmm->en_tw = penv_fpreg->en_tw;
	penv_xmm->en_opcode = penv_fpreg->en_opcode;
	penv_xmm->en_rip = penv_fpreg->en_rip;
	penv_xmm->en_rdp = penv_fpreg->en_rdp;
	penv_xmm->en_mxcsr = penv_fpreg->en_mxcsr;
	penv_xmm->en_mxcsr_mask = penv_fpreg->en_mxcsr_mask & cpu_mxcsr_mask;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		bcopy(fpregs->fpr_acc[i], sv_xmm->sv_fp[i].fp_acc.fp_bytes, 10);

	/* SSE registers */
	for (i = 0; i < 16; ++i)
		bcopy(fpregs->fpr_xacc[i], sv_xmm->sv_xmm[i].xmm_bytes, 16);
}

/* externalize from td->pcb */
int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{

	KASSERT(td == curthread || TD_IS_SUSPENDED(td) ||
	    P_SHOULDSTOP(td->td_proc),
	    ("not suspended thread %p", td));
	fpugetregs(td);
	fill_fpregs_xmm(get_pcb_user_save_td(td), fpregs);
	return (0);
}

/* internalize to td->pcb */
int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{

	set_fpregs_xmm(fpregs, get_pcb_user_save_td(td));
	fpuuserinited(td);
	return (0);
}
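
/*
 * get_mcontext() and set_mcontext() below capture and restore the
 * machine-dependent part of a ucontext; they back getcontext(2),
 * setcontext(2), swapcontext(2) and the signal delivery/return paths.
 */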

/*
 * Get machine context.
 */
int
get_mcontext(struct thread *td, mcontext_t *mcp, int flags)
{
	struct pcb *pcb;
	struct trapframe *tp;

	pcb = td->td_pcb;
	tp = td->td_frame;
	PROC_LOCK(curthread->td_proc);
	mcp->mc_onstack = sigonstack(tp->tf_rsp);
	PROC_UNLOCK(curthread->td_proc);
	mcp->mc_r15 = tp->tf_r15;
	mcp->mc_r14 = tp->tf_r14;
	mcp->mc_r13 = tp->tf_r13;
	mcp->mc_r12 = tp->tf_r12;
	mcp->mc_r11 = tp->tf_r11;
	mcp->mc_r10 = tp->tf_r10;
	mcp->mc_r9 = tp->tf_r9;
	mcp->mc_r8 = tp->tf_r8;
	mcp->mc_rdi = tp->tf_rdi;
	mcp->mc_rsi = tp->tf_rsi;
	mcp->mc_rbp = tp->tf_rbp;
	mcp->mc_rbx = tp->tf_rbx;
	mcp->mc_rcx = tp->tf_rcx;
	mcp->mc_rflags = tp->tf_rflags;
	if (flags & GET_MC_CLEAR_RET) {
		mcp->mc_rax = 0;
		mcp->mc_rdx = 0;
		mcp->mc_rflags &= ~PSL_C;
	} else {
		mcp->mc_rax = tp->tf_rax;
		mcp->mc_rdx = tp->tf_rdx;
	}
	mcp->mc_rip = tp->tf_rip;
	mcp->mc_cs = tp->tf_cs;
	mcp->mc_rsp = tp->tf_rsp;
	mcp->mc_ss = tp->tf_ss;
	mcp->mc_ds = tp->tf_ds;
	mcp->mc_es = tp->tf_es;
	mcp->mc_fs = tp->tf_fs;
	mcp->mc_gs = tp->tf_gs;
	mcp->mc_flags = tp->tf_flags;
	mcp->mc_len = sizeof(*mcp);
	get_fpcontext(td, mcp, NULL, 0);
	mcp->mc_fsbase = pcb->pcb_fsbase;
	mcp->mc_gsbase = pcb->pcb_gsbase;
	mcp->mc_xfpustate = 0;
	mcp->mc_xfpustate_len = 0;
	bzero(mcp->mc_spare, sizeof(mcp->mc_spare));
	return (0);
}

/*
 * Set machine context.
 *
 * Only the user-modifiable bits of rflags are honored, and the %cs
 * selector is never touched.
 */
int
set_mcontext(struct thread *td, mcontext_t *mcp)
{
	struct pcb *pcb;
	struct trapframe *tp;
	char *xfpustate;
	long rflags;
	int ret;

	pcb = td->td_pcb;
	tp = td->td_frame;
	if (mcp->mc_len != sizeof(*mcp) ||
	    (mcp->mc_flags & ~_MC_FLAG_MASK) != 0)
		return (EINVAL);
	rflags = (mcp->mc_rflags & PSL_USERCHANGE) |
	    (tp->tf_rflags & ~PSL_USERCHANGE);
	if (mcp->mc_flags & _MC_HASFPXSTATE) {
		if (mcp->mc_xfpustate_len > cpu_max_ext_state_size -
		    sizeof(struct savefpu))
			return (EINVAL);
		xfpustate = __builtin_alloca(mcp->mc_xfpustate_len);
		ret = copyin((void *)mcp->mc_xfpustate, xfpustate,
		    mcp->mc_xfpustate_len);
		if (ret != 0)
			return (ret);
	} else
		xfpustate = NULL;
	ret = set_fpcontext(td, mcp, xfpustate, mcp->mc_xfpustate_len);
	if (ret != 0)
		return (ret);
	tp->tf_r15 = mcp->mc_r15;
	tp->tf_r14 = mcp->mc_r14;
	tp->tf_r13 = mcp->mc_r13;
	tp->tf_r12 = mcp->mc_r12;
	tp->tf_r11 = mcp->mc_r11;
	tp->tf_r10 = mcp->mc_r10;
	tp->tf_r9 = mcp->mc_r9;
	tp->tf_r8 = mcp->mc_r8;
	tp->tf_rdi = mcp->mc_rdi;
	tp->tf_rsi = mcp->mc_rsi;
	tp->tf_rbp = mcp->mc_rbp;
	tp->tf_rbx = mcp->mc_rbx;
	tp->tf_rdx = mcp->mc_rdx;
	tp->tf_rcx = mcp->mc_rcx;
	tp->tf_rax = mcp->mc_rax;
	tp->tf_rip = mcp->mc_rip;
	tp->tf_rflags = rflags;
	tp->tf_rsp = mcp->mc_rsp;
	tp->tf_ss = mcp->mc_ss;
	tp->tf_flags = mcp->mc_flags;
	if (tp->tf_flags & TF_HASSEGS) {
		tp->tf_ds = mcp->mc_ds;
		tp->tf_es = mcp->mc_es;
		tp->tf_fs = mcp->mc_fs;
		tp->tf_gs = mcp->mc_gs;
	}
	if (mcp->mc_flags & _MC_HASBASES) {
		pcb->pcb_fsbase = mcp->mc_fsbase;
		pcb->pcb_gsbase = mcp->mc_gsbase;
	}
	set_pcb_flags(pcb, PCB_FULL_IRET);
	return (0);
}
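
/*
 * Helpers for the mcontext FPU state: get_fpcontext() copies the legacy
 * FXSAVE area into mc_fpstate and, when XSAVE is in use and a buffer is
 * supplied, the extended state beyond it; set_fpcontext() validates the
 * recorded format and ownership before loading the state back into the
 * thread.
 */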

static void
get_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpusave,
    size_t xfpusave_len)
{
	size_t max_len, len;

	mcp->mc_ownedfp = fpugetregs(td);
	bcopy(get_pcb_user_save_td(td), &mcp->mc_fpstate[0],
	    sizeof(mcp->mc_fpstate));
	mcp->mc_fpformat = fpuformat();
	if (!use_xsave || xfpusave_len == 0)
		return;
	max_len = cpu_max_ext_state_size - sizeof(struct savefpu);
	len = xfpusave_len;
	if (len > max_len) {
		/* Zero the tail of the buffer that will not be filled. */
		bzero(xfpusave + max_len, len - max_len);
		len = max_len;
	}
	mcp->mc_flags |= _MC_HASFPXSTATE;
	mcp->mc_xfpustate_len = len;
	bcopy(get_pcb_user_save_td(td) + 1, xfpusave, len);
}

static int
set_fpcontext(struct thread *td, mcontext_t *mcp, char *xfpustate,
    size_t xfpustate_len)
{
	struct savefpu *fpstate;
	int error;

	if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
		return (0);
	else if (mcp->mc_fpformat != _MC_FPFMT_XMM)
		return (EINVAL);
	else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE) {
		/* We don't care what state is left in the FPU or PCB. */
		fpstate_drop(td);
		error = 0;
	} else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
	    mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
		fpstate = (struct savefpu *)&mcp->mc_fpstate;
		fpstate->sv_env.en_mxcsr &= cpu_mxcsr_mask;
		error = fpusetregs(td, fpstate, xfpustate, xfpustate_len);
	} else
		return (EINVAL);
	return (error);
}

void
fpstate_drop(struct thread *td)
{

	KASSERT(PCB_USER_FPU(td->td_pcb), ("fpstate_drop: kernel-owned fpu"));
	critical_enter();
	if (PCPU_GET(fpcurthread) == td)
		fpudrop();
	/*
	 * XXX force a full drop of the fpu.  The above only drops it if we
	 * owned it.
	 *
	 * XXX I don't much like fpugetuserregs()'s semantics of doing a full
	 * drop.  Dropping only to the pcb matches fnsave's behaviour.
	 * We only need to drop to !PCB_INITDONE in sendsig().  But
	 * sendsig() is the only caller of fpugetuserregs()... perhaps we just
	 * have too many layers.
	 */
	clear_pcb_flags(curthread->td_pcb,
	    PCB_FPUINITDONE | PCB_USERFPUINITDONE);
	critical_exit();
}

int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;

	if (td == NULL) {
		dbregs->dr[0] = rdr0();
		dbregs->dr[1] = rdr1();
		dbregs->dr[2] = rdr2();
		dbregs->dr[3] = rdr3();
		dbregs->dr[6] = rdr6();
		dbregs->dr[7] = rdr7();
	} else {
		pcb = td->td_pcb;
		dbregs->dr[0] = pcb->pcb_dr0;
		dbregs->dr[1] = pcb->pcb_dr1;
		dbregs->dr[2] = pcb->pcb_dr2;
		dbregs->dr[3] = pcb->pcb_dr3;
		dbregs->dr[6] = pcb->pcb_dr6;
		dbregs->dr[7] = pcb->pcb_dr7;
	}
	dbregs->dr[4] = 0;
	dbregs->dr[5] = 0;
	dbregs->dr[8] = 0;
	dbregs->dr[9] = 0;
	dbregs->dr[10] = 0;
	dbregs->dr[11] = 0;
	dbregs->dr[12] = 0;
	dbregs->dr[13] = 0;
	dbregs->dr[14] = 0;
	dbregs->dr[15] = 0;
	return (0);
}
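
/*
 * set_dbregs() is the write side of the debug-register interface: the
 * dr0-dr3 breakpoint addresses and the dr6/dr7 status and control words
 * are validated before being stored in the PCB; PCB_DBREGS marks the
 * thread as using hardware debug registers.
 */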

int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;
	int i;

	if (td == NULL) {
		load_dr0(dbregs->dr[0]);
		load_dr1(dbregs->dr[1]);
		load_dr2(dbregs->dr[2]);
		load_dr3(dbregs->dr[3]);
		load_dr6(dbregs->dr[6]);
		load_dr7(dbregs->dr[7]);
	} else {
		/*
		 * Don't let an illegal value for dr7 get set.  Specifically,
		 * check for undefined settings.  Setting these bit patterns
		 * results in undefined behaviour and can lead to an
		 * unexpected TRCTRAP or a general protection fault right
		 * here.  The upper bits of dr6 and dr7 must not be set.
		 */
		for (i = 0; i < 4; i++) {
			if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02)
				return (EINVAL);
			if (td->td_frame->tf_cs == _ucode32sel &&
			    DBREG_DR7_LEN(dbregs->dr[7], i) == DBREG_DR7_LEN_8)
				return (EINVAL);
		}
		if ((dbregs->dr[6] & 0xffffffff00000000ul) != 0 ||
		    (dbregs->dr[7] & 0xffffffff00000000ul) != 0)
			return (EINVAL);

		pcb = td->td_pcb;

		/*
		 * Don't let a process set a breakpoint that is not within the
		 * process's address space.  If a process could do this, it
		 * could halt the system by setting a breakpoint in the kernel
		 * (if ddb was enabled).  Thus, we need to check to make sure
		 * that no breakpoints are being enabled for addresses outside
		 * the process's address space.
		 *
		 * XXX - what about when the watched area of the user's
		 * address space is written into from within the kernel
		 * ... wouldn't that still cause a breakpoint to be generated
		 * from within kernel mode?
		 */

		if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) {
			/* dr0 is enabled */
			if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) {
			/* dr1 is enabled */
			if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) {
			/* dr2 is enabled */
			if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
		if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) {
			/* dr3 is enabled */
			if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		pcb->pcb_dr0 = dbregs->dr[0];
		pcb->pcb_dr1 = dbregs->dr[1];
		pcb->pcb_dr2 = dbregs->dr[2];
		pcb->pcb_dr3 = dbregs->dr[3];
		pcb->pcb_dr6 = dbregs->dr[6];
		pcb->pcb_dr7 = dbregs->dr[7];

		set_pcb_flags(pcb, PCB_DBREGS);
	}

	return (0);
}

void
reset_dbregs(void)
{

	load_dr7(0);	/* Turn off the control bits first */
	load_dr0(0);
	load_dr1(0);
	load_dr2(0);
	load_dr3(0);
	load_dr6(0);
}
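
/*
 * Debug register layout used below: the low byte of %dr7 holds the
 * local/global enable bits (L0/G0 .. L3/G3) for the four hardware
 * breakpoints, and the low nibble of %dr6 (B0 .. B3) records which of
 * them fired.
 */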
2366 */ 2367int 2368user_dbreg_trap(void) 2369{ 2370 u_int64_t dr7, dr6; /* debug registers dr6 and dr7 */ 2371 u_int64_t bp; /* breakpoint bits extracted from dr6 */ 2372 int nbp; /* number of breakpoints that triggered */ 2373 caddr_t addr[4]; /* breakpoint addresses */ 2374 int i; 2375 2376 dr7 = rdr7(); 2377 if ((dr7 & 0x000000ff) == 0) { 2378 /* 2379 * all GE and LE bits in the dr7 register are zero, 2380 * thus the trap couldn't have been caused by the 2381 * hardware debug registers 2382 */ 2383 return 0; 2384 } 2385 2386 nbp = 0; 2387 dr6 = rdr6(); 2388 bp = dr6 & 0x0000000f; 2389 2390 if (!bp) { 2391 /* 2392 * None of the breakpoint bits are set meaning this 2393 * trap was not caused by any of the debug registers 2394 */ 2395 return 0; 2396 } 2397 2398 /* 2399 * at least one of the breakpoints were hit, check to see 2400 * which ones and if any of them are user space addresses 2401 */ 2402 2403 if (bp & 0x01) { 2404 addr[nbp++] = (caddr_t)rdr0(); 2405 } 2406 if (bp & 0x02) { 2407 addr[nbp++] = (caddr_t)rdr1(); 2408 } 2409 if (bp & 0x04) { 2410 addr[nbp++] = (caddr_t)rdr2(); 2411 } 2412 if (bp & 0x08) { 2413 addr[nbp++] = (caddr_t)rdr3(); 2414 } 2415 2416 for (i = 0; i < nbp; i++) { 2417 if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) { 2418 /* 2419 * addr[i] is in user space 2420 */ 2421 return nbp; 2422 } 2423 } 2424 2425 /* 2426 * None of the breakpoints are in user space. 2427 */ 2428 return 0; 2429} 2430 2431#ifdef KDB 2432 2433/* 2434 * Provide inb() and outb() as functions. They are normally only available as 2435 * inline functions, thus cannot be called from the debugger. 2436 */ 2437 2438/* silence compiler warnings */ 2439u_char inb_(u_short); 2440void outb_(u_short, u_char); 2441 2442u_char 2443inb_(u_short port) 2444{ 2445 return inb(port); 2446} 2447 2448void 2449outb_(u_short port, u_char data) 2450{ 2451 outb(port, data); 2452} 2453 2454#endif /* KDB */ 2455