machdep.c revision 196412
1244769Sglebius/*- 2223637Sbz * Copyright (c) 2003 Peter Wemm. 3223637Sbz * Copyright (c) 1992 Terrence R. Lambert. 4223637Sbz * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. 5223637Sbz * All rights reserved. 6223637Sbz * 7223637Sbz * This code is derived from software contributed to Berkeley by 8223637Sbz * William Jolitz. 9223637Sbz * 10223637Sbz * Redistribution and use in source and binary forms, with or without 11223637Sbz * modification, are permitted provided that the following conditions 12223637Sbz * are met: 13223637Sbz * 1. Redistributions of source code must retain the above copyright 14223637Sbz * notice, this list of conditions and the following disclaimer. 15223637Sbz * 2. Redistributions in binary form must reproduce the above copyright 16223637Sbz * notice, this list of conditions and the following disclaimer in the 17223637Sbz * documentation and/or other materials provided with the distribution. 18223637Sbz * 3. All advertising materials mentioning features or use of this software 19223637Sbz * must display the following acknowledgement: 20223637Sbz * This product includes software developed by the University of 21223637Sbz * California, Berkeley and its contributors. 22223637Sbz * 4. Neither the name of the University nor the names of its contributors 23223637Sbz * may be used to endorse or promote products derived from this software 24223637Sbz * without specific prior written permission. 25223637Sbz * 26223637Sbz * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27223637Sbz * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28223637Sbz * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29223637Sbz * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30223637Sbz * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31223637Sbz * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32223637Sbz * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33223637Sbz * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34244769Sglebius * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35223637Sbz * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36223637Sbz * SUCH DAMAGE. 37223637Sbz * 38223637Sbz * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 39223637Sbz */ 40223637Sbz 41240233Sglebius#include <sys/cdefs.h> 42240233Sglebius__FBSDID("$FreeBSD: head/sys/amd64/amd64/machdep.c 196412 2009-08-20 22:58:05Z jkim $"); 43223637Sbz 44223637Sbz#include "opt_atalk.h" 45257176Sglebius#include "opt_atpic.h" 46257176Sglebius#include "opt_compat.h" 47257176Sglebius#include "opt_cpu.h" 48223637Sbz#include "opt_ddb.h" 49223637Sbz#include "opt_inet.h" 50223637Sbz#include "opt_ipx.h" 51223637Sbz#include "opt_isa.h" 52257176Sglebius#include "opt_kstack_pages.h" 53223637Sbz#include "opt_maxmem.h" 54223637Sbz#include "opt_msgbuf.h" 55223637Sbz#include "opt_perfmon.h" 56223637Sbz#include "opt_sched.h" 57223637Sbz 58240233Sglebius#include <sys/param.h> 59223637Sbz#include <sys/proc.h> 60240233Sglebius#include <sys/systm.h> 61223637Sbz#include <sys/bio.h> 62223637Sbz#include <sys/buf.h> 63240641Sglebius#include <sys/bus.h> 64255143Sglebius#include <sys/callout.h> 65255143Sglebius#include <sys/cons.h> 66255143Sglebius#include <sys/cpu.h> 67223637Sbz#include <sys/eventhandler.h> 68223637Sbz#include <sys/exec.h> 69223637Sbz#include <sys/imgact.h> 70223637Sbz#include <sys/kdb.h> 71223637Sbz#include <sys/kernel.h> 72223637Sbz#include <sys/ktr.h> 73223637Sbz#include <sys/linker.h> 74223637Sbz#include <sys/lock.h> 75223637Sbz#include <sys/malloc.h> 76223637Sbz#include <sys/memrange.h> 77223637Sbz#include <sys/msgbuf.h> 78223637Sbz#include <sys/mutex.h> 79223637Sbz#include <sys/pcpu.h> 80223637Sbz#include <sys/ptrace.h> 81223637Sbz#include <sys/reboot.h> 82223637Sbz#include <sys/sched.h> 83223637Sbz#include <sys/signalvar.h> 84240233Sglebius#include <sys/sysctl.h> 85223637Sbz#include <sys/sysent.h> 86223637Sbz#include <sys/sysproto.h> 87223637Sbz#include <sys/ucontext.h> 88223637Sbz#include <sys/vmmeter.h> 89223637Sbz 90223637Sbz#include <vm/vm.h> 91223637Sbz#include <vm/vm_extern.h> 92223637Sbz#include <vm/vm_kern.h> 93223637Sbz#include <vm/vm_page.h> 94223637Sbz#include <vm/vm_map.h> 95223637Sbz#include <vm/vm_object.h> 96223637Sbz#include <vm/vm_pager.h> 97223637Sbz#include <vm/vm_param.h> 98223637Sbz 99223637Sbz#ifdef DDB 100223637Sbz#ifndef KDB 101223637Sbz#error KDB must be enabled in order for DDB to work! 102223637Sbz#endif 103223637Sbz#include <ddb/ddb.h> 104223637Sbz#include <ddb/db_sym.h> 105223637Sbz#endif 106223637Sbz 107223637Sbz#include <net/netisr.h> 108223637Sbz 109223637Sbz#include <machine/clock.h> 110223637Sbz#include <machine/cpu.h> 111223637Sbz#include <machine/cputypes.h> 112223637Sbz#include <machine/intr_machdep.h> 113223637Sbz#include <machine/mca.h> 114223637Sbz#include <machine/md_var.h> 115223637Sbz#include <machine/metadata.h> 116223637Sbz#include <machine/pc/bios.h> 117223637Sbz#include <machine/pcb.h> 118223637Sbz#include <machine/proc.h> 119223637Sbz#include <machine/reg.h> 120223637Sbz#include <machine/sigframe.h> 121223637Sbz#include <machine/specialreg.h> 122223637Sbz#ifdef PERFMON 123223637Sbz#include <machine/perfmon.h> 124223637Sbz#endif 125240233Sglebius#include <machine/tss.h> 126223637Sbz#ifdef SMP 127223637Sbz#include <machine/smp.h> 128240641Sglebius#endif 129240641Sglebius 130223637Sbz#ifdef DEV_ATPIC 131223637Sbz#include <amd64/isa/icu.h> 132223637Sbz#else 133223637Sbz#include <machine/apicvar.h> 134223637Sbz#endif 135223637Sbz 136223637Sbz#include <isa/isareg.h> 137223637Sbz#include <isa/rtc.h> 138223637Sbz 139223637Sbz/* Sanity check for __curthread() */ 140223637SbzCTASSERT(offsetof(struct pcpu, pc_curthread) == 0); 141223637Sbz 142223637Sbzextern u_int64_t hammer_time(u_int64_t, u_int64_t); 143223637Sbz 144223637Sbzextern void printcpuinfo(void); /* XXX header file */ 145223637Sbzextern void identify_cpu(void); 146223637Sbzextern void panicifcpuunsupported(void); 147223637Sbz 148223637Sbz#define CS_SECURE(cs) (ISPL(cs) == SEL_UPL) 149223637Sbz#define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0) 150223637Sbz 151223637Sbzstatic void cpu_startup(void *); 152223637Sbzstatic void get_fpcontext(struct thread *td, mcontext_t *mcp); 153223637Sbzstatic int set_fpcontext(struct thread *td, const mcontext_t *mcp); 154223637SbzSYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL); 155223637Sbz 156223637Sbz#ifdef DDB 157223637Sbzextern vm_offset_t ksym_start, ksym_end; 158223637Sbz#endif 159223637Sbz 160223637Sbz/* Intel ICH registers */ 161231852Sbz#define ICH_PMBASE 0x400 162223637Sbz#define ICH_SMI_EN ICH_PMBASE + 0x30 163223637Sbz 164223637Sbzint _udatasel, _ucodesel, _ucode32sel, _ufssel, _ugssel; 165223637Sbz 166223637Sbzint cold = 1; 167223637Sbz 168223637Sbzlong Maxmem = 0; 169231852Sbzlong realmem = 0; 170231852Sbz 171223637Sbz/* 172223637Sbz * The number of PHYSMAP entries must be one less than the number of 173231852Sbz * PHYSSEG entries because the PHYSMAP entry that spans the largest 174223637Sbz * physical address that is accessible by ISA DMA is split into two 175223637Sbz * PHYSSEG entries. 176223637Sbz */ 177223637Sbz#define PHYSMAP_SIZE (2 * (VM_PHYSSEG_MAX - 1)) 178223637Sbz 179240233Sglebiusvm_paddr_t phys_avail[PHYSMAP_SIZE + 2]; 180240233Sglebiusvm_paddr_t dump_avail[PHYSMAP_SIZE + 2]; 181223637Sbz 182223637Sbz/* must be 2 less so 0 0 can signal end of chunks */ 183223637Sbz#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(phys_avail[0])) - 2) 184223637Sbz#define DUMP_AVAIL_ARRAY_END ((sizeof(dump_avail) / sizeof(dump_avail[0])) - 2) 185223637Sbz 186223637Sbzstruct kva_md_info kmi; 187223637Sbz 188223637Sbzstatic struct trapframe proc0_tf; 189223637Sbzstruct region_descriptor r_gdt, r_idt; 190223637Sbz 191223637Sbzstruct pcpu __pcpu[MAXCPU]; 192223637Sbz 193223637Sbzstruct mtx icu_lock; 194240641Sglebius 195240641Sglebiusstruct mem_range_softc mem_range_softc; 196223637Sbz 197223637Sbzstruct mtx dt_lock; /* lock for GDT and LDT */ 198240641Sglebius 199240641Sglebiusstatic void 200223637Sbzcpu_startup(dummy) 201240233Sglebius void *dummy; 202240233Sglebius{ 203223637Sbz uintmax_t memsize; 204240233Sglebius char *sysenv; 205240233Sglebius 206240233Sglebius /* 207223637Sbz * On MacBooks, we need to disallow the legacy USB circuit to 208223637Sbz * generate an SMI# because this can cause several problems, 209223637Sbz * namely: incorrect CPU frequency detection and failure to 210223637Sbz * start the APs. 211223637Sbz * We do this by disabling a bit in the SMI_EN (SMI Control and 212223637Sbz * Enable register) of the Intel ICH LPC Interface Bridge. 213240233Sglebius */ 214223637Sbz sysenv = getenv("smbios.system.product"); 215255143Sglebius if (sysenv != NULL) { 216255143Sglebius if (strncmp(sysenv, "MacBook1,1", 10) == 0 || 217255143Sglebius strncmp(sysenv, "MacBook3,1", 10) == 0 || 218223637Sbz strncmp(sysenv, "MacBookPro1,1", 13) == 0 || 219223637Sbz strncmp(sysenv, "MacBookPro1,2", 13) == 0 || 220223637Sbz strncmp(sysenv, "MacBookPro3,1", 13) == 0 || 221223637Sbz strncmp(sysenv, "Macmini1,1", 10) == 0) { 222223637Sbz if (bootverbose) 223223637Sbz printf("Disabling LEGACY_USB_EN bit on " 224223637Sbz "Intel ICH.\n"); 225223637Sbz outl(ICH_SMI_EN, inl(ICH_SMI_EN) & ~0x8); 226264521Sglebius } 227223637Sbz freeenv(sysenv); 228223637Sbz } 229223637Sbz 230223637Sbz /* 231255143Sglebius * Good {morning,afternoon,evening,night}. 232255143Sglebius */ 233255143Sglebius startrtclock(); 234255143Sglebius printcpuinfo(); 235255143Sglebius panicifcpuunsupported(); 236255143Sglebius#ifdef PERFMON 237223637Sbz perfmon_init(); 238255143Sglebius#endif 239223637Sbz realmem = Maxmem; 240223637Sbz 241223637Sbz /* 242223637Sbz * Display physical memory if SMBIOS reports reasonable amount. 243223637Sbz */ 244223637Sbz memsize = 0; 245255143Sglebius sysenv = getenv("smbios.memory.enabled"); 246255143Sglebius if (sysenv != NULL) { 247255143Sglebius memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10; 248255143Sglebius freeenv(sysenv); 249255143Sglebius } 250255143Sglebius if (memsize < ptoa((uintmax_t)cnt.v_free_count)) 251255143Sglebius memsize = ptoa((uintmax_t)Maxmem); 252255143Sglebius printf("real memory = %ju (%ju MB)\n", memsize, memsize >> 20); 253223637Sbz 254255143Sglebius /* 255223637Sbz * Display any holes after the first chunk of extended memory. 256255143Sglebius */ 257223637Sbz if (bootverbose) { 258223637Sbz int indx; 259223637Sbz 260223637Sbz printf("Physical memory chunk(s):\n"); 261223637Sbz for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) { 262258133Sglebius vm_paddr_t size; 263223637Sbz 264223637Sbz size = phys_avail[indx + 1] - phys_avail[indx]; 265223637Sbz printf( 266223637Sbz "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n", 267223637Sbz (uintmax_t)phys_avail[indx], 268223637Sbz (uintmax_t)phys_avail[indx + 1] - 1, 269223637Sbz (uintmax_t)size, (uintmax_t)size / PAGE_SIZE); 270258133Sglebius } 271223637Sbz } 272223637Sbz 273255143Sglebius vm_ksubmap_init(&kmi); 274223637Sbz 275223637Sbz printf("avail memory = %ju (%ju MB)\n", 276223637Sbz ptoa((uintmax_t)cnt.v_free_count), 277223637Sbz ptoa((uintmax_t)cnt.v_free_count) / 1048576); 278223637Sbz 279223637Sbz /* 280223637Sbz * Set up buffers, so they can be used to read disk labels. 281255143Sglebius */ 282223637Sbz bufinit(); 283223637Sbz vm_pager_bufferinit(); 284223637Sbz 285223637Sbz cpu_setregs(); 286223637Sbz mca_init(); 287223637Sbz} 288223637Sbz 289223637Sbz/* 290223637Sbz * Send an interrupt to process. 291223637Sbz * 292223637Sbz * Stack is set up to allow sigcode stored 293223637Sbz * at top to call routine, followed by call 294223637Sbz * to sigreturn routine below. After sigreturn 295223637Sbz * resets the signal mask, the stack, and the 296223637Sbz * frame pointer, it returns to the user 297223637Sbz * specified pc, psl. 298223637Sbz */ 299223637Sbzvoid 300223637Sbzsendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask) 301223637Sbz{ 302223637Sbz struct sigframe sf, *sfp; 303223637Sbz struct proc *p; 304223637Sbz struct thread *td; 305223637Sbz struct sigacts *psp; 306223637Sbz char *sp; 307223637Sbz struct trapframe *regs; 308223637Sbz int sig; 309223637Sbz int oonstack; 310223637Sbz 311240233Sglebius td = curthread; 312223637Sbz p = td->td_proc; 313270023Sglebius PROC_LOCK_ASSERT(p, MA_OWNED); 314270023Sglebius sig = ksi->ksi_signo; 315223637Sbz psp = p->p_sigacts; 316270023Sglebius mtx_assert(&psp->ps_mtx, MA_OWNED); 317240233Sglebius regs = td->td_frame; 318270023Sglebius oonstack = sigonstack(regs->tf_rsp); 319270023Sglebius 320270023Sglebius /* Save user context. */ 321270023Sglebius bzero(&sf, sizeof(sf)); 322270023Sglebius sf.sf_uc.uc_sigmask = *mask; 323270023Sglebius sf.sf_uc.uc_stack = td->td_sigstk; 324270023Sglebius sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) 325270023Sglebius ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE; 326270023Sglebius sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0; 327270023Sglebius bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(*regs)); 328270023Sglebius sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */ 329270023Sglebius get_fpcontext(td, &sf.sf_uc.uc_mcontext); 330270023Sglebius fpstate_drop(td); 331223637Sbz sf.sf_uc.uc_mcontext.mc_fsbase = td->td_pcb->pcb_fsbase; 332270023Sglebius sf.sf_uc.uc_mcontext.mc_gsbase = td->td_pcb->pcb_gsbase; 333223637Sbz 334223637Sbz /* Allocate space for the signal handler context. */ 335270023Sglebius if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack && 336270023Sglebius SIGISMEMBER(psp->ps_sigonstack, sig)) { 337223637Sbz sp = td->td_sigstk.ss_sp + 338223637Sbz td->td_sigstk.ss_size - sizeof(struct sigframe); 339223637Sbz#if defined(COMPAT_43) 340223637Sbz td->td_sigstk.ss_flags |= SS_ONSTACK; 341223637Sbz#endif 342223637Sbz } else 343223637Sbz sp = (char *)regs->tf_rsp - sizeof(struct sigframe) - 128; 344223637Sbz /* Align to 16 bytes. */ 345223637Sbz sfp = (struct sigframe *)((unsigned long)sp & ~0xFul); 346223637Sbz 347223637Sbz /* Translate the signal if appropriate. */ 348223637Sbz if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize) 349223637Sbz sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; 350223637Sbz 351223637Sbz /* Build the argument list for the signal handler. */ 352223637Sbz regs->tf_rdi = sig; /* arg 1 in %rdi */ 353223637Sbz regs->tf_rdx = (register_t)&sfp->sf_uc; /* arg 3 in %rdx */ 354223637Sbz if (SIGISMEMBER(psp->ps_siginfo, sig)) { 355223637Sbz /* Signal handler installed with SA_SIGINFO. */ 356223637Sbz regs->tf_rsi = (register_t)&sfp->sf_si; /* arg 2 in %rsi */ 357223637Sbz sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; 358223637Sbz 359223637Sbz /* Fill in POSIX parts */ 360223637Sbz sf.sf_si = ksi->ksi_info; 361223637Sbz sf.sf_si.si_signo = sig; /* maybe a translated signal */ 362223637Sbz regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */ 363223637Sbz } else { 364223637Sbz /* Old FreeBSD-style arguments. */ 365223637Sbz regs->tf_rsi = ksi->ksi_code; /* arg 2 in %rsi */ 366223637Sbz regs->tf_rcx = (register_t)ksi->ksi_addr; /* arg 4 in %rcx */ 367223637Sbz sf.sf_ahu.sf_handler = catcher; 368223637Sbz } 369223637Sbz mtx_unlock(&psp->ps_mtx); 370223637Sbz PROC_UNLOCK(p); 371223637Sbz 372223637Sbz /* 373223637Sbz * Copy the sigframe out to the user's stack. 374223637Sbz */ 375223637Sbz if (copyout(&sf, sfp, sizeof(*sfp)) != 0) { 376223637Sbz#ifdef DEBUG 377223637Sbz printf("process %ld has trashed its stack\n", (long)p->p_pid); 378223637Sbz#endif 379223637Sbz PROC_LOCK(p); 380223637Sbz sigexit(td, SIGILL); 381223637Sbz } 382223637Sbz 383223637Sbz regs->tf_rsp = (long)sfp; 384223637Sbz regs->tf_rip = PS_STRINGS - *(p->p_sysent->sv_szsigcode); 385223637Sbz regs->tf_rflags &= ~(PSL_T | PSL_D); 386223637Sbz regs->tf_cs = _ucodesel; 387223637Sbz regs->tf_ds = _udatasel; 388223637Sbz regs->tf_es = _udatasel; 389223637Sbz regs->tf_fs = _ufssel; 390223637Sbz regs->tf_gs = _ugssel; 391223637Sbz regs->tf_flags = TF_HASSEGS; 392223637Sbz td->td_pcb->pcb_full_iret = 1; 393223637Sbz PROC_LOCK(p); 394223637Sbz mtx_lock(&psp->ps_mtx); 395223637Sbz} 396223637Sbz 397223637Sbz/* 398223637Sbz * System call to cleanup state after a signal 399223637Sbz * has been taken. Reset signal mask and 400223637Sbz * stack state from context left by sendsig (above). 401223637Sbz * Return to previous pc and psl as specified by 402223637Sbz * context left by sendsig. Check carefully to 403223637Sbz * make sure that the user has not modified the 404223637Sbz * state to gain improper privileges. 405223637Sbz * 406223637Sbz * MPSAFE 407223637Sbz */ 408223637Sbzint 409223637Sbzsigreturn(td, uap) 410223637Sbz struct thread *td; 411223637Sbz struct sigreturn_args /* { 412223637Sbz const struct __ucontext *sigcntxp; 413223637Sbz } */ *uap; 414223637Sbz{ 415223637Sbz ucontext_t uc; 416223637Sbz struct proc *p = td->td_proc; 417240233Sglebius struct trapframe *regs; 418240233Sglebius const ucontext_t *ucp; 419240233Sglebius long rflags; 420223637Sbz int cs, error, ret; 421223637Sbz ksiginfo_t ksi; 422223637Sbz 423240233Sglebius error = copyin(uap->sigcntxp, &uc, sizeof(uc)); 424223637Sbz if (error != 0) { 425240233Sglebius printf("sigreturn (pid %d): copyin failed\n", p->p_pid); 426240233Sglebius return (error); 427240233Sglebius } 428240233Sglebius ucp = &uc; 429240233Sglebius if ((ucp->uc_mcontext.mc_flags & ~_MC_FLAG_MASK) != 0) { 430240233Sglebius printf("sigreturn (pid %d): mc_flags %x\n", p->p_pid, 431240233Sglebius ucp->uc_mcontext.mc_flags); 432240233Sglebius return (EINVAL); 433240233Sglebius } 434240233Sglebius regs = td->td_frame; 435240233Sglebius rflags = ucp->uc_mcontext.mc_rflags; 436240233Sglebius /* 437240233Sglebius * Don't allow users to change privileged or reserved flags. 438240233Sglebius */ 439240233Sglebius /* 440240233Sglebius * XXX do allow users to change the privileged flag PSL_RF. 441240233Sglebius * The cpu sets PSL_RF in tf_rflags for faults. Debuggers 442240233Sglebius * should sometimes set it there too. tf_rflags is kept in 443240233Sglebius * the signal context during signal handling and there is no 444240233Sglebius * other place to remember it, so the PSL_RF bit may be 445240233Sglebius * corrupted by the signal handler without us knowing. 446240233Sglebius * Corruption of the PSL_RF bit at worst causes one more or 447240233Sglebius * one less debugger trap, so allowing it is fairly harmless. 448240233Sglebius */ 449223637Sbz if (!EFL_SECURE(rflags & ~PSL_RF, regs->tf_rflags & ~PSL_RF)) { 450223637Sbz printf("sigreturn (pid %d): rflags = 0x%lx\n", p->p_pid, 451240233Sglebius rflags); 452223637Sbz return (EINVAL); 453223637Sbz } 454223637Sbz 455240233Sglebius /* 456223637Sbz * Don't allow users to load a valid privileged %cs. Let the 457223637Sbz * hardware check for invalid selectors, excess privilege in 458223637Sbz * other selectors, invalid %eip's and invalid %esp's. 459223637Sbz */ 460223637Sbz cs = ucp->uc_mcontext.mc_cs; 461240233Sglebius if (!CS_SECURE(cs)) { 462223637Sbz printf("sigreturn (pid %d): cs = 0x%x\n", p->p_pid, cs); 463240233Sglebius ksiginfo_init_trap(&ksi); 464240233Sglebius ksi.ksi_signo = SIGBUS; 465223637Sbz ksi.ksi_code = BUS_OBJERR; 466223637Sbz ksi.ksi_trapno = T_PROTFLT; 467223637Sbz ksi.ksi_addr = (void *)regs->tf_rip; 468240233Sglebius trapsignal(td, &ksi); 469223637Sbz return (EINVAL); 470223637Sbz } 471223637Sbz 472223637Sbz ret = set_fpcontext(td, &ucp->uc_mcontext); 473223637Sbz if (ret != 0) { 474223637Sbz printf("sigreturn (pid %d): set_fpcontext\n", p->p_pid); 475223637Sbz return (ret); 476223637Sbz } 477240233Sglebius bcopy(&ucp->uc_mcontext.mc_rdi, regs, sizeof(*regs)); 478223637Sbz td->td_pcb->pcb_fsbase = ucp->uc_mcontext.mc_fsbase; 479223637Sbz td->td_pcb->pcb_gsbase = ucp->uc_mcontext.mc_gsbase; 480223637Sbz 481223637Sbz PROC_LOCK(p); 482223637Sbz#if defined(COMPAT_43) 483223637Sbz if (ucp->uc_mcontext.mc_onstack & 1) 484223637Sbz td->td_sigstk.ss_flags |= SS_ONSTACK; 485223637Sbz else 486223637Sbz td->td_sigstk.ss_flags &= ~SS_ONSTACK; 487223637Sbz#endif 488223637Sbz 489223637Sbz td->td_sigmask = ucp->uc_sigmask; 490223637Sbz SIG_CANTMASK(td->td_sigmask); 491223637Sbz signotify(td); 492223637Sbz PROC_UNLOCK(p); 493223637Sbz td->td_pcb->pcb_flags |= PCB_FULLCTX; 494223637Sbz td->td_pcb->pcb_full_iret = 1; 495240233Sglebius return (EJUSTRETURN); 496223637Sbz} 497223637Sbz 498223637Sbz#ifdef COMPAT_FREEBSD4 499223637Sbzint 500223637Sbzfreebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap) 501223637Sbz{ 502223637Sbz 503223637Sbz return sigreturn(td, (struct sigreturn_args *)uap); 504223637Sbz} 505223637Sbz#endif 506223637Sbz 507223637Sbz 508223637Sbz/* 509223637Sbz * Machine dependent boot() routine 510223637Sbz * 511223637Sbz * I haven't seen anything to put here yet 512223637Sbz * Possibly some stuff might be grafted back here from boot() 513223637Sbz */ 514223637Sbzvoid 515240641Sglebiuscpu_boot(int howto) 516223637Sbz{ 517223637Sbz} 518240233Sglebius 519240233Sglebius/* 520223637Sbz * Flush the D-cache for non-DMA I/O so that the I-cache can 521240233Sglebius * be made coherent later. 522240233Sglebius */ 523240233Sglebiusvoid 524223637Sbzcpu_flush_dcache(void *ptr, size_t len) 525223637Sbz{ 526223637Sbz /* Not applicable */ 527240641Sglebius} 528223637Sbz 529223637Sbz/* Get current clock frequency for the given cpu id. */ 530240641Sglebiusint 531240641Sglebiuscpu_est_clockrate(int cpu_id, uint64_t *rate) 532223637Sbz{ 533223637Sbz register_t reg; 534240641Sglebius uint64_t tsc1, tsc2; 535223637Sbz 536223637Sbz if (pcpu_find(cpu_id) == NULL || rate == NULL) 537240641Sglebius return (EINVAL); 538240641Sglebius 539223637Sbz /* If we're booting, trust the rate calibrated moments ago. */ 540223637Sbz if (cold) { 541240233Sglebius *rate = tsc_freq; 542240233Sglebius return (0); 543223637Sbz } 544240233Sglebius 545240233Sglebius#ifdef SMP 546240233Sglebius /* Schedule ourselves on the indicated cpu. */ 547240233Sglebius thread_lock(curthread); 548240233Sglebius sched_bind(curthread, cpu_id); 549240233Sglebius thread_unlock(curthread); 550223637Sbz#endif 551240233Sglebius 552240233Sglebius /* Calibrate by measuring a short delay. */ 553240233Sglebius reg = intr_disable(); 554240233Sglebius tsc1 = rdtsc(); 555240233Sglebius DELAY(1000); 556240233Sglebius tsc2 = rdtsc(); 557240233Sglebius intr_restore(reg); 558240233Sglebius 559240233Sglebius#ifdef SMP 560223637Sbz thread_lock(curthread); 561240233Sglebius sched_unbind(curthread); 562240233Sglebius thread_unlock(curthread); 563240233Sglebius#endif 564240233Sglebius 565240233Sglebius /* 566240233Sglebius * Calculate the difference in readings, convert to Mhz, and 567255143Sglebius * subtract 0.5% of the total. Empirical testing has shown that 568255143Sglebius * overhead in DELAY() works out to approximately this value. 569240233Sglebius */ 570240233Sglebius tsc2 -= tsc1; 571240233Sglebius *rate = tsc2 * 1000 - tsc2 * 5; 572240233Sglebius return (0); 573240233Sglebius} 574240233Sglebius 575240233Sglebius/* 576240233Sglebius * Shutdown the CPU as much as possible 577240233Sglebius */ 578240233Sglebiusvoid 579240233Sglebiuscpu_halt(void) 580240233Sglebius{ 581223637Sbz for (;;) 582240233Sglebius __asm__ ("hlt"); 583240233Sglebius} 584240233Sglebius 585240233Sglebiusvoid (*cpu_idle_hook)(void) = NULL; /* ACPI idle hook. */ 586240233Sglebius 587240233Sglebiusstatic void 588240233Sglebiuscpu_idle_hlt(int busy) 589240233Sglebius{ 590240233Sglebius /* 591240233Sglebius * we must absolutely guarentee that hlt is the next instruction 592223637Sbz * after sti or we introduce a timing window. 593223637Sbz */ 594240233Sglebius disable_intr(); 595240233Sglebius if (sched_runnable()) 596240233Sglebius enable_intr(); 597240233Sglebius else 598240233Sglebius __asm __volatile("sti; hlt"); 599240233Sglebius} 600240233Sglebius 601240233Sglebiusstatic void 602240233Sglebiuscpu_idle_acpi(int busy) 603240233Sglebius{ 604223637Sbz disable_intr(); 605240233Sglebius if (sched_runnable()) 606240233Sglebius enable_intr(); 607240233Sglebius else if (cpu_idle_hook) 608240233Sglebius cpu_idle_hook(); 609240233Sglebius else 610240233Sglebius __asm __volatile("sti; hlt"); 611240233Sglebius} 612240233Sglebius 613240233Sglebiusstatic int cpu_ident_amdc1e = 0; 614240233Sglebius 615240233Sglebiusstatic int 616240233Sglebiuscpu_probe_amdc1e(void) 617240233Sglebius{ 618240233Sglebius int i; 619223637Sbz 620240233Sglebius /* 621240233Sglebius * Forget it, if we're not using local APIC timer. 622240233Sglebius */ 623240233Sglebius if (resource_disabled("apic", 0) || 624223637Sbz (resource_int_value("apic", 0, "clock", &i) == 0 && i == 0)) 625223637Sbz return (0); 626240233Sglebius 627240233Sglebius /* 628240233Sglebius * Detect the presence of C1E capability mostly on latest 629240233Sglebius * dual-cores (or future) k8 family. 630240233Sglebius */ 631240233Sglebius if (cpu_vendor_id == CPU_VENDOR_AMD && 632240233Sglebius (cpu_id & 0x00000f00) == 0x00000f00 && 633240233Sglebius (cpu_id & 0x0fff0000) >= 0x00040000) { 634223637Sbz cpu_ident_amdc1e = 1; 635240233Sglebius return (1); 636240233Sglebius } 637240233Sglebius 638240233Sglebius return (0); 639223637Sbz} 640240233Sglebius 641240233Sglebius/* 642240233Sglebius * C1E renders the local APIC timer dead, so we disable it by 643240233Sglebius * reading the Interrupt Pending Message register and clearing 644240233Sglebius * both C1eOnCmpHalt (bit 28) and SmiOnCmpHalt (bit 27). 645240233Sglebius * 646240233Sglebius * Reference: 647240233Sglebius * "BIOS and Kernel Developer's Guide for AMD NPT Family 0Fh Processors" 648223637Sbz * #32559 revision 3.00+ 649240233Sglebius */ 650240233Sglebius#define MSR_AMDK8_IPM 0xc0010055 651223637Sbz#define AMDK8_SMIONCMPHALT (1ULL << 27) 652240233Sglebius#define AMDK8_C1EONCMPHALT (1ULL << 28) 653240233Sglebius#define AMDK8_CMPHALT (AMDK8_SMIONCMPHALT | AMDK8_C1EONCMPHALT) 654240233Sglebius 655223637Sbzstatic void 656240233Sglebiuscpu_idle_amdc1e(int busy) 657240233Sglebius{ 658240233Sglebius 659240233Sglebius disable_intr(); 660240233Sglebius if (sched_runnable()) 661240233Sglebius enable_intr(); 662240233Sglebius else { 663223637Sbz uint64_t msr; 664240233Sglebius 665240233Sglebius msr = rdmsr(MSR_AMDK8_IPM); 666240233Sglebius if (msr & AMDK8_CMPHALT) 667223637Sbz wrmsr(MSR_AMDK8_IPM, msr & ~AMDK8_CMPHALT); 668240233Sglebius 669240233Sglebius if (cpu_idle_hook) 670240233Sglebius cpu_idle_hook(); 671240233Sglebius else 672240233Sglebius __asm __volatile("sti; hlt"); 673240233Sglebius } 674240233Sglebius} 675240233Sglebius 676260377Sglebiusstatic void 677240233Sglebiuscpu_idle_spin(int busy) 678240233Sglebius{ 679223637Sbz return; 680} 681 682void (*cpu_idle_fn)(int) = cpu_idle_acpi; 683 684void 685cpu_idle(int busy) 686{ 687#ifdef SMP 688 if (mp_grab_cpu_hlt()) 689 return; 690#endif 691 cpu_idle_fn(busy); 692} 693 694/* 695 * mwait cpu power states. Lower 4 bits are sub-states. 696 */ 697#define MWAIT_C0 0xf0 698#define MWAIT_C1 0x00 699#define MWAIT_C2 0x10 700#define MWAIT_C3 0x20 701#define MWAIT_C4 0x30 702 703#define MWAIT_DISABLED 0x0 704#define MWAIT_WOKEN 0x1 705#define MWAIT_WAITING 0x2 706 707static void 708cpu_idle_mwait(int busy) 709{ 710 int *mwait; 711 712 mwait = (int *)PCPU_PTR(monitorbuf); 713 *mwait = MWAIT_WAITING; 714 if (sched_runnable()) 715 return; 716 cpu_monitor(mwait, 0, 0); 717 if (*mwait == MWAIT_WAITING) 718 cpu_mwait(0, MWAIT_C1); 719} 720 721static void 722cpu_idle_mwait_hlt(int busy) 723{ 724 int *mwait; 725 726 mwait = (int *)PCPU_PTR(monitorbuf); 727 if (busy == 0) { 728 *mwait = MWAIT_DISABLED; 729 cpu_idle_hlt(busy); 730 return; 731 } 732 *mwait = MWAIT_WAITING; 733 if (sched_runnable()) 734 return; 735 cpu_monitor(mwait, 0, 0); 736 if (*mwait == MWAIT_WAITING) 737 cpu_mwait(0, MWAIT_C1); 738} 739 740int 741cpu_idle_wakeup(int cpu) 742{ 743 struct pcpu *pcpu; 744 int *mwait; 745 746 if (cpu_idle_fn == cpu_idle_spin) 747 return (1); 748 if (cpu_idle_fn != cpu_idle_mwait && cpu_idle_fn != cpu_idle_mwait_hlt) 749 return (0); 750 pcpu = pcpu_find(cpu); 751 mwait = (int *)pcpu->pc_monitorbuf; 752 /* 753 * This doesn't need to be atomic since missing the race will 754 * simply result in unnecessary IPIs. 755 */ 756 if (cpu_idle_fn == cpu_idle_mwait_hlt && *mwait == MWAIT_DISABLED) 757 return (0); 758 *mwait = MWAIT_WOKEN; 759 760 return (1); 761} 762 763/* 764 * Ordered by speed/power consumption. 765 */ 766struct { 767 void *id_fn; 768 char *id_name; 769} idle_tbl[] = { 770 { cpu_idle_spin, "spin" }, 771 { cpu_idle_mwait, "mwait" }, 772 { cpu_idle_mwait_hlt, "mwait_hlt" }, 773 { cpu_idle_amdc1e, "amdc1e" }, 774 { cpu_idle_hlt, "hlt" }, 775 { cpu_idle_acpi, "acpi" }, 776 { NULL, NULL } 777}; 778 779static int 780idle_sysctl_available(SYSCTL_HANDLER_ARGS) 781{ 782 char *avail, *p; 783 int error; 784 int i; 785 786 avail = malloc(256, M_TEMP, M_WAITOK); 787 p = avail; 788 for (i = 0; idle_tbl[i].id_name != NULL; i++) { 789 if (strstr(idle_tbl[i].id_name, "mwait") && 790 (cpu_feature2 & CPUID2_MON) == 0) 791 continue; 792 if (strcmp(idle_tbl[i].id_name, "amdc1e") == 0 && 793 cpu_ident_amdc1e == 0) 794 continue; 795 p += sprintf(p, "%s, ", idle_tbl[i].id_name); 796 } 797 error = sysctl_handle_string(oidp, avail, 0, req); 798 free(avail, M_TEMP); 799 return (error); 800} 801 802static int 803idle_sysctl(SYSCTL_HANDLER_ARGS) 804{ 805 char buf[16]; 806 int error; 807 char *p; 808 int i; 809 810 p = "unknown"; 811 for (i = 0; idle_tbl[i].id_name != NULL; i++) { 812 if (idle_tbl[i].id_fn == cpu_idle_fn) { 813 p = idle_tbl[i].id_name; 814 break; 815 } 816 } 817 strncpy(buf, p, sizeof(buf)); 818 error = sysctl_handle_string(oidp, buf, sizeof(buf), req); 819 if (error != 0 || req->newptr == NULL) 820 return (error); 821 for (i = 0; idle_tbl[i].id_name != NULL; i++) { 822 if (strstr(idle_tbl[i].id_name, "mwait") && 823 (cpu_feature2 & CPUID2_MON) == 0) 824 continue; 825 if (strcmp(idle_tbl[i].id_name, "amdc1e") == 0 && 826 cpu_ident_amdc1e == 0) 827 continue; 828 if (strcmp(idle_tbl[i].id_name, buf)) 829 continue; 830 cpu_idle_fn = idle_tbl[i].id_fn; 831 return (0); 832 } 833 return (EINVAL); 834} 835 836SYSCTL_PROC(_machdep, OID_AUTO, idle_available, CTLTYPE_STRING | CTLFLAG_RD, 837 0, 0, idle_sysctl_available, "A", "list of available idle functions"); 838 839SYSCTL_PROC(_machdep, OID_AUTO, idle, CTLTYPE_STRING | CTLFLAG_RW, 0, 0, 840 idle_sysctl, "A", "currently selected idle function"); 841 842/* 843 * Reset registers to default values on exec. 844 */ 845void 846exec_setregs(td, entry, stack, ps_strings) 847 struct thread *td; 848 u_long entry; 849 u_long stack; 850 u_long ps_strings; 851{ 852 struct trapframe *regs = td->td_frame; 853 struct pcb *pcb = td->td_pcb; 854 855 mtx_lock(&dt_lock); 856 if (td->td_proc->p_md.md_ldt != NULL) 857 user_ldt_free(td); 858 else 859 mtx_unlock(&dt_lock); 860 861 pcb->pcb_fsbase = 0; 862 pcb->pcb_gsbase = 0; 863 pcb->pcb_flags &= ~(PCB_32BIT | PCB_GS32BIT); 864 pcb->pcb_initial_fpucw = __INITIAL_FPUCW__; 865 pcb->pcb_full_iret = 1; 866 867 bzero((char *)regs, sizeof(struct trapframe)); 868 regs->tf_rip = entry; 869 regs->tf_rsp = ((stack - 8) & ~0xFul) + 8; 870 regs->tf_rdi = stack; /* argv */ 871 regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T); 872 regs->tf_ss = _udatasel; 873 regs->tf_cs = _ucodesel; 874 regs->tf_ds = _udatasel; 875 regs->tf_es = _udatasel; 876 regs->tf_fs = _ufssel; 877 regs->tf_gs = _ugssel; 878 regs->tf_flags = TF_HASSEGS; 879 880 /* 881 * Reset the hardware debug registers if they were in use. 882 * They won't have any meaning for the newly exec'd process. 883 */ 884 if (pcb->pcb_flags & PCB_DBREGS) { 885 pcb->pcb_dr0 = 0; 886 pcb->pcb_dr1 = 0; 887 pcb->pcb_dr2 = 0; 888 pcb->pcb_dr3 = 0; 889 pcb->pcb_dr6 = 0; 890 pcb->pcb_dr7 = 0; 891 if (pcb == PCPU_GET(curpcb)) { 892 /* 893 * Clear the debug registers on the running 894 * CPU, otherwise they will end up affecting 895 * the next process we switch to. 896 */ 897 reset_dbregs(); 898 } 899 pcb->pcb_flags &= ~PCB_DBREGS; 900 } 901 902 /* 903 * Drop the FP state if we hold it, so that the process gets a 904 * clean FP state if it uses the FPU again. 905 */ 906 fpstate_drop(td); 907} 908 909void 910cpu_setregs(void) 911{ 912 register_t cr0; 913 914 cr0 = rcr0(); 915 /* 916 * CR0_MP, CR0_NE and CR0_TS are also set by npx_probe() for the 917 * BSP. See the comments there about why we set them. 918 */ 919 cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM; 920 load_cr0(cr0); 921} 922 923/* 924 * Initialize amd64 and configure to run kernel 925 */ 926 927/* 928 * Initialize segments & interrupt table 929 */ 930 931struct user_segment_descriptor gdt[NGDT * MAXCPU];/* global descriptor tables */ 932static struct gate_descriptor idt0[NIDT]; 933struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */ 934 935static char dblfault_stack[PAGE_SIZE] __aligned(16); 936 937static char nmi0_stack[PAGE_SIZE] __aligned(16); 938CTASSERT(sizeof(struct nmi_pcpu) == 16); 939 940struct amd64tss common_tss[MAXCPU]; 941 942/* 943 * Software prototypes -- in more palatable form. 944 * 945 * Keep GUFS32, GUGS32, GUCODE32 and GUDATA at the same 946 * slots as corresponding segments for i386 kernel. 947 */ 948struct soft_segment_descriptor gdt_segs[] = { 949/* GNULL_SEL 0 Null Descriptor */ 950{ .ssd_base = 0x0, 951 .ssd_limit = 0x0, 952 .ssd_type = 0, 953 .ssd_dpl = 0, 954 .ssd_p = 0, 955 .ssd_long = 0, 956 .ssd_def32 = 0, 957 .ssd_gran = 0 }, 958/* GNULL2_SEL 1 Null Descriptor */ 959{ .ssd_base = 0x0, 960 .ssd_limit = 0x0, 961 .ssd_type = 0, 962 .ssd_dpl = 0, 963 .ssd_p = 0, 964 .ssd_long = 0, 965 .ssd_def32 = 0, 966 .ssd_gran = 0 }, 967/* GUFS32_SEL 2 32 bit %gs Descriptor for user */ 968{ .ssd_base = 0x0, 969 .ssd_limit = 0xfffff, 970 .ssd_type = SDT_MEMRWA, 971 .ssd_dpl = SEL_UPL, 972 .ssd_p = 1, 973 .ssd_long = 0, 974 .ssd_def32 = 1, 975 .ssd_gran = 1 }, 976/* GUGS32_SEL 3 32 bit %fs Descriptor for user */ 977{ .ssd_base = 0x0, 978 .ssd_limit = 0xfffff, 979 .ssd_type = SDT_MEMRWA, 980 .ssd_dpl = SEL_UPL, 981 .ssd_p = 1, 982 .ssd_long = 0, 983 .ssd_def32 = 1, 984 .ssd_gran = 1 }, 985/* GCODE_SEL 4 Code Descriptor for kernel */ 986{ .ssd_base = 0x0, 987 .ssd_limit = 0xfffff, 988 .ssd_type = SDT_MEMERA, 989 .ssd_dpl = SEL_KPL, 990 .ssd_p = 1, 991 .ssd_long = 1, 992 .ssd_def32 = 0, 993 .ssd_gran = 1 }, 994/* GDATA_SEL 5 Data Descriptor for kernel */ 995{ .ssd_base = 0x0, 996 .ssd_limit = 0xfffff, 997 .ssd_type = SDT_MEMRWA, 998 .ssd_dpl = SEL_KPL, 999 .ssd_p = 1, 1000 .ssd_long = 1, 1001 .ssd_def32 = 0, 1002 .ssd_gran = 1 }, 1003/* GUCODE32_SEL 6 32 bit Code Descriptor for user */ 1004{ .ssd_base = 0x0, 1005 .ssd_limit = 0xfffff, 1006 .ssd_type = SDT_MEMERA, 1007 .ssd_dpl = SEL_UPL, 1008 .ssd_p = 1, 1009 .ssd_long = 0, 1010 .ssd_def32 = 1, 1011 .ssd_gran = 1 }, 1012/* GUDATA_SEL 7 32/64 bit Data Descriptor for user */ 1013{ .ssd_base = 0x0, 1014 .ssd_limit = 0xfffff, 1015 .ssd_type = SDT_MEMRWA, 1016 .ssd_dpl = SEL_UPL, 1017 .ssd_p = 1, 1018 .ssd_long = 0, 1019 .ssd_def32 = 1, 1020 .ssd_gran = 1 }, 1021/* GUCODE_SEL 8 64 bit Code Descriptor for user */ 1022{ .ssd_base = 0x0, 1023 .ssd_limit = 0xfffff, 1024 .ssd_type = SDT_MEMERA, 1025 .ssd_dpl = SEL_UPL, 1026 .ssd_p = 1, 1027 .ssd_long = 1, 1028 .ssd_def32 = 0, 1029 .ssd_gran = 1 }, 1030/* GPROC0_SEL 9 Proc 0 Tss Descriptor */ 1031{ .ssd_base = 0x0, 1032 .ssd_limit = sizeof(struct amd64tss) + IOPAGES * PAGE_SIZE - 1, 1033 .ssd_type = SDT_SYSTSS, 1034 .ssd_dpl = SEL_KPL, 1035 .ssd_p = 1, 1036 .ssd_long = 0, 1037 .ssd_def32 = 0, 1038 .ssd_gran = 0 }, 1039/* Actually, the TSS is a system descriptor which is double size */ 1040{ .ssd_base = 0x0, 1041 .ssd_limit = 0x0, 1042 .ssd_type = 0, 1043 .ssd_dpl = 0, 1044 .ssd_p = 0, 1045 .ssd_long = 0, 1046 .ssd_def32 = 0, 1047 .ssd_gran = 0 }, 1048/* GUSERLDT_SEL 11 LDT Descriptor */ 1049{ .ssd_base = 0x0, 1050 .ssd_limit = 0x0, 1051 .ssd_type = 0, 1052 .ssd_dpl = 0, 1053 .ssd_p = 0, 1054 .ssd_long = 0, 1055 .ssd_def32 = 0, 1056 .ssd_gran = 0 }, 1057/* GUSERLDT_SEL 12 LDT Descriptor, double size */ 1058{ .ssd_base = 0x0, 1059 .ssd_limit = 0x0, 1060 .ssd_type = 0, 1061 .ssd_dpl = 0, 1062 .ssd_p = 0, 1063 .ssd_long = 0, 1064 .ssd_def32 = 0, 1065 .ssd_gran = 0 }, 1066}; 1067 1068void 1069setidt(idx, func, typ, dpl, ist) 1070 int idx; 1071 inthand_t *func; 1072 int typ; 1073 int dpl; 1074 int ist; 1075{ 1076 struct gate_descriptor *ip; 1077 1078 ip = idt + idx; 1079 ip->gd_looffset = (uintptr_t)func; 1080 ip->gd_selector = GSEL(GCODE_SEL, SEL_KPL); 1081 ip->gd_ist = ist; 1082 ip->gd_xx = 0; 1083 ip->gd_type = typ; 1084 ip->gd_dpl = dpl; 1085 ip->gd_p = 1; 1086 ip->gd_hioffset = ((uintptr_t)func)>>16 ; 1087} 1088 1089extern inthand_t 1090 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl), 1091 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm), 1092 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot), 1093 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align), 1094 IDTVEC(xmm), IDTVEC(dblfault), 1095 IDTVEC(fast_syscall), IDTVEC(fast_syscall32); 1096 1097#ifdef DDB 1098/* 1099 * Display the index and function name of any IDT entries that don't use 1100 * the default 'rsvd' entry point. 1101 */ 1102DB_SHOW_COMMAND(idt, db_show_idt) 1103{ 1104 struct gate_descriptor *ip; 1105 int idx; 1106 uintptr_t func; 1107 1108 ip = idt; 1109 for (idx = 0; idx < NIDT && !db_pager_quit; idx++) { 1110 func = ((long)ip->gd_hioffset << 16 | ip->gd_looffset); 1111 if (func != (uintptr_t)&IDTVEC(rsvd)) { 1112 db_printf("%3d\t", idx); 1113 db_printsym(func, DB_STGY_PROC); 1114 db_printf("\n"); 1115 } 1116 ip++; 1117 } 1118} 1119#endif 1120 1121void 1122sdtossd(sd, ssd) 1123 struct user_segment_descriptor *sd; 1124 struct soft_segment_descriptor *ssd; 1125{ 1126 1127 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase; 1128 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit; 1129 ssd->ssd_type = sd->sd_type; 1130 ssd->ssd_dpl = sd->sd_dpl; 1131 ssd->ssd_p = sd->sd_p; 1132 ssd->ssd_long = sd->sd_long; 1133 ssd->ssd_def32 = sd->sd_def32; 1134 ssd->ssd_gran = sd->sd_gran; 1135} 1136 1137void 1138ssdtosd(ssd, sd) 1139 struct soft_segment_descriptor *ssd; 1140 struct user_segment_descriptor *sd; 1141{ 1142 1143 sd->sd_lobase = (ssd->ssd_base) & 0xffffff; 1144 sd->sd_hibase = (ssd->ssd_base >> 24) & 0xff; 1145 sd->sd_lolimit = (ssd->ssd_limit) & 0xffff; 1146 sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf; 1147 sd->sd_type = ssd->ssd_type; 1148 sd->sd_dpl = ssd->ssd_dpl; 1149 sd->sd_p = ssd->ssd_p; 1150 sd->sd_long = ssd->ssd_long; 1151 sd->sd_def32 = ssd->ssd_def32; 1152 sd->sd_gran = ssd->ssd_gran; 1153} 1154 1155void 1156ssdtosyssd(ssd, sd) 1157 struct soft_segment_descriptor *ssd; 1158 struct system_segment_descriptor *sd; 1159{ 1160 1161 sd->sd_lobase = (ssd->ssd_base) & 0xffffff; 1162 sd->sd_hibase = (ssd->ssd_base >> 24) & 0xfffffffffful; 1163 sd->sd_lolimit = (ssd->ssd_limit) & 0xffff; 1164 sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf; 1165 sd->sd_type = ssd->ssd_type; 1166 sd->sd_dpl = ssd->ssd_dpl; 1167 sd->sd_p = ssd->ssd_p; 1168 sd->sd_gran = ssd->ssd_gran; 1169} 1170 1171#if !defined(DEV_ATPIC) && defined(DEV_ISA) 1172#include <isa/isavar.h> 1173#include <isa/isareg.h> 1174/* 1175 * Return a bitmap of the current interrupt requests. This is 8259-specific 1176 * and is only suitable for use at probe time. 1177 * This is only here to pacify sio. It is NOT FATAL if this doesn't work. 1178 * It shouldn't be here. There should probably be an APIC centric 1179 * implementation in the apic driver code, if at all. 1180 */ 1181intrmask_t 1182isa_irq_pending(void) 1183{ 1184 u_char irr1; 1185 u_char irr2; 1186 1187 irr1 = inb(IO_ICU1); 1188 irr2 = inb(IO_ICU2); 1189 return ((irr2 << 8) | irr1); 1190} 1191#endif 1192 1193u_int basemem; 1194 1195/* 1196 * Populate the (physmap) array with base/bound pairs describing the 1197 * available physical memory in the system, then test this memory and 1198 * build the phys_avail array describing the actually-available memory. 1199 * 1200 * If we cannot accurately determine the physical memory map, then use 1201 * value from the 0xE801 call, and failing that, the RTC. 1202 * 1203 * Total memory size may be set by the kernel environment variable 1204 * hw.physmem or the compile-time define MAXMEM. 1205 * 1206 * XXX first should be vm_paddr_t. 1207 */ 1208static void 1209getmemsize(caddr_t kmdp, u_int64_t first) 1210{ 1211 int i, off, physmap_idx, pa_indx, da_indx; 1212 vm_paddr_t pa, physmap[PHYSMAP_SIZE]; 1213 u_long physmem_tunable; 1214 pt_entry_t *pte; 1215 struct bios_smap *smapbase, *smap, *smapend; 1216 u_int32_t smapsize; 1217 quad_t dcons_addr, dcons_size; 1218 1219 bzero(physmap, sizeof(physmap)); 1220 basemem = 0; 1221 physmap_idx = 0; 1222 1223 /* 1224 * get memory map from INT 15:E820, kindly supplied by the loader. 1225 * 1226 * subr_module.c says: 1227 * "Consumer may safely assume that size value precedes data." 1228 * ie: an int32_t immediately precedes smap. 1229 */ 1230 smapbase = (struct bios_smap *)preload_search_info(kmdp, 1231 MODINFO_METADATA | MODINFOMD_SMAP); 1232 if (smapbase == NULL) 1233 panic("No BIOS smap info from loader!"); 1234 1235 smapsize = *((u_int32_t *)smapbase - 1); 1236 smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize); 1237 1238 for (smap = smapbase; smap < smapend; smap++) { 1239 if (boothowto & RB_VERBOSE) 1240 printf("SMAP type=%02x base=%016lx len=%016lx\n", 1241 smap->type, smap->base, smap->length); 1242 1243 if (smap->type != SMAP_TYPE_MEMORY) 1244 continue; 1245 1246 if (smap->length == 0) 1247 continue; 1248 1249 for (i = 0; i <= physmap_idx; i += 2) { 1250 if (smap->base < physmap[i + 1]) { 1251 if (boothowto & RB_VERBOSE) 1252 printf( 1253 "Overlapping or non-monotonic memory region, ignoring second region\n"); 1254 continue; 1255 } 1256 } 1257 1258 if (smap->base == physmap[physmap_idx + 1]) { 1259 physmap[physmap_idx + 1] += smap->length; 1260 continue; 1261 } 1262 1263 physmap_idx += 2; 1264 if (physmap_idx == PHYSMAP_SIZE) { 1265 printf( 1266 "Too many segments in the physical address map, giving up\n"); 1267 break; 1268 } 1269 physmap[physmap_idx] = smap->base; 1270 physmap[physmap_idx + 1] = smap->base + smap->length; 1271 } 1272 1273 /* 1274 * Find the 'base memory' segment for SMP 1275 */ 1276 basemem = 0; 1277 for (i = 0; i <= physmap_idx; i += 2) { 1278 if (physmap[i] == 0x00000000) { 1279 basemem = physmap[i + 1] / 1024; 1280 break; 1281 } 1282 } 1283 if (basemem == 0) 1284 panic("BIOS smap did not include a basemem segment!"); 1285 1286#ifdef SMP 1287 /* make hole for AP bootstrap code */ 1288 physmap[1] = mp_bootaddress(physmap[1] / 1024); 1289#endif 1290 1291 /* 1292 * Maxmem isn't the "maximum memory", it's one larger than the 1293 * highest page of the physical address space. It should be 1294 * called something like "Maxphyspage". We may adjust this 1295 * based on ``hw.physmem'' and the results of the memory test. 1296 */ 1297 Maxmem = atop(physmap[physmap_idx + 1]); 1298 1299#ifdef MAXMEM 1300 Maxmem = MAXMEM / 4; 1301#endif 1302 1303 if (TUNABLE_ULONG_FETCH("hw.physmem", &physmem_tunable)) 1304 Maxmem = atop(physmem_tunable); 1305 1306 /* 1307 * Don't allow MAXMEM or hw.physmem to extend the amount of memory 1308 * in the system. 1309 */ 1310 if (Maxmem > atop(physmap[physmap_idx + 1])) 1311 Maxmem = atop(physmap[physmap_idx + 1]); 1312 1313 if (atop(physmap[physmap_idx + 1]) != Maxmem && 1314 (boothowto & RB_VERBOSE)) 1315 printf("Physical memory use set to %ldK\n", Maxmem * 4); 1316 1317 /* call pmap initialization to make new kernel address space */ 1318 pmap_bootstrap(&first); 1319 1320 /* 1321 * Size up each available chunk of physical memory. 1322 */ 1323 physmap[0] = PAGE_SIZE; /* mask off page 0 */ 1324 pa_indx = 0; 1325 da_indx = 1; 1326 phys_avail[pa_indx++] = physmap[0]; 1327 phys_avail[pa_indx] = physmap[0]; 1328 dump_avail[da_indx] = physmap[0]; 1329 pte = CMAP1; 1330 1331 /* 1332 * Get dcons buffer address 1333 */ 1334 if (getenv_quad("dcons.addr", &dcons_addr) == 0 || 1335 getenv_quad("dcons.size", &dcons_size) == 0) 1336 dcons_addr = 0; 1337 1338 /* 1339 * physmap is in bytes, so when converting to page boundaries, 1340 * round up the start address and round down the end address. 1341 */ 1342 for (i = 0; i <= physmap_idx; i += 2) { 1343 vm_paddr_t end; 1344 1345 end = ptoa((vm_paddr_t)Maxmem); 1346 if (physmap[i + 1] < end) 1347 end = trunc_page(physmap[i + 1]); 1348 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) { 1349 int tmp, page_bad, full; 1350 int *ptr = (int *)CADDR1; 1351 1352 full = FALSE; 1353 /* 1354 * block out kernel memory as not available. 1355 */ 1356 if (pa >= 0x100000 && pa < first) 1357 goto do_dump_avail; 1358 1359 /* 1360 * block out dcons buffer 1361 */ 1362 if (dcons_addr > 0 1363 && pa >= trunc_page(dcons_addr) 1364 && pa < dcons_addr + dcons_size) 1365 goto do_dump_avail; 1366 1367 page_bad = FALSE; 1368 1369 /* 1370 * map page into kernel: valid, read/write,non-cacheable 1371 */ 1372 *pte = pa | PG_V | PG_RW | PG_N; 1373 invltlb(); 1374 1375 tmp = *(int *)ptr; 1376 /* 1377 * Test for alternating 1's and 0's 1378 */ 1379 *(volatile int *)ptr = 0xaaaaaaaa; 1380 if (*(volatile int *)ptr != 0xaaaaaaaa) 1381 page_bad = TRUE; 1382 /* 1383 * Test for alternating 0's and 1's 1384 */ 1385 *(volatile int *)ptr = 0x55555555; 1386 if (*(volatile int *)ptr != 0x55555555) 1387 page_bad = TRUE; 1388 /* 1389 * Test for all 1's 1390 */ 1391 *(volatile int *)ptr = 0xffffffff; 1392 if (*(volatile int *)ptr != 0xffffffff) 1393 page_bad = TRUE; 1394 /* 1395 * Test for all 0's 1396 */ 1397 *(volatile int *)ptr = 0x0; 1398 if (*(volatile int *)ptr != 0x0) 1399 page_bad = TRUE; 1400 /* 1401 * Restore original value. 1402 */ 1403 *(int *)ptr = tmp; 1404 1405 /* 1406 * Adjust array of valid/good pages. 1407 */ 1408 if (page_bad == TRUE) 1409 continue; 1410 /* 1411 * If this good page is a continuation of the 1412 * previous set of good pages, then just increase 1413 * the end pointer. Otherwise start a new chunk. 1414 * Note that "end" points one higher than end, 1415 * making the range >= start and < end. 1416 * If we're also doing a speculative memory 1417 * test and we at or past the end, bump up Maxmem 1418 * so that we keep going. The first bad page 1419 * will terminate the loop. 1420 */ 1421 if (phys_avail[pa_indx] == pa) { 1422 phys_avail[pa_indx] += PAGE_SIZE; 1423 } else { 1424 pa_indx++; 1425 if (pa_indx == PHYS_AVAIL_ARRAY_END) { 1426 printf( 1427 "Too many holes in the physical address space, giving up\n"); 1428 pa_indx--; 1429 full = TRUE; 1430 goto do_dump_avail; 1431 } 1432 phys_avail[pa_indx++] = pa; /* start */ 1433 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */ 1434 } 1435 physmem++; 1436do_dump_avail: 1437 if (dump_avail[da_indx] == pa) { 1438 dump_avail[da_indx] += PAGE_SIZE; 1439 } else { 1440 da_indx++; 1441 if (da_indx == DUMP_AVAIL_ARRAY_END) { 1442 da_indx--; 1443 goto do_next; 1444 } 1445 dump_avail[da_indx++] = pa; /* start */ 1446 dump_avail[da_indx] = pa + PAGE_SIZE; /* end */ 1447 } 1448do_next: 1449 if (full) 1450 break; 1451 } 1452 } 1453 *pte = 0; 1454 invltlb(); 1455 1456 /* 1457 * XXX 1458 * The last chunk must contain at least one page plus the message 1459 * buffer to avoid complicating other code (message buffer address 1460 * calculation, etc.). 1461 */ 1462 while (phys_avail[pa_indx - 1] + PAGE_SIZE + 1463 round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) { 1464 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]); 1465 phys_avail[pa_indx--] = 0; 1466 phys_avail[pa_indx--] = 0; 1467 } 1468 1469 Maxmem = atop(phys_avail[pa_indx]); 1470 1471 /* Trim off space for the message buffer. */ 1472 phys_avail[pa_indx] -= round_page(MSGBUF_SIZE); 1473 1474 /* Map the message buffer. */ 1475 for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE) 1476 pmap_kenter((vm_offset_t)msgbufp + off, phys_avail[pa_indx] + 1477 off); 1478} 1479 1480u_int64_t 1481hammer_time(u_int64_t modulep, u_int64_t physfree) 1482{ 1483 caddr_t kmdp; 1484 int gsel_tss, x; 1485 struct pcpu *pc; 1486 struct nmi_pcpu *np; 1487 u_int64_t msr; 1488 char *env; 1489 1490 thread0.td_kstack = physfree + KERNBASE; 1491 bzero((void *)thread0.td_kstack, KSTACK_PAGES * PAGE_SIZE); 1492 physfree += KSTACK_PAGES * PAGE_SIZE; 1493 thread0.td_pcb = (struct pcb *) 1494 (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1; 1495 1496 /* 1497 * This may be done better later if it gets more high level 1498 * components in it. If so just link td->td_proc here. 1499 */ 1500 proc_linkup0(&proc0, &thread0); 1501 1502 preload_metadata = (caddr_t)(uintptr_t)(modulep + KERNBASE); 1503 preload_bootstrap_relocate(KERNBASE); 1504 kmdp = preload_search_by_type("elf kernel"); 1505 if (kmdp == NULL) 1506 kmdp = preload_search_by_type("elf64 kernel"); 1507 boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int); 1508 kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *) + KERNBASE; 1509#ifdef DDB 1510 ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t); 1511 ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t); 1512#endif 1513 1514 /* Init basic tunables, hz etc */ 1515 init_param1(); 1516 1517 /* 1518 * make gdt memory segments 1519 */ 1520 for (x = 0; x < NGDT; x++) { 1521 if (x != GPROC0_SEL && x != (GPROC0_SEL + 1) && 1522 x != GUSERLDT_SEL && x != (GUSERLDT_SEL) + 1) 1523 ssdtosd(&gdt_segs[x], &gdt[x]); 1524 } 1525 gdt_segs[GPROC0_SEL].ssd_base = (uintptr_t)&common_tss[0]; 1526 ssdtosyssd(&gdt_segs[GPROC0_SEL], 1527 (struct system_segment_descriptor *)&gdt[GPROC0_SEL]); 1528 1529 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 1530 r_gdt.rd_base = (long) gdt; 1531 lgdt(&r_gdt); 1532 pc = &__pcpu[0]; 1533 1534 wrmsr(MSR_FSBASE, 0); /* User value */ 1535 wrmsr(MSR_GSBASE, (u_int64_t)pc); 1536 wrmsr(MSR_KGSBASE, 0); /* User value while in the kernel */ 1537 1538 pcpu_init(pc, 0, sizeof(struct pcpu)); 1539 dpcpu_init((void *)(physfree + KERNBASE), 0); 1540 physfree += DPCPU_SIZE; 1541 PCPU_SET(prvspace, pc); 1542 PCPU_SET(curthread, &thread0); 1543 PCPU_SET(curpcb, thread0.td_pcb); 1544 PCPU_SET(tssp, &common_tss[0]); 1545 PCPU_SET(commontssp, &common_tss[0]); 1546 PCPU_SET(tss, (struct system_segment_descriptor *)&gdt[GPROC0_SEL]); 1547 PCPU_SET(ldt, (struct system_segment_descriptor *)&gdt[GUSERLDT_SEL]); 1548 PCPU_SET(fs32p, &gdt[GUFS32_SEL]); 1549 PCPU_SET(gs32p, &gdt[GUGS32_SEL]); 1550 1551 /* 1552 * Initialize mutexes. 1553 * 1554 * icu_lock: in order to allow an interrupt to occur in a critical 1555 * section, to set pcpu->ipending (etc...) properly, we 1556 * must be able to get the icu lock, so it can't be 1557 * under witness. 1558 */ 1559 mutex_init(); 1560 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS); 1561 mtx_init(&dt_lock, "descriptor tables", NULL, MTX_DEF); 1562 1563 /* exceptions */ 1564 for (x = 0; x < NIDT; x++) 1565 setidt(x, &IDTVEC(rsvd), SDT_SYSIGT, SEL_KPL, 0); 1566 setidt(IDT_DE, &IDTVEC(div), SDT_SYSIGT, SEL_KPL, 0); 1567 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYSIGT, SEL_KPL, 0); 1568 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYSIGT, SEL_KPL, 2); 1569 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYSIGT, SEL_UPL, 0); 1570 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYSIGT, SEL_KPL, 0); 1571 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYSIGT, SEL_KPL, 0); 1572 setidt(IDT_UD, &IDTVEC(ill), SDT_SYSIGT, SEL_KPL, 0); 1573 setidt(IDT_NM, &IDTVEC(dna), SDT_SYSIGT, SEL_KPL, 0); 1574 setidt(IDT_DF, &IDTVEC(dblfault), SDT_SYSIGT, SEL_KPL, 1); 1575 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYSIGT, SEL_KPL, 0); 1576 setidt(IDT_TS, &IDTVEC(tss), SDT_SYSIGT, SEL_KPL, 0); 1577 setidt(IDT_NP, &IDTVEC(missing), SDT_SYSIGT, SEL_KPL, 0); 1578 setidt(IDT_SS, &IDTVEC(stk), SDT_SYSIGT, SEL_KPL, 0); 1579 setidt(IDT_GP, &IDTVEC(prot), SDT_SYSIGT, SEL_KPL, 0); 1580 setidt(IDT_PF, &IDTVEC(page), SDT_SYSIGT, SEL_KPL, 0); 1581 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYSIGT, SEL_KPL, 0); 1582 setidt(IDT_AC, &IDTVEC(align), SDT_SYSIGT, SEL_KPL, 0); 1583 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYSIGT, SEL_KPL, 0); 1584 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYSIGT, SEL_KPL, 0); 1585 1586 r_idt.rd_limit = sizeof(idt0) - 1; 1587 r_idt.rd_base = (long) idt; 1588 lidt(&r_idt); 1589 1590 /* 1591 * Initialize the i8254 before the console so that console 1592 * initialization can use DELAY(). 1593 */ 1594 i8254_init(); 1595 1596 /* 1597 * Initialize the console before we print anything out. 1598 */ 1599 cninit(); 1600 1601#ifdef DEV_ISA 1602#ifdef DEV_ATPIC 1603 elcr_probe(); 1604 atpic_startup(); 1605#else 1606 /* Reset and mask the atpics and leave them shut down. */ 1607 atpic_reset(); 1608 1609 /* 1610 * Point the ICU spurious interrupt vectors at the APIC spurious 1611 * interrupt handler. 1612 */ 1613 setidt(IDT_IO_INTS + 7, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0); 1614 setidt(IDT_IO_INTS + 15, IDTVEC(spuriousint), SDT_SYSIGT, SEL_KPL, 0); 1615#endif 1616#else 1617#error "have you forgotten the isa device?"; 1618#endif 1619 1620 kdb_init(); 1621 1622#ifdef KDB 1623 if (boothowto & RB_KDB) 1624 kdb_enter(KDB_WHY_BOOTFLAGS, 1625 "Boot flags requested debugger"); 1626#endif 1627 1628 identify_cpu(); /* Final stage of CPU initialization */ 1629 initializecpu(); /* Initialize CPU registers */ 1630 1631 /* make an initial tss so cpu can get interrupt stack on syscall! */ 1632 common_tss[0].tss_rsp0 = thread0.td_kstack + \ 1633 KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb); 1634 /* Ensure the stack is aligned to 16 bytes */ 1635 common_tss[0].tss_rsp0 &= ~0xFul; 1636 PCPU_SET(rsp0, common_tss[0].tss_rsp0); 1637 1638 /* doublefault stack space, runs on ist1 */ 1639 common_tss[0].tss_ist1 = (long)&dblfault_stack[sizeof(dblfault_stack)]; 1640 1641 /* 1642 * NMI stack, runs on ist2. The pcpu pointer is stored just 1643 * above the start of the ist2 stack. 1644 */ 1645 np = ((struct nmi_pcpu *) &nmi0_stack[sizeof(nmi0_stack)]) - 1; 1646 np->np_pcpu = (register_t) pc; 1647 common_tss[0].tss_ist2 = (long) np; 1648 1649 /* Set the IO permission bitmap (empty due to tss seg limit) */ 1650 common_tss[0].tss_iobase = sizeof(struct amd64tss) + 1651 IOPAGES * PAGE_SIZE; 1652 1653 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); 1654 ltr(gsel_tss); 1655 1656 /* Set up the fast syscall stuff */ 1657 msr = rdmsr(MSR_EFER) | EFER_SCE; 1658 wrmsr(MSR_EFER, msr); 1659 wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall)); 1660 wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32)); 1661 msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) | 1662 ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48); 1663 wrmsr(MSR_STAR, msr); 1664 wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D); 1665 1666 getmemsize(kmdp, physfree); 1667 init_param2(physmem); 1668 1669 /* now running on new page tables, configured,and u/iom is accessible */ 1670 1671 msgbufinit(msgbufp, MSGBUF_SIZE); 1672 fpuinit(); 1673 1674 /* transfer to user mode */ 1675 1676 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL); 1677 _udatasel = GSEL(GUDATA_SEL, SEL_UPL); 1678 _ucode32sel = GSEL(GUCODE32_SEL, SEL_UPL); 1679 _ufssel = GSEL(GUFS32_SEL, SEL_UPL); 1680 _ugssel = GSEL(GUGS32_SEL, SEL_UPL); 1681 1682 load_ds(_udatasel); 1683 load_es(_udatasel); 1684 load_fs(_ufssel); 1685 1686 /* setup proc 0's pcb */ 1687 thread0.td_pcb->pcb_flags = 0; 1688 thread0.td_pcb->pcb_cr3 = KPML4phys; 1689 thread0.td_frame = &proc0_tf; 1690 1691 env = getenv("kernelname"); 1692 if (env != NULL) 1693 strlcpy(kernelname, env, sizeof(kernelname)); 1694 1695#ifdef XENHVM 1696 if (inw(0x10) == 0x49d2) { 1697 if (bootverbose) 1698 printf("Xen detected: disabling emulated block and network devices\n"); 1699 outw(0x10, 3); 1700 } 1701#endif 1702 1703 if (cpu_probe_amdc1e()) 1704 cpu_idle_fn = cpu_idle_amdc1e; 1705 1706 /* Location of kernel stack for locore */ 1707 return ((u_int64_t)thread0.td_pcb); 1708} 1709 1710void 1711cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) 1712{ 1713 1714 pcpu->pc_acpi_id = 0xffffffff; 1715} 1716 1717void 1718spinlock_enter(void) 1719{ 1720 struct thread *td; 1721 1722 td = curthread; 1723 if (td->td_md.md_spinlock_count == 0) 1724 td->td_md.md_saved_flags = intr_disable(); 1725 td->td_md.md_spinlock_count++; 1726 critical_enter(); 1727} 1728 1729void 1730spinlock_exit(void) 1731{ 1732 struct thread *td; 1733 1734 td = curthread; 1735 critical_exit(); 1736 td->td_md.md_spinlock_count--; 1737 if (td->td_md.md_spinlock_count == 0) 1738 intr_restore(td->td_md.md_saved_flags); 1739} 1740 1741/* 1742 * Construct a PCB from a trapframe. This is called from kdb_trap() where 1743 * we want to start a backtrace from the function that caused us to enter 1744 * the debugger. We have the context in the trapframe, but base the trace 1745 * on the PCB. The PCB doesn't have to be perfect, as long as it contains 1746 * enough for a backtrace. 1747 */ 1748void 1749makectx(struct trapframe *tf, struct pcb *pcb) 1750{ 1751 1752 pcb->pcb_r12 = tf->tf_r12; 1753 pcb->pcb_r13 = tf->tf_r13; 1754 pcb->pcb_r14 = tf->tf_r14; 1755 pcb->pcb_r15 = tf->tf_r15; 1756 pcb->pcb_rbp = tf->tf_rbp; 1757 pcb->pcb_rbx = tf->tf_rbx; 1758 pcb->pcb_rip = tf->tf_rip; 1759 pcb->pcb_rsp = (ISPL(tf->tf_cs)) ? tf->tf_rsp : (long)(tf + 1) - 8; 1760} 1761 1762int 1763ptrace_set_pc(struct thread *td, unsigned long addr) 1764{ 1765 td->td_frame->tf_rip = addr; 1766 return (0); 1767} 1768 1769int 1770ptrace_single_step(struct thread *td) 1771{ 1772 td->td_frame->tf_rflags |= PSL_T; 1773 return (0); 1774} 1775 1776int 1777ptrace_clear_single_step(struct thread *td) 1778{ 1779 td->td_frame->tf_rflags &= ~PSL_T; 1780 return (0); 1781} 1782 1783int 1784fill_regs(struct thread *td, struct reg *regs) 1785{ 1786 struct trapframe *tp; 1787 1788 tp = td->td_frame; 1789 regs->r_r15 = tp->tf_r15; 1790 regs->r_r14 = tp->tf_r14; 1791 regs->r_r13 = tp->tf_r13; 1792 regs->r_r12 = tp->tf_r12; 1793 regs->r_r11 = tp->tf_r11; 1794 regs->r_r10 = tp->tf_r10; 1795 regs->r_r9 = tp->tf_r9; 1796 regs->r_r8 = tp->tf_r8; 1797 regs->r_rdi = tp->tf_rdi; 1798 regs->r_rsi = tp->tf_rsi; 1799 regs->r_rbp = tp->tf_rbp; 1800 regs->r_rbx = tp->tf_rbx; 1801 regs->r_rdx = tp->tf_rdx; 1802 regs->r_rcx = tp->tf_rcx; 1803 regs->r_rax = tp->tf_rax; 1804 regs->r_rip = tp->tf_rip; 1805 regs->r_cs = tp->tf_cs; 1806 regs->r_rflags = tp->tf_rflags; 1807 regs->r_rsp = tp->tf_rsp; 1808 regs->r_ss = tp->tf_ss; 1809 if (tp->tf_flags & TF_HASSEGS) { 1810 regs->r_ds = tp->tf_ds; 1811 regs->r_es = tp->tf_es; 1812 regs->r_fs = tp->tf_fs; 1813 regs->r_gs = tp->tf_gs; 1814 } else { 1815 regs->r_ds = 0; 1816 regs->r_es = 0; 1817 regs->r_fs = 0; 1818 regs->r_gs = 0; 1819 } 1820 return (0); 1821} 1822 1823int 1824set_regs(struct thread *td, struct reg *regs) 1825{ 1826 struct trapframe *tp; 1827 register_t rflags; 1828 1829 tp = td->td_frame; 1830 rflags = regs->r_rflags & 0xffffffff; 1831 if (!EFL_SECURE(rflags, tp->tf_rflags) || !CS_SECURE(regs->r_cs)) 1832 return (EINVAL); 1833 tp->tf_r15 = regs->r_r15; 1834 tp->tf_r14 = regs->r_r14; 1835 tp->tf_r13 = regs->r_r13; 1836 tp->tf_r12 = regs->r_r12; 1837 tp->tf_r11 = regs->r_r11; 1838 tp->tf_r10 = regs->r_r10; 1839 tp->tf_r9 = regs->r_r9; 1840 tp->tf_r8 = regs->r_r8; 1841 tp->tf_rdi = regs->r_rdi; 1842 tp->tf_rsi = regs->r_rsi; 1843 tp->tf_rbp = regs->r_rbp; 1844 tp->tf_rbx = regs->r_rbx; 1845 tp->tf_rdx = regs->r_rdx; 1846 tp->tf_rcx = regs->r_rcx; 1847 tp->tf_rax = regs->r_rax; 1848 tp->tf_rip = regs->r_rip; 1849 tp->tf_cs = regs->r_cs; 1850 tp->tf_rflags = rflags; 1851 tp->tf_rsp = regs->r_rsp; 1852 tp->tf_ss = regs->r_ss; 1853 if (0) { /* XXXKIB */ 1854 tp->tf_ds = regs->r_ds; 1855 tp->tf_es = regs->r_es; 1856 tp->tf_fs = regs->r_fs; 1857 tp->tf_gs = regs->r_gs; 1858 tp->tf_flags = TF_HASSEGS; 1859 } 1860 td->td_pcb->pcb_flags |= PCB_FULLCTX; 1861 return (0); 1862} 1863 1864/* XXX check all this stuff! */ 1865/* externalize from sv_xmm */ 1866static void 1867fill_fpregs_xmm(struct savefpu *sv_xmm, struct fpreg *fpregs) 1868{ 1869 struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env; 1870 struct envxmm *penv_xmm = &sv_xmm->sv_env; 1871 int i; 1872 1873 /* pcb -> fpregs */ 1874 bzero(fpregs, sizeof(*fpregs)); 1875 1876 /* FPU control/status */ 1877 penv_fpreg->en_cw = penv_xmm->en_cw; 1878 penv_fpreg->en_sw = penv_xmm->en_sw; 1879 penv_fpreg->en_tw = penv_xmm->en_tw; 1880 penv_fpreg->en_opcode = penv_xmm->en_opcode; 1881 penv_fpreg->en_rip = penv_xmm->en_rip; 1882 penv_fpreg->en_rdp = penv_xmm->en_rdp; 1883 penv_fpreg->en_mxcsr = penv_xmm->en_mxcsr; 1884 penv_fpreg->en_mxcsr_mask = penv_xmm->en_mxcsr_mask; 1885 1886 /* FPU registers */ 1887 for (i = 0; i < 8; ++i) 1888 bcopy(sv_xmm->sv_fp[i].fp_acc.fp_bytes, fpregs->fpr_acc[i], 10); 1889 1890 /* SSE registers */ 1891 for (i = 0; i < 16; ++i) 1892 bcopy(sv_xmm->sv_xmm[i].xmm_bytes, fpregs->fpr_xacc[i], 16); 1893} 1894 1895/* internalize from fpregs into sv_xmm */ 1896static void 1897set_fpregs_xmm(struct fpreg *fpregs, struct savefpu *sv_xmm) 1898{ 1899 struct envxmm *penv_xmm = &sv_xmm->sv_env; 1900 struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env; 1901 int i; 1902 1903 /* fpregs -> pcb */ 1904 /* FPU control/status */ 1905 penv_xmm->en_cw = penv_fpreg->en_cw; 1906 penv_xmm->en_sw = penv_fpreg->en_sw; 1907 penv_xmm->en_tw = penv_fpreg->en_tw; 1908 penv_xmm->en_opcode = penv_fpreg->en_opcode; 1909 penv_xmm->en_rip = penv_fpreg->en_rip; 1910 penv_xmm->en_rdp = penv_fpreg->en_rdp; 1911 penv_xmm->en_mxcsr = penv_fpreg->en_mxcsr; 1912 penv_xmm->en_mxcsr_mask = penv_fpreg->en_mxcsr_mask & cpu_mxcsr_mask; 1913 1914 /* FPU registers */ 1915 for (i = 0; i < 8; ++i) 1916 bcopy(fpregs->fpr_acc[i], sv_xmm->sv_fp[i].fp_acc.fp_bytes, 10); 1917 1918 /* SSE registers */ 1919 for (i = 0; i < 16; ++i) 1920 bcopy(fpregs->fpr_xacc[i], sv_xmm->sv_xmm[i].xmm_bytes, 16); 1921} 1922 1923/* externalize from td->pcb */ 1924int 1925fill_fpregs(struct thread *td, struct fpreg *fpregs) 1926{ 1927 1928 fill_fpregs_xmm(&td->td_pcb->pcb_save, fpregs); 1929 return (0); 1930} 1931 1932/* internalize to td->pcb */ 1933int 1934set_fpregs(struct thread *td, struct fpreg *fpregs) 1935{ 1936 1937 set_fpregs_xmm(fpregs, &td->td_pcb->pcb_save); 1938 return (0); 1939} 1940 1941/* 1942 * Get machine context. 1943 */ 1944int 1945get_mcontext(struct thread *td, mcontext_t *mcp, int flags) 1946{ 1947 struct trapframe *tp; 1948 1949 tp = td->td_frame; 1950 PROC_LOCK(curthread->td_proc); 1951 mcp->mc_onstack = sigonstack(tp->tf_rsp); 1952 PROC_UNLOCK(curthread->td_proc); 1953 mcp->mc_r15 = tp->tf_r15; 1954 mcp->mc_r14 = tp->tf_r14; 1955 mcp->mc_r13 = tp->tf_r13; 1956 mcp->mc_r12 = tp->tf_r12; 1957 mcp->mc_r11 = tp->tf_r11; 1958 mcp->mc_r10 = tp->tf_r10; 1959 mcp->mc_r9 = tp->tf_r9; 1960 mcp->mc_r8 = tp->tf_r8; 1961 mcp->mc_rdi = tp->tf_rdi; 1962 mcp->mc_rsi = tp->tf_rsi; 1963 mcp->mc_rbp = tp->tf_rbp; 1964 mcp->mc_rbx = tp->tf_rbx; 1965 mcp->mc_rcx = tp->tf_rcx; 1966 mcp->mc_rflags = tp->tf_rflags; 1967 if (flags & GET_MC_CLEAR_RET) { 1968 mcp->mc_rax = 0; 1969 mcp->mc_rdx = 0; 1970 mcp->mc_rflags &= ~PSL_C; 1971 } else { 1972 mcp->mc_rax = tp->tf_rax; 1973 mcp->mc_rdx = tp->tf_rdx; 1974 } 1975 mcp->mc_rip = tp->tf_rip; 1976 mcp->mc_cs = tp->tf_cs; 1977 mcp->mc_rsp = tp->tf_rsp; 1978 mcp->mc_ss = tp->tf_ss; 1979 mcp->mc_ds = tp->tf_ds; 1980 mcp->mc_es = tp->tf_es; 1981 mcp->mc_fs = tp->tf_fs; 1982 mcp->mc_gs = tp->tf_gs; 1983 mcp->mc_flags = tp->tf_flags; 1984 mcp->mc_len = sizeof(*mcp); 1985 get_fpcontext(td, mcp); 1986 mcp->mc_fsbase = td->td_pcb->pcb_fsbase; 1987 mcp->mc_gsbase = td->td_pcb->pcb_gsbase; 1988 return (0); 1989} 1990 1991/* 1992 * Set machine context. 1993 * 1994 * However, we don't set any but the user modifiable flags, and we won't 1995 * touch the cs selector. 1996 */ 1997int 1998set_mcontext(struct thread *td, const mcontext_t *mcp) 1999{ 2000 struct trapframe *tp; 2001 long rflags; 2002 int ret; 2003 2004 tp = td->td_frame; 2005 if (mcp->mc_len != sizeof(*mcp) || 2006 (mcp->mc_flags & ~_MC_FLAG_MASK) != 0) 2007 return (EINVAL); 2008 rflags = (mcp->mc_rflags & PSL_USERCHANGE) | 2009 (tp->tf_rflags & ~PSL_USERCHANGE); 2010 ret = set_fpcontext(td, mcp); 2011 if (ret != 0) 2012 return (ret); 2013 tp->tf_r15 = mcp->mc_r15; 2014 tp->tf_r14 = mcp->mc_r14; 2015 tp->tf_r13 = mcp->mc_r13; 2016 tp->tf_r12 = mcp->mc_r12; 2017 tp->tf_r11 = mcp->mc_r11; 2018 tp->tf_r10 = mcp->mc_r10; 2019 tp->tf_r9 = mcp->mc_r9; 2020 tp->tf_r8 = mcp->mc_r8; 2021 tp->tf_rdi = mcp->mc_rdi; 2022 tp->tf_rsi = mcp->mc_rsi; 2023 tp->tf_rbp = mcp->mc_rbp; 2024 tp->tf_rbx = mcp->mc_rbx; 2025 tp->tf_rdx = mcp->mc_rdx; 2026 tp->tf_rcx = mcp->mc_rcx; 2027 tp->tf_rax = mcp->mc_rax; 2028 tp->tf_rip = mcp->mc_rip; 2029 tp->tf_rflags = rflags; 2030 tp->tf_rsp = mcp->mc_rsp; 2031 tp->tf_ss = mcp->mc_ss; 2032 tp->tf_flags = mcp->mc_flags; 2033 if (tp->tf_flags & TF_HASSEGS) { 2034 tp->tf_ds = mcp->mc_ds; 2035 tp->tf_es = mcp->mc_es; 2036 tp->tf_fs = mcp->mc_fs; 2037 tp->tf_gs = mcp->mc_gs; 2038 } 2039 if (mcp->mc_flags & _MC_HASBASES) { 2040 td->td_pcb->pcb_fsbase = mcp->mc_fsbase; 2041 td->td_pcb->pcb_gsbase = mcp->mc_gsbase; 2042 } 2043 td->td_pcb->pcb_flags |= PCB_FULLCTX; 2044 td->td_pcb->pcb_full_iret = 1; 2045 return (0); 2046} 2047 2048static void 2049get_fpcontext(struct thread *td, mcontext_t *mcp) 2050{ 2051 2052 mcp->mc_ownedfp = fpugetregs(td, (struct savefpu *)&mcp->mc_fpstate); 2053 mcp->mc_fpformat = fpuformat(); 2054} 2055 2056static int 2057set_fpcontext(struct thread *td, const mcontext_t *mcp) 2058{ 2059 struct savefpu *fpstate; 2060 2061 if (mcp->mc_fpformat == _MC_FPFMT_NODEV) 2062 return (0); 2063 else if (mcp->mc_fpformat != _MC_FPFMT_XMM) 2064 return (EINVAL); 2065 else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE) 2066 /* We don't care what state is left in the FPU or PCB. */ 2067 fpstate_drop(td); 2068 else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU || 2069 mcp->mc_ownedfp == _MC_FPOWNED_PCB) { 2070 /* 2071 * XXX we violate the dubious requirement that fpusetregs() 2072 * be called with interrupts disabled. 2073 * XXX obsolete on trap-16 systems? 2074 */ 2075 fpstate = (struct savefpu *)&mcp->mc_fpstate; 2076 fpstate->sv_env.en_mxcsr &= cpu_mxcsr_mask; 2077 fpusetregs(td, fpstate); 2078 } else 2079 return (EINVAL); 2080 return (0); 2081} 2082 2083void 2084fpstate_drop(struct thread *td) 2085{ 2086 register_t s; 2087 2088 s = intr_disable(); 2089 if (PCPU_GET(fpcurthread) == td) 2090 fpudrop(); 2091 /* 2092 * XXX force a full drop of the fpu. The above only drops it if we 2093 * owned it. 2094 * 2095 * XXX I don't much like fpugetregs()'s semantics of doing a full 2096 * drop. Dropping only to the pcb matches fnsave's behaviour. 2097 * We only need to drop to !PCB_INITDONE in sendsig(). But 2098 * sendsig() is the only caller of fpugetregs()... perhaps we just 2099 * have too many layers. 2100 */ 2101 curthread->td_pcb->pcb_flags &= ~PCB_FPUINITDONE; 2102 intr_restore(s); 2103} 2104 2105int 2106fill_dbregs(struct thread *td, struct dbreg *dbregs) 2107{ 2108 struct pcb *pcb; 2109 2110 if (td == NULL) { 2111 dbregs->dr[0] = rdr0(); 2112 dbregs->dr[1] = rdr1(); 2113 dbregs->dr[2] = rdr2(); 2114 dbregs->dr[3] = rdr3(); 2115 dbregs->dr[6] = rdr6(); 2116 dbregs->dr[7] = rdr7(); 2117 } else { 2118 pcb = td->td_pcb; 2119 dbregs->dr[0] = pcb->pcb_dr0; 2120 dbregs->dr[1] = pcb->pcb_dr1; 2121 dbregs->dr[2] = pcb->pcb_dr2; 2122 dbregs->dr[3] = pcb->pcb_dr3; 2123 dbregs->dr[6] = pcb->pcb_dr6; 2124 dbregs->dr[7] = pcb->pcb_dr7; 2125 } 2126 dbregs->dr[4] = 0; 2127 dbregs->dr[5] = 0; 2128 dbregs->dr[8] = 0; 2129 dbregs->dr[9] = 0; 2130 dbregs->dr[10] = 0; 2131 dbregs->dr[11] = 0; 2132 dbregs->dr[12] = 0; 2133 dbregs->dr[13] = 0; 2134 dbregs->dr[14] = 0; 2135 dbregs->dr[15] = 0; 2136 return (0); 2137} 2138 2139int 2140set_dbregs(struct thread *td, struct dbreg *dbregs) 2141{ 2142 struct pcb *pcb; 2143 int i; 2144 2145 if (td == NULL) { 2146 load_dr0(dbregs->dr[0]); 2147 load_dr1(dbregs->dr[1]); 2148 load_dr2(dbregs->dr[2]); 2149 load_dr3(dbregs->dr[3]); 2150 load_dr6(dbregs->dr[6]); 2151 load_dr7(dbregs->dr[7]); 2152 } else { 2153 /* 2154 * Don't let an illegal value for dr7 get set. Specifically, 2155 * check for undefined settings. Setting these bit patterns 2156 * result in undefined behaviour and can lead to an unexpected 2157 * TRCTRAP or a general protection fault right here. 2158 * Upper bits of dr6 and dr7 must not be set 2159 */ 2160 for (i = 0; i < 4; i++) { 2161 if (DBREG_DR7_ACCESS(dbregs->dr[7], i) == 0x02) 2162 return (EINVAL); 2163 if (td->td_frame->tf_cs == _ucode32sel && 2164 DBREG_DR7_LEN(dbregs->dr[7], i) == DBREG_DR7_LEN_8) 2165 return (EINVAL); 2166 } 2167 if ((dbregs->dr[6] & 0xffffffff00000000ul) != 0 || 2168 (dbregs->dr[7] & 0xffffffff00000000ul) != 0) 2169 return (EINVAL); 2170 2171 pcb = td->td_pcb; 2172 2173 /* 2174 * Don't let a process set a breakpoint that is not within the 2175 * process's address space. If a process could do this, it 2176 * could halt the system by setting a breakpoint in the kernel 2177 * (if ddb was enabled). Thus, we need to check to make sure 2178 * that no breakpoints are being enabled for addresses outside 2179 * process's address space. 2180 * 2181 * XXX - what about when the watched area of the user's 2182 * address space is written into from within the kernel 2183 * ... wouldn't that still cause a breakpoint to be generated 2184 * from within kernel mode? 2185 */ 2186 2187 if (DBREG_DR7_ENABLED(dbregs->dr[7], 0)) { 2188 /* dr0 is enabled */ 2189 if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS) 2190 return (EINVAL); 2191 } 2192 if (DBREG_DR7_ENABLED(dbregs->dr[7], 1)) { 2193 /* dr1 is enabled */ 2194 if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS) 2195 return (EINVAL); 2196 } 2197 if (DBREG_DR7_ENABLED(dbregs->dr[7], 2)) { 2198 /* dr2 is enabled */ 2199 if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS) 2200 return (EINVAL); 2201 } 2202 if (DBREG_DR7_ENABLED(dbregs->dr[7], 3)) { 2203 /* dr3 is enabled */ 2204 if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS) 2205 return (EINVAL); 2206 } 2207 2208 pcb->pcb_dr0 = dbregs->dr[0]; 2209 pcb->pcb_dr1 = dbregs->dr[1]; 2210 pcb->pcb_dr2 = dbregs->dr[2]; 2211 pcb->pcb_dr3 = dbregs->dr[3]; 2212 pcb->pcb_dr6 = dbregs->dr[6]; 2213 pcb->pcb_dr7 = dbregs->dr[7]; 2214 2215 pcb->pcb_flags |= PCB_DBREGS; 2216 } 2217 2218 return (0); 2219} 2220 2221void 2222reset_dbregs(void) 2223{ 2224 2225 load_dr7(0); /* Turn off the control bits first */ 2226 load_dr0(0); 2227 load_dr1(0); 2228 load_dr2(0); 2229 load_dr3(0); 2230 load_dr6(0); 2231} 2232 2233/* 2234 * Return > 0 if a hardware breakpoint has been hit, and the 2235 * breakpoint was in user space. Return 0, otherwise. 2236 */ 2237int 2238user_dbreg_trap(void) 2239{ 2240 u_int64_t dr7, dr6; /* debug registers dr6 and dr7 */ 2241 u_int64_t bp; /* breakpoint bits extracted from dr6 */ 2242 int nbp; /* number of breakpoints that triggered */ 2243 caddr_t addr[4]; /* breakpoint addresses */ 2244 int i; 2245 2246 dr7 = rdr7(); 2247 if ((dr7 & 0x000000ff) == 0) { 2248 /* 2249 * all GE and LE bits in the dr7 register are zero, 2250 * thus the trap couldn't have been caused by the 2251 * hardware debug registers 2252 */ 2253 return 0; 2254 } 2255 2256 nbp = 0; 2257 dr6 = rdr6(); 2258 bp = dr6 & 0x0000000f; 2259 2260 if (!bp) { 2261 /* 2262 * None of the breakpoint bits are set meaning this 2263 * trap was not caused by any of the debug registers 2264 */ 2265 return 0; 2266 } 2267 2268 /* 2269 * at least one of the breakpoints were hit, check to see 2270 * which ones and if any of them are user space addresses 2271 */ 2272 2273 if (bp & 0x01) { 2274 addr[nbp++] = (caddr_t)rdr0(); 2275 } 2276 if (bp & 0x02) { 2277 addr[nbp++] = (caddr_t)rdr1(); 2278 } 2279 if (bp & 0x04) { 2280 addr[nbp++] = (caddr_t)rdr2(); 2281 } 2282 if (bp & 0x08) { 2283 addr[nbp++] = (caddr_t)rdr3(); 2284 } 2285 2286 for (i = 0; i < nbp; i++) { 2287 if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) { 2288 /* 2289 * addr[i] is in user space 2290 */ 2291 return nbp; 2292 } 2293 } 2294 2295 /* 2296 * None of the breakpoints are in user space. 2297 */ 2298 return 0; 2299} 2300 2301#ifdef KDB 2302 2303/* 2304 * Provide inb() and outb() as functions. They are normally only available as 2305 * inline functions, thus cannot be called from the debugger. 2306 */ 2307 2308/* silence compiler warnings */ 2309u_char inb_(u_short); 2310void outb_(u_short, u_char); 2311 2312u_char 2313inb_(u_short port) 2314{ 2315 return inb(port); 2316} 2317 2318void 2319outb_(u_short port, u_char data) 2320{ 2321 outb(port, data); 2322} 2323 2324#endif /* KDB */ 2325