machdep.c revision 141374
1/*- 2 * Copyright (c) 2003 Peter Wemm. 3 * Copyright (c) 1992 Terrence R. Lambert. 4 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to Berkeley by 8 * William Jolitz. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the University of 21 * California, Berkeley and its contributors. 22 * 4. Neither the name of the University nor the names of its contributors 23 * may be used to endorse or promote products derived from this software 24 * without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 36 * SUCH DAMAGE. 
37 * 38 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 39 */ 40 41#include <sys/cdefs.h> 42__FBSDID("$FreeBSD: head/sys/amd64/amd64/machdep.c 141374 2005-02-05 23:16:27Z njl $"); 43 44#include "opt_atalk.h" 45#include "opt_atpic.h" 46#include "opt_compat.h" 47#include "opt_cpu.h" 48#include "opt_ddb.h" 49#include "opt_inet.h" 50#include "opt_ipx.h" 51#include "opt_isa.h" 52#include "opt_kstack_pages.h" 53#include "opt_maxmem.h" 54#include "opt_msgbuf.h" 55#include "opt_perfmon.h" 56 57#include <sys/param.h> 58#include <sys/systm.h> 59#include <sys/bio.h> 60#include <sys/buf.h> 61#include <sys/bus.h> 62#include <sys/callout.h> 63#include <sys/cpu.h> 64#include <sys/eventhandler.h> 65#include <sys/imgact.h> 66#include <sys/kdb.h> 67#include <sys/kernel.h> 68#include <sys/ktr.h> 69#include <sys/linker.h> 70#include <sys/lock.h> 71#include <sys/malloc.h> 72#include <sys/memrange.h> 73#include <sys/mutex.h> 74#include <sys/pcpu.h> 75#include <sys/proc.h> 76#include <sys/reboot.h> 77#include <sys/msgbuf.h> 78#include <sys/sched.h> 79#include <sys/signalvar.h> 80#include <sys/sysent.h> 81#include <sys/sysctl.h> 82#include <sys/sysproto.h> 83#include <sys/ucontext.h> 84#include <sys/vmmeter.h> 85 86#include <machine/clock.h> 87#include <machine/pcb.h> 88 89#include <vm/vm.h> 90#include <vm/vm_param.h> 91#include <vm/vm_kern.h> 92#include <vm/vm_object.h> 93#include <vm/vm_page.h> 94#include <vm/vm_map.h> 95#include <vm/vm_pager.h> 96#include <vm/vm_extern.h> 97 98#include <sys/exec.h> 99#include <sys/cons.h> 100 101#ifdef DDB 102#ifndef KDB 103#error KDB must be enabled in order for DDB to work! 104#endif 105#endif 106#include <ddb/ddb.h> 107 108#include <net/netisr.h> 109 110#include <machine/cpu.h> 111#include <machine/cputypes.h> 112#include <machine/reg.h> 113#include <machine/clock.h> 114#include <machine/specialreg.h> 115#include <machine/intr_machdep.h> 116#include <machine/md_var.h> 117#include <machine/pc/bios.h> 118#include <machine/metadata.h> 119#include <machine/proc.h> 120#ifdef PERFMON 121#include <machine/perfmon.h> 122#endif 123#include <machine/tss.h> 124#ifdef SMP 125#include <machine/smp.h> 126#endif 127 128#include <amd64/isa/icu.h> 129 130#include <isa/isareg.h> 131#include <isa/rtc.h> 132#include <sys/ptrace.h> 133#include <machine/sigframe.h> 134 135/* Sanity check for __curthread() */ 136CTASSERT(offsetof(struct pcpu, pc_curthread) == 0); 137 138extern u_int64_t hammer_time(u_int64_t, u_int64_t); 139extern void dblfault_handler(void); 140 141extern void printcpuinfo(void); /* XXX header file */ 142extern void identify_cpu(void); 143extern void panicifcpuunsupported(void); 144 145#define CS_SECURE(cs) (ISPL(cs) == SEL_UPL) 146#define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0) 147 148static void cpu_startup(void *); 149static void get_fpcontext(struct thread *td, mcontext_t *mcp); 150static int set_fpcontext(struct thread *td, const mcontext_t *mcp); 151SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL) 152 153#ifdef DDB 154extern vm_offset_t ksym_start, ksym_end; 155#endif 156 157int _udatasel, _ucodesel, _ucode32sel; 158 159int cold = 1; 160 161long Maxmem = 0; 162 163vm_paddr_t phys_avail[20]; 164 165/* must be 2 less so 0 0 can signal end of chunks */ 166#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2) 167 168struct kva_md_info kmi; 169 170static struct trapframe proc0_tf; 171struct region_descriptor r_gdt, r_idt; 172 173struct pcpu __pcpu[MAXCPU]; 174 175struct mtx icu_lock; 176 177struct mem_range_softc 
mem_range_softc; 178 179static void 180cpu_startup(dummy) 181 void *dummy; 182{ 183 /* 184 * Good {morning,afternoon,evening,night}. 185 */ 186 startrtclock(); 187 printcpuinfo(); 188 panicifcpuunsupported(); 189#ifdef PERFMON 190 perfmon_init(); 191#endif 192 printf("real memory = %ju (%ju MB)\n", ptoa((uintmax_t)Maxmem), 193 ptoa((uintmax_t)Maxmem) / 1048576); 194 /* 195 * Display any holes after the first chunk of extended memory. 196 */ 197 if (bootverbose) { 198 int indx; 199 200 printf("Physical memory chunk(s):\n"); 201 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) { 202 vm_paddr_t size; 203 204 size = phys_avail[indx + 1] - phys_avail[indx]; 205 printf( 206 "0x%016jx - 0x%016jx, %ju bytes (%ju pages)\n", 207 (uintmax_t)phys_avail[indx], 208 (uintmax_t)phys_avail[indx + 1] - 1, 209 (uintmax_t)size, (uintmax_t)size / PAGE_SIZE); 210 } 211 } 212 213 vm_ksubmap_init(&kmi); 214 215 printf("avail memory = %ju (%ju MB)\n", 216 ptoa((uintmax_t)cnt.v_free_count), 217 ptoa((uintmax_t)cnt.v_free_count) / 1048576); 218 219 /* 220 * Set up buffers, so they can be used to read disk labels. 221 */ 222 bufinit(); 223 vm_pager_bufferinit(); 224 225 cpu_setregs(); 226} 227 228/* 229 * Send an interrupt to process. 230 * 231 * Stack is set up to allow sigcode stored 232 * at top to call routine, followed by kcall 233 * to sigreturn routine below. After sigreturn 234 * resets the signal mask, the stack, and the 235 * frame pointer, it returns to the user 236 * specified pc, psl. 237 */ 238void 239sendsig(catcher, sig, mask, code) 240 sig_t catcher; 241 int sig; 242 sigset_t *mask; 243 u_long code; 244{ 245 struct sigframe sf, *sfp; 246 struct proc *p; 247 struct thread *td; 248 struct sigacts *psp; 249 char *sp; 250 struct trapframe *regs; 251 int oonstack; 252 253 td = curthread; 254 p = td->td_proc; 255 PROC_LOCK_ASSERT(p, MA_OWNED); 256 psp = p->p_sigacts; 257 mtx_assert(&psp->ps_mtx, MA_OWNED); 258 regs = td->td_frame; 259 oonstack = sigonstack(regs->tf_rsp); 260 261 /* Save user context. */ 262 bzero(&sf, sizeof(sf)); 263 sf.sf_uc.uc_sigmask = *mask; 264 sf.sf_uc.uc_stack = td->td_sigstk; 265 sf.sf_uc.uc_stack.ss_flags = (td->td_pflags & TDP_ALTSTACK) 266 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE; 267 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0; 268 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_rdi, sizeof(*regs)); 269 sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */ 270 get_fpcontext(td, &sf.sf_uc.uc_mcontext); 271 fpstate_drop(td); 272 273 /* Allocate space for the signal handler context. */ 274 if ((td->td_pflags & TDP_ALTSTACK) != 0 && !oonstack && 275 SIGISMEMBER(psp->ps_sigonstack, sig)) { 276 sp = td->td_sigstk.ss_sp + 277 td->td_sigstk.ss_size - sizeof(struct sigframe); 278#if defined(COMPAT_43) 279 td->td_sigstk.ss_flags |= SS_ONSTACK; 280#endif 281 } else 282 sp = (char *)regs->tf_rsp - sizeof(struct sigframe) - 128; 283 /* Align to 16 bytes. */ 284 sfp = (struct sigframe *)((unsigned long)sp & ~0xFul); 285 286 /* Translate the signal if appropriate. */ 287 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize) 288 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; 289 290 /* Build the argument list for the signal handler. */ 291 regs->tf_rdi = sig; /* arg 1 in %rdi */ 292 regs->tf_rdx = (register_t)&sfp->sf_uc; /* arg 3 in %rdx */ 293 if (SIGISMEMBER(psp->ps_siginfo, sig)) { 294 /* Signal handler installed with SA_SIGINFO. 
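 * Such a handler takes (int sig, siginfo_t *info, ucontext_t *ucp); under
 * the amd64 calling convention those arguments are passed in %rdi, %rsi
 * and %rdx, which is what the tf_rdi/tf_rsi/tf_rdx assignments here set up.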
*/ 295 regs->tf_rsi = (register_t)&sfp->sf_si; /* arg 2 in %rsi */ 296 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; 297 298 /* Fill in POSIX parts */ 299 sf.sf_si.si_signo = sig; 300 sf.sf_si.si_code = code; 301 regs->tf_rcx = regs->tf_addr; /* arg 4 in %rcx */ 302 } else { 303 /* Old FreeBSD-style arguments. */ 304 regs->tf_rsi = code; /* arg 2 in %rsi */ 305 regs->tf_rcx = regs->tf_addr; /* arg 4 in %rcx */ 306 sf.sf_ahu.sf_handler = catcher; 307 } 308 mtx_unlock(&psp->ps_mtx); 309 PROC_UNLOCK(p); 310 311 /* 312 * Copy the sigframe out to the user's stack. 313 */ 314 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) { 315#ifdef DEBUG 316 printf("process %ld has trashed its stack\n", (long)p->p_pid); 317#endif 318 PROC_LOCK(p); 319 sigexit(td, SIGILL); 320 } 321 322 regs->tf_rsp = (long)sfp; 323 regs->tf_rip = PS_STRINGS - *(p->p_sysent->sv_szsigcode); 324 regs->tf_rflags &= ~PSL_T; 325 regs->tf_cs = _ucodesel; 326 PROC_LOCK(p); 327 mtx_lock(&psp->ps_mtx); 328} 329 330/* 331 * Build siginfo_t for SA thread 332 */ 333void 334cpu_thread_siginfo(int sig, u_long code, siginfo_t *si) 335{ 336 struct proc *p; 337 struct thread *td; 338 struct trapframe *regs; 339 340 td = curthread; 341 p = td->td_proc; 342 regs = td->td_frame; 343 PROC_LOCK_ASSERT(p, MA_OWNED); 344 345 bzero(si, sizeof(*si)); 346 si->si_signo = sig; 347 si->si_code = code; 348 si->si_addr = (void *)regs->tf_addr; 349 /* XXXKSE fill other fields */ 350} 351 352/* 353 * System call to cleanup state after a signal 354 * has been taken. Reset signal mask and 355 * stack state from context left by sendsig (above). 356 * Return to previous pc and psl as specified by 357 * context left by sendsig. Check carefully to 358 * make sure that the user has not modified the 359 * state to gain improper privileges. 360 * 361 * MPSAFE 362 */ 363int 364sigreturn(td, uap) 365 struct thread *td; 366 struct sigreturn_args /* { 367 const __ucontext *sigcntxp; 368 } */ *uap; 369{ 370 ucontext_t uc; 371 struct proc *p = td->td_proc; 372 struct trapframe *regs; 373 const ucontext_t *ucp; 374 long rflags; 375 int cs, error, ret; 376 377 error = copyin(uap->sigcntxp, &uc, sizeof(uc)); 378 if (error != 0) 379 return (error); 380 ucp = &uc; 381 regs = td->td_frame; 382 rflags = ucp->uc_mcontext.mc_rflags; 383 /* 384 * Don't allow users to change privileged or reserved flags. 385 */ 386 /* 387 * XXX do allow users to change the privileged flag PSL_RF. 388 * The cpu sets PSL_RF in tf_rflags for faults. Debuggers 389 * should sometimes set it there too. tf_rflags is kept in 390 * the signal context during signal handling and there is no 391 * other place to remember it, so the PSL_RF bit may be 392 * corrupted by the signal handler without us knowing. 393 * Corruption of the PSL_RF bit at worst causes one more or 394 * one less debugger trap, so allowing it is fairly harmless. 395 */ 396 if (!EFL_SECURE(rflags & ~PSL_RF, regs->tf_rflags & ~PSL_RF)) { 397 printf("sigreturn: rflags = 0x%lx\n", rflags); 398 return (EINVAL); 399 } 400 401 /* 402 * Don't allow users to load a valid privileged %cs. Let the 403 * hardware check for invalid selectors, excess privilege in 404 * other selectors, invalid %eip's and invalid %esp's. 
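 * CS_SECURE() only verifies that the requested %cs carries user privilege
 * (RPL == SEL_UPL), so a forged context cannot return to a kernel code
 * segment; everything else is left for the hardware to reject.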
405 */ 406 cs = ucp->uc_mcontext.mc_cs; 407 if (!CS_SECURE(cs)) { 408 printf("sigreturn: cs = 0x%x\n", cs); 409 trapsignal(td, SIGBUS, T_PROTFLT); 410 return (EINVAL); 411 } 412 413 ret = set_fpcontext(td, &ucp->uc_mcontext); 414 if (ret != 0) 415 return (ret); 416 bcopy(&ucp->uc_mcontext.mc_rdi, regs, sizeof(*regs)); 417 418 PROC_LOCK(p); 419#if defined(COMPAT_43) 420 if (ucp->uc_mcontext.mc_onstack & 1) 421 td->td_sigstk.ss_flags |= SS_ONSTACK; 422 else 423 td->td_sigstk.ss_flags &= ~SS_ONSTACK; 424#endif 425 426 td->td_sigmask = ucp->uc_sigmask; 427 SIG_CANTMASK(td->td_sigmask); 428 signotify(td); 429 PROC_UNLOCK(p); 430 td->td_pcb->pcb_flags |= PCB_FULLCTX; 431 return (EJUSTRETURN); 432} 433 434#ifdef COMPAT_FREEBSD4 435int 436freebsd4_sigreturn(struct thread *td, struct freebsd4_sigreturn_args *uap) 437{ 438 439 return sigreturn(td, (struct sigreturn_args *)uap); 440} 441#endif 442 443 444/* 445 * Machine dependent boot() routine 446 * 447 * I haven't seen anything to put here yet 448 * Possibly some stuff might be grafted back here from boot() 449 */ 450void 451cpu_boot(int howto) 452{ 453} 454 455/* Get current clock frequency for the given cpu id. */ 456int 457cpu_est_clockrate(int cpu_id, uint64_t *rate) 458{ 459 register_t reg; 460 uint64_t tsc1, tsc2; 461 462 if (pcpu_find(cpu_id) == NULL || rate == NULL) 463 return (EINVAL); 464 465 /* If we're booting, trust the rate calibrated moments ago. */ 466 if (cold) { 467 *rate = tsc_freq; 468 return (0); 469 } 470 471#ifdef SMP 472 /* Schedule ourselves on the indicated cpu. */ 473 mtx_lock_spin(&sched_lock); 474 sched_bind(curthread, cpu_id); 475 mtx_unlock_spin(&sched_lock); 476#endif 477 478 /* Calibrate by measuring a short delay. */ 479 reg = intr_disable(); 480 tsc1 = rdtsc(); 481 DELAY(1000); 482 tsc2 = rdtsc(); 483 intr_restore(reg); 484 485#ifdef SMP 486 mtx_lock_spin(&sched_lock); 487 sched_unbind(curthread); 488 mtx_unlock_spin(&sched_lock); 489#endif 490 491 /* 492 * Calculate the difference in readings, convert to hertz, and 493 * subtract 0.5% of the total. Empirical testing has shown that 494 * overhead in DELAY() works out to approximately this value. 495 */ 496 tsc2 -= tsc1; 497 *rate = tsc2 * 1000 - tsc2 * 5; 498 return (0); 499} 500 501/* 502 * Shut down the CPU as much as possible 503 */ 504void 505cpu_halt(void) 506{ 507 for (;;) 508 __asm__ ("hlt"); 509} 510 511/* 512 * Hook to idle the CPU when possible. In the SMP case we default to 513 * off because a halted cpu will not currently pick up a new thread in the 514 * run queue until the next timer tick. If turned on this will result in 515 * approximately a 4.2% loss in real time performance in buildworld tests 516 * (but improves user and sys times oddly enough), and saves approximately 517 * 5% in power consumption on an idle machine (tests w/2xCPU 1.1GHz P3). 518 * 519 * XXX we need to have a cpu mask of idle cpus and generate an IPI or 520 * otherwise generate some sort of interrupt to wake up cpus sitting in HLT. 521 * Then we can have our cake and eat it too. 522 * 523 * XXX I'm turning it on for SMP as well by default for now. It seems to 524 * help lock contention somewhat, and this is critical for HTT. -Peter 525 */ 526static int cpu_idle_hlt = 1; 527SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW, 528 &cpu_idle_hlt, 0, "Idle loop HLT enable"); 529 530static void 531cpu_idle_default(void) 532{ 533 /* 534 * we must absolutely guarantee that hlt is the 535 * absolute next instruction after sti or we 536 * introduce a timing window.
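 * This works because sti does not allow interrupts to be recognized until
 * the instruction following it has completed, so a wakeup interrupt cannot
 * be delivered between the sti and the hlt below.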
537 */ 538 __asm __volatile("sti; hlt"); 539} 540 541/* 542 * Note that we have to be careful here to avoid a race between checking 543 * sched_runnable() and actually halting. If we don't do this, we may waste 544 * the time between calling hlt and the next interrupt even though there 545 * is a runnable process. 546 */ 547void 548cpu_idle(void) 549{ 550 551#ifdef SMP 552 if (mp_grab_cpu_hlt()) 553 return; 554#endif 555 if (cpu_idle_hlt) { 556 disable_intr(); 557 if (sched_runnable()) 558 enable_intr(); 559 else 560 (*cpu_idle_hook)(); 561 } 562} 563 564/* Other subsystems (e.g., ACPI) can hook this later. */ 565void (*cpu_idle_hook)(void) = cpu_idle_default; 566 567/* 568 * Clear registers on exec 569 */ 570void 571exec_setregs(td, entry, stack, ps_strings) 572 struct thread *td; 573 u_long entry; 574 u_long stack; 575 u_long ps_strings; 576{ 577 struct trapframe *regs = td->td_frame; 578 struct pcb *pcb = td->td_pcb; 579 580 wrmsr(MSR_FSBASE, 0); 581 wrmsr(MSR_KGSBASE, 0); /* User value while we're in the kernel */ 582 pcb->pcb_fsbase = 0; 583 pcb->pcb_gsbase = 0; 584 load_ds(_udatasel); 585 load_es(_udatasel); 586 load_fs(_udatasel); 587 load_gs(_udatasel); 588 pcb->pcb_ds = _udatasel; 589 pcb->pcb_es = _udatasel; 590 pcb->pcb_fs = _udatasel; 591 pcb->pcb_gs = _udatasel; 592 593 bzero((char *)regs, sizeof(struct trapframe)); 594 regs->tf_rip = entry; 595 regs->tf_rsp = ((stack - 8) & ~0xFul) + 8; 596 regs->tf_rdi = stack; /* argv */ 597 regs->tf_rflags = PSL_USER | (regs->tf_rflags & PSL_T); 598 regs->tf_ss = _udatasel; 599 regs->tf_cs = _ucodesel; 600 601 /* 602 * Reset the hardware debug registers if they were in use. 603 * They won't have any meaning for the newly exec'd process. 604 */ 605 if (pcb->pcb_flags & PCB_DBREGS) { 606 pcb->pcb_dr0 = 0; 607 pcb->pcb_dr1 = 0; 608 pcb->pcb_dr2 = 0; 609 pcb->pcb_dr3 = 0; 610 pcb->pcb_dr6 = 0; 611 pcb->pcb_dr7 = 0; 612 if (pcb == PCPU_GET(curpcb)) { 613 /* 614 * Clear the debug registers on the running 615 * CPU, otherwise they will end up affecting 616 * the next process we switch to. 617 */ 618 reset_dbregs(); 619 } 620 pcb->pcb_flags &= ~PCB_DBREGS; 621 } 622 623 /* 624 * Drop the FP state if we hold it, so that the process gets a 625 * clean FP state if it uses the FPU again. 626 */ 627 fpstate_drop(td); 628} 629 630void 631cpu_setregs(void) 632{ 633 register_t cr0; 634 635 cr0 = rcr0(); 636 /* 637 * CR0_MP, CR0_NE and CR0_TS are also set by npx_probe() for the 638 * BSP. See the comments there about why we set them. 
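 * CR0_WP makes the page-level write-protect bit effective in supervisor
 * mode, and CR0_AM permits alignment checking for user code that also
 * sets PSL_AC.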
639 */ 640 cr0 |= CR0_MP | CR0_NE | CR0_TS | CR0_WP | CR0_AM; 641 load_cr0(cr0); 642} 643 644static int 645sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS) 646{ 647 int error; 648 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, 649 req); 650 if (!error && req->newptr) 651 resettodr(); 652 return (error); 653} 654 655SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW, 656 &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", ""); 657 658SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set, 659 CTLFLAG_RW, &disable_rtc_set, 0, ""); 660 661SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock, 662 CTLFLAG_RW, &wall_cmos_clock, 0, ""); 663 664/* 665 * Initialize 386 and configure to run kernel 666 */ 667 668/* 669 * Initialize segments & interrupt table 670 */ 671 672struct user_segment_descriptor gdt[NGDT * MAXCPU];/* global descriptor table */ 673static struct gate_descriptor idt0[NIDT]; 674struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */ 675 676static char dblfault_stack[PAGE_SIZE] __aligned(16); 677 678struct amd64tss common_tss[MAXCPU]; 679 680/* software prototypes -- in more palatable form */ 681struct soft_segment_descriptor gdt_segs[] = { 682/* GNULL_SEL 0 Null Descriptor */ 683{ 0x0, /* segment base address */ 684 0x0, /* length */ 685 0, /* segment type */ 686 0, /* segment descriptor priority level */ 687 0, /* segment descriptor present */ 688 0, /* long */ 689 0, /* default 32 vs 16 bit size */ 690 0 /* limit granularity (byte/page units)*/ }, 691/* GCODE_SEL 1 Code Descriptor for kernel */ 692{ 0x0, /* segment base address */ 693 0xfffff, /* length - all address space */ 694 SDT_MEMERA, /* segment type */ 695 SEL_KPL, /* segment descriptor priority level */ 696 1, /* segment descriptor present */ 697 1, /* long */ 698 0, /* default 32 vs 16 bit size */ 699 1 /* limit granularity (byte/page units)*/ }, 700/* GDATA_SEL 2 Data Descriptor for kernel */ 701{ 0x0, /* segment base address */ 702 0xfffff, /* length - all address space */ 703 SDT_MEMRWA, /* segment type */ 704 SEL_KPL, /* segment descriptor priority level */ 705 1, /* segment descriptor present */ 706 1, /* long */ 707 0, /* default 32 vs 16 bit size */ 708 1 /* limit granularity (byte/page units)*/ }, 709/* GUCODE32_SEL 3 32 bit Code Descriptor for user */ 710{ 0x0, /* segment base address */ 711 0xfffff, /* length - all address space */ 712 SDT_MEMERA, /* segment type */ 713 SEL_UPL, /* segment descriptor priority level */ 714 1, /* segment descriptor present */ 715 0, /* long */ 716 1, /* default 32 vs 16 bit size */ 717 1 /* limit granularity (byte/page units)*/ }, 718/* GUDATA_SEL 4 32/64 bit Data Descriptor for user */ 719{ 0x0, /* segment base address */ 720 0xfffff, /* length - all address space */ 721 SDT_MEMRWA, /* segment type */ 722 SEL_UPL, /* segment descriptor priority level */ 723 1, /* segment descriptor present */ 724 0, /* long */ 725 1, /* default 32 vs 16 bit size */ 726 1 /* limit granularity (byte/page units)*/ }, 727/* GUCODE_SEL 5 64 bit Code Descriptor for user */ 728{ 0x0, /* segment base address */ 729 0xfffff, /* length - all address space */ 730 SDT_MEMERA, /* segment type */ 731 SEL_UPL, /* segment descriptor priority level */ 732 1, /* segment descriptor present */ 733 1, /* long */ 734 0, /* default 32 vs 16 bit size */ 735 1 /* limit granularity (byte/page units)*/ }, 736/* GPROC0_SEL 6 Proc 0 Tss Descriptor */ 737{ 738 0x0, /* segment base address */ 739 sizeof(struct amd64tss)-1,/* length - all address space */ 740 SDT_SYSTSS, /* segment 
type */ 741 SEL_KPL, /* segment descriptor priority level */ 742 1, /* segment descriptor present */ 743 0, /* long */ 744 0, /* unused - default 32 vs 16 bit size */ 745 0 /* limit granularity (byte/page units)*/ }, 746/* Actually, the TSS is a system descriptor which is double size */ 747{ 0x0, /* segment base address */ 748 0x0, /* length */ 749 0, /* segment type */ 750 0, /* segment descriptor priority level */ 751 0, /* segment descriptor present */ 752 0, /* long */ 753 0, /* default 32 vs 16 bit size */ 754 0 /* limit granularity (byte/page units)*/ }, 755}; 756 757void 758setidt(idx, func, typ, dpl, ist) 759 int idx; 760 inthand_t *func; 761 int typ; 762 int dpl; 763 int ist; 764{ 765 struct gate_descriptor *ip; 766 767 ip = idt + idx; 768 ip->gd_looffset = (uintptr_t)func; 769 ip->gd_selector = GSEL(GCODE_SEL, SEL_KPL); 770 ip->gd_ist = ist; 771 ip->gd_xx = 0; 772 ip->gd_type = typ; 773 ip->gd_dpl = dpl; 774 ip->gd_p = 1; 775 ip->gd_hioffset = ((uintptr_t)func)>>16 ; 776} 777 778#define IDTVEC(name) __CONCAT(X,name) 779 780extern inthand_t 781 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl), 782 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm), 783 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot), 784 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align), 785 IDTVEC(xmm), IDTVEC(dblfault), 786 IDTVEC(fast_syscall), IDTVEC(fast_syscall32); 787 788void 789sdtossd(sd, ssd) 790 struct user_segment_descriptor *sd; 791 struct soft_segment_descriptor *ssd; 792{ 793 794 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase; 795 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit; 796 ssd->ssd_type = sd->sd_type; 797 ssd->ssd_dpl = sd->sd_dpl; 798 ssd->ssd_p = sd->sd_p; 799 ssd->ssd_long = sd->sd_long; 800 ssd->ssd_def32 = sd->sd_def32; 801 ssd->ssd_gran = sd->sd_gran; 802} 803 804void 805ssdtosd(ssd, sd) 806 struct soft_segment_descriptor *ssd; 807 struct user_segment_descriptor *sd; 808{ 809 810 sd->sd_lobase = (ssd->ssd_base) & 0xffffff; 811 sd->sd_hibase = (ssd->ssd_base >> 24) & 0xff; 812 sd->sd_lolimit = (ssd->ssd_limit) & 0xffff; 813 sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf; 814 sd->sd_type = ssd->ssd_type; 815 sd->sd_dpl = ssd->ssd_dpl; 816 sd->sd_p = ssd->ssd_p; 817 sd->sd_long = ssd->ssd_long; 818 sd->sd_def32 = ssd->ssd_def32; 819 sd->sd_gran = ssd->ssd_gran; 820} 821 822void 823ssdtosyssd(ssd, sd) 824 struct soft_segment_descriptor *ssd; 825 struct system_segment_descriptor *sd; 826{ 827 828 sd->sd_lobase = (ssd->ssd_base) & 0xffffff; 829 sd->sd_hibase = (ssd->ssd_base >> 24) & 0xfffffffffful; 830 sd->sd_lolimit = (ssd->ssd_limit) & 0xffff; 831 sd->sd_hilimit = (ssd->ssd_limit >> 16) & 0xf; 832 sd->sd_type = ssd->ssd_type; 833 sd->sd_dpl = ssd->ssd_dpl; 834 sd->sd_p = ssd->ssd_p; 835 sd->sd_gran = ssd->ssd_gran; 836} 837 838#if !defined(DEV_ATPIC) && defined(DEV_ISA) 839#include <isa/isavar.h> 840u_int 841isa_irq_pending(void) 842{ 843 844 return (0); 845} 846#endif 847 848#define PHYSMAP_SIZE (2 * 8) 849 850u_int basemem; 851 852/* 853 * Populate the (physmap) array with base/bound pairs describing the 854 * available physical memory in the system, then test this memory and 855 * build the phys_avail array describing the actually-available memory. 856 * 857 * If we cannot accurately determine the physical memory map, then use 858 * value from the 0xE801 call, and failing that, the RTC. 859 * 860 * Total memory size may be set by the kernel environment variable 861 * hw.physmem or the compile-time define MAXMEM. 
862 * 863 * XXX first should be vm_paddr_t. 864 */ 865static void 866getmemsize(caddr_t kmdp, u_int64_t first) 867{ 868 int i, physmap_idx, pa_indx; 869 vm_paddr_t pa, physmap[PHYSMAP_SIZE]; 870 pt_entry_t *pte; 871 char *cp; 872 struct bios_smap *smapbase, *smap, *smapend; 873 u_int32_t smapsize; 874 quad_t dcons_addr, dcons_size; 875 876 bzero(physmap, sizeof(physmap)); 877 basemem = 0; 878 physmap_idx = 0; 879 880 /* 881 * Get the memory map from INT 15:E820, kindly supplied by the loader. 882 * 883 * subr_module.c says: 884 * "Consumer may safely assume that size value precedes data." 885 * i.e., an int32_t immediately precedes smap. 886 */ 887 smapbase = (struct bios_smap *)preload_search_info(kmdp, 888 MODINFO_METADATA | MODINFOMD_SMAP); 889 if (smapbase == NULL) 890 panic("No BIOS smap info from loader!"); 891 892 smapsize = *((u_int32_t *)smapbase - 1); 893 smapend = (struct bios_smap *)((uintptr_t)smapbase + smapsize); 894 895 for (smap = smapbase; smap < smapend; smap++) { 896 if (boothowto & RB_VERBOSE) 897 printf("SMAP type=%02x base=%016lx len=%016lx\n", 898 smap->type, smap->base, smap->length); 899 900 if (smap->type != 0x01) 901 continue; 902 903 if (smap->length == 0) 904 continue; 905 906 for (i = 0; i <= physmap_idx; i += 2) { 907 if (smap->base < physmap[i + 1]) { 908 if (boothowto & RB_VERBOSE) 909 printf( 910 "Overlapping or non-monotonic memory region, ignoring second region\n"); 911 goto next_run; 912 } 913 } 914 915 if (smap->base == physmap[physmap_idx + 1]) { 916 physmap[physmap_idx + 1] += smap->length; 917next_run: 918 continue; 919 } 920 921 physmap_idx += 2; 922 if (physmap_idx == PHYSMAP_SIZE) { 923 printf( 924 "Too many segments in the physical address map, giving up\n"); 925 break; 926 } 927 physmap[physmap_idx] = smap->base; 928 physmap[physmap_idx + 1] = smap->base + smap->length; 929 } 930 931 /* 932 * Find the 'base memory' segment for SMP 933 */ 934 basemem = 0; 935 for (i = 0; i <= physmap_idx; i += 2) { 936 if (physmap[i] == 0x00000000) { 937 basemem = physmap[i + 1] / 1024; 938 break; 939 } 940 } 941 if (basemem == 0) 942 panic("BIOS smap did not include a basemem segment!"); 943 944#ifdef SMP 945 /* make hole for AP bootstrap code */ 946 physmap[1] = mp_bootaddress(physmap[1] / 1024); 947#endif 948 949 /* 950 * Maxmem isn't the "maximum memory", it's one larger than the 951 * highest page of the physical address space. It should be 952 * called something like "Maxphyspage". We may adjust this 953 * based on ``hw.physmem'' and the results of the memory test. 954 */ 955 Maxmem = atop(physmap[physmap_idx + 1]); 956 957#ifdef MAXMEM 958 Maxmem = MAXMEM / 4; 959#endif 960 961 /* 962 * hw.physmem is a size in bytes; we also allow k, m, and g suffixes 963 * for the appropriate modifiers. This overrides MAXMEM.
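 * The suffix handling below depends on switch fallthrough: 'g' shifts the
 * value left by 10 three times, 'm' twice and 'k' once, i.e. each suffix
 * scales the value by the corresponding power of 1024.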
964 */ 965 cp = getenv("hw.physmem"); 966 if (cp != NULL) { 967 u_int64_t AllowMem, sanity; 968 char *ep; 969 970 sanity = AllowMem = strtouq(cp, &ep, 0); 971 if ((ep != cp) && (*ep != 0)) { 972 switch(*ep) { 973 case 'g': 974 case 'G': 975 AllowMem <<= 10; 976 case 'm': 977 case 'M': 978 AllowMem <<= 10; 979 case 'k': 980 case 'K': 981 AllowMem <<= 10; 982 break; 983 default: 984 AllowMem = sanity = 0; 985 } 986 if (AllowMem < sanity) 987 AllowMem = 0; 988 } 989 if (AllowMem == 0) 990 printf("Ignoring invalid memory size of '%s'\n", cp); 991 else 992 Maxmem = atop(AllowMem); 993 freeenv(cp); 994 } 995 996 if (atop(physmap[physmap_idx + 1]) != Maxmem && 997 (boothowto & RB_VERBOSE)) 998 printf("Physical memory use set to %ldK\n", Maxmem * 4); 999 1000 /* 1001 * If Maxmem has been increased beyond what the system has detected, 1002 * extend the last memory segment to the new limit. 1003 */ 1004 if (atop(physmap[physmap_idx + 1]) < Maxmem) 1005 physmap[physmap_idx + 1] = ptoa((vm_paddr_t)Maxmem); 1006 1007 /* call pmap initialization to make new kernel address space */ 1008 pmap_bootstrap(&first); 1009 1010 /* 1011 * Size up each available chunk of physical memory. 1012 */ 1013 physmap[0] = PAGE_SIZE; /* mask off page 0 */ 1014 pa_indx = 0; 1015 phys_avail[pa_indx++] = physmap[0]; 1016 phys_avail[pa_indx] = physmap[0]; 1017 pte = CMAP1; 1018 1019 /* 1020 * Get dcons buffer address 1021 */ 1022 if (getenv_quad("dcons.addr", &dcons_addr) == 0 || 1023 getenv_quad("dcons.size", &dcons_size) == 0) 1024 dcons_addr = 0; 1025 1026 /* 1027 * physmap is in bytes, so when converting to page boundaries, 1028 * round up the start address and round down the end address. 1029 */ 1030 for (i = 0; i <= physmap_idx; i += 2) { 1031 vm_paddr_t end; 1032 1033 end = ptoa((vm_paddr_t)Maxmem); 1034 if (physmap[i + 1] < end) 1035 end = trunc_page(physmap[i + 1]); 1036 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) { 1037 int tmp, page_bad; 1038 int *ptr = (int *)CADDR1; 1039 1040 /* 1041 * block out kernel memory as not available. 1042 */ 1043 if (pa >= 0x100000 && pa < first) 1044 continue; 1045 1046 /* 1047 * block out dcons buffer 1048 */ 1049 if (dcons_addr > 0 1050 && pa >= trunc_page(dcons_addr) 1051 && pa < dcons_addr + dcons_size) 1052 continue; 1053 1054 page_bad = FALSE; 1055 1056 /* 1057 * map page into kernel: valid, read/write,non-cacheable 1058 */ 1059 *pte = pa | PG_V | PG_RW | PG_N; 1060 invltlb(); 1061 1062 tmp = *(int *)ptr; 1063 /* 1064 * Test for alternating 1's and 0's 1065 */ 1066 *(volatile int *)ptr = 0xaaaaaaaa; 1067 if (*(volatile int *)ptr != 0xaaaaaaaa) 1068 page_bad = TRUE; 1069 /* 1070 * Test for alternating 0's and 1's 1071 */ 1072 *(volatile int *)ptr = 0x55555555; 1073 if (*(volatile int *)ptr != 0x55555555) 1074 page_bad = TRUE; 1075 /* 1076 * Test for all 1's 1077 */ 1078 *(volatile int *)ptr = 0xffffffff; 1079 if (*(volatile int *)ptr != 0xffffffff) 1080 page_bad = TRUE; 1081 /* 1082 * Test for all 0's 1083 */ 1084 *(volatile int *)ptr = 0x0; 1085 if (*(volatile int *)ptr != 0x0) 1086 page_bad = TRUE; 1087 /* 1088 * Restore original value. 1089 */ 1090 *(int *)ptr = tmp; 1091 1092 /* 1093 * Adjust array of valid/good pages. 1094 */ 1095 if (page_bad == TRUE) 1096 continue; 1097 /* 1098 * If this good page is a continuation of the 1099 * previous set of good pages, then just increase 1100 * the end pointer. Otherwise start a new chunk. 1101 * Note that "end" points one higher than end, 1102 * making the range >= start and < end. 
1103 * If we're also doing a speculative memory 1104 * test and we are at or past the end, bump up Maxmem 1105 * so that we keep going. The first bad page 1106 * will terminate the loop. 1107 */ 1108 if (phys_avail[pa_indx] == pa) { 1109 phys_avail[pa_indx] += PAGE_SIZE; 1110 } else { 1111 pa_indx++; 1112 if (pa_indx == PHYS_AVAIL_ARRAY_END) { 1113 printf( 1114 "Too many holes in the physical address space, giving up\n"); 1115 pa_indx--; 1116 break; 1117 } 1118 phys_avail[pa_indx++] = pa; /* start */ 1119 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */ 1120 } 1121 physmem++; 1122 } 1123 } 1124 *pte = 0; 1125 invltlb(); 1126 1127 /* 1128 * XXX 1129 * The last chunk must contain at least one page plus the message 1130 * buffer to avoid complicating other code (message buffer address 1131 * calculation, etc.). 1132 */ 1133 while (phys_avail[pa_indx - 1] + PAGE_SIZE + 1134 round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) { 1135 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]); 1136 phys_avail[pa_indx--] = 0; 1137 phys_avail[pa_indx--] = 0; 1138 } 1139 1140 Maxmem = atop(phys_avail[pa_indx]); 1141 1142 /* Trim off space for the message buffer. */ 1143 phys_avail[pa_indx] -= round_page(MSGBUF_SIZE); 1144 1145 avail_end = phys_avail[pa_indx]; 1146} 1147 1148u_int64_t 1149hammer_time(u_int64_t modulep, u_int64_t physfree) 1150{ 1151 caddr_t kmdp; 1152 int gsel_tss, off, x; 1153 struct pcpu *pc; 1154 u_int64_t msr; 1155 char *env; 1156 1157#ifdef DEV_ISA 1158 /* Preemptively mask the atpics and leave them shut down */ 1159 outb(IO_ICU1 + ICU_IMR_OFFSET, 0xff); 1160 outb(IO_ICU2 + ICU_IMR_OFFSET, 0xff); 1161#else 1162#error "have you forgotten the isa device?"; 1163#endif 1164 1165 thread0.td_kstack = physfree + KERNBASE; 1166 bzero((void *)thread0.td_kstack, KSTACK_PAGES * PAGE_SIZE); 1167 physfree += KSTACK_PAGES * PAGE_SIZE; 1168 thread0.td_pcb = (struct pcb *) 1169 (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1; 1170 1171 /* 1172 * This may be done better later if it gets more high level 1173 * components in it. If so just link td->td_proc here.
1174 */ 1175 proc_linkup(&proc0, &ksegrp0, &thread0); 1176 1177 preload_metadata = (caddr_t)(uintptr_t)(modulep + KERNBASE); 1178 preload_bootstrap_relocate(KERNBASE); 1179 kmdp = preload_search_by_type("elf kernel"); 1180 if (kmdp == NULL) 1181 kmdp = preload_search_by_type("elf64 kernel"); 1182 boothowto = MD_FETCH(kmdp, MODINFOMD_HOWTO, int); 1183 kern_envp = MD_FETCH(kmdp, MODINFOMD_ENVP, char *) + KERNBASE; 1184#ifdef DDB 1185 ksym_start = MD_FETCH(kmdp, MODINFOMD_SSYM, uintptr_t); 1186 ksym_end = MD_FETCH(kmdp, MODINFOMD_ESYM, uintptr_t); 1187#endif 1188 1189 /* Init basic tunables, hz etc */ 1190 init_param1(); 1191 1192 /* 1193 * make gdt memory segments 1194 */ 1195 gdt_segs[GPROC0_SEL].ssd_base = (uintptr_t)&common_tss[0]; 1196 1197 for (x = 0; x < NGDT; x++) { 1198 if (x != GPROC0_SEL && x != (GPROC0_SEL + 1)) 1199 ssdtosd(&gdt_segs[x], &gdt[x]); 1200 } 1201 ssdtosyssd(&gdt_segs[GPROC0_SEL], 1202 (struct system_segment_descriptor *)&gdt[GPROC0_SEL]); 1203 1204 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 1205 r_gdt.rd_base = (long) gdt; 1206 lgdt(&r_gdt); 1207 pc = &__pcpu[0]; 1208 1209 wrmsr(MSR_FSBASE, 0); /* User value */ 1210 wrmsr(MSR_GSBASE, (u_int64_t)pc); 1211 wrmsr(MSR_KGSBASE, 0); /* User value while in the kernel */ 1212 1213 pcpu_init(pc, 0, sizeof(struct pcpu)); 1214 PCPU_SET(prvspace, pc); 1215 PCPU_SET(curthread, &thread0); 1216 PCPU_SET(curpcb, thread0.td_pcb); 1217 PCPU_SET(tssp, &common_tss[0]); 1218 1219 /* 1220 * Initialize mutexes. 1221 * 1222 * icu_lock: in order to allow an interrupt to occur in a critical 1223 * section, to set pcpu->ipending (etc...) properly, we 1224 * must be able to get the icu lock, so it can't be 1225 * under witness. 1226 */ 1227 mutex_init(); 1228 mtx_init(&clock_lock, "clk", NULL, MTX_SPIN); 1229 mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS); 1230 1231 /* exceptions */ 1232 for (x = 0; x < NIDT; x++) 1233 setidt(x, &IDTVEC(rsvd), SDT_SYSIGT, SEL_KPL, 0); 1234 setidt(IDT_DE, &IDTVEC(div), SDT_SYSIGT, SEL_KPL, 0); 1235 setidt(IDT_DB, &IDTVEC(dbg), SDT_SYSIGT, SEL_KPL, 0); 1236 setidt(IDT_NMI, &IDTVEC(nmi), SDT_SYSIGT, SEL_KPL, 0); 1237 setidt(IDT_BP, &IDTVEC(bpt), SDT_SYSIGT, SEL_UPL, 0); 1238 setidt(IDT_OF, &IDTVEC(ofl), SDT_SYSIGT, SEL_KPL, 0); 1239 setidt(IDT_BR, &IDTVEC(bnd), SDT_SYSIGT, SEL_KPL, 0); 1240 setidt(IDT_UD, &IDTVEC(ill), SDT_SYSIGT, SEL_KPL, 0); 1241 setidt(IDT_NM, &IDTVEC(dna), SDT_SYSIGT, SEL_KPL, 0); 1242 setidt(IDT_DF, &IDTVEC(dblfault), SDT_SYSIGT, SEL_KPL, 1); 1243 setidt(IDT_FPUGP, &IDTVEC(fpusegm), SDT_SYSIGT, SEL_KPL, 0); 1244 setidt(IDT_TS, &IDTVEC(tss), SDT_SYSIGT, SEL_KPL, 0); 1245 setidt(IDT_NP, &IDTVEC(missing), SDT_SYSIGT, SEL_KPL, 0); 1246 setidt(IDT_SS, &IDTVEC(stk), SDT_SYSIGT, SEL_KPL, 0); 1247 setidt(IDT_GP, &IDTVEC(prot), SDT_SYSIGT, SEL_KPL, 0); 1248 setidt(IDT_PF, &IDTVEC(page), SDT_SYSIGT, SEL_KPL, 0); 1249 setidt(IDT_MF, &IDTVEC(fpu), SDT_SYSIGT, SEL_KPL, 0); 1250 setidt(IDT_AC, &IDTVEC(align), SDT_SYSIGT, SEL_KPL, 0); 1251 setidt(IDT_MC, &IDTVEC(mchk), SDT_SYSIGT, SEL_KPL, 0); 1252 setidt(IDT_XF, &IDTVEC(xmm), SDT_SYSIGT, SEL_KPL, 0); 1253 1254 r_idt.rd_limit = sizeof(idt0) - 1; 1255 r_idt.rd_base = (long) idt; 1256 lidt(&r_idt); 1257 1258 /* 1259 * Initialize the console before we print anything out. 
1260 */ 1261 cninit(); 1262 1263#ifdef DEV_ATPIC 1264 elcr_probe(); 1265 atpic_startup(); 1266#endif 1267 1268 kdb_init(); 1269 1270#ifdef KDB 1271 if (boothowto & RB_KDB) 1272 kdb_enter("Boot flags requested debugger"); 1273#endif 1274 1275 identify_cpu(); /* Final stage of CPU initialization */ 1276 initializecpu(); /* Initialize CPU registers */ 1277 1278 /* make an initial tss so cpu can get interrupt stack on syscall! */ 1279 common_tss[0].tss_rsp0 = thread0.td_kstack + \ 1280 KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb); 1281 /* Ensure the stack is aligned to 16 bytes */ 1282 common_tss[0].tss_rsp0 &= ~0xFul; 1283 PCPU_SET(rsp0, common_tss[0].tss_rsp0); 1284 1285 /* doublefault stack space, runs on ist1 */ 1286 common_tss[0].tss_ist1 = (long)&dblfault_stack[sizeof(dblfault_stack)]; 1287 1288 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); 1289 ltr(gsel_tss); 1290 1291 /* Set up the fast syscall stuff */ 1292 msr = rdmsr(MSR_EFER) | EFER_SCE; 1293 wrmsr(MSR_EFER, msr); 1294 wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall)); 1295 wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32)); 1296 msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) | 1297 ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48); 1298 wrmsr(MSR_STAR, msr); 1299 wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D); 1300 1301 getmemsize(kmdp, physfree); 1302 init_param2(physmem); 1303 1304 /* now running on new page tables, configured,and u/iom is accessible */ 1305 1306 /* Map the message buffer. */ 1307 for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE) 1308 pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off); 1309 1310 msgbufinit(msgbufp, MSGBUF_SIZE); 1311 fpuinit(); 1312 1313 /* transfer to user mode */ 1314 1315 _ucodesel = GSEL(GUCODE_SEL, SEL_UPL); 1316 _udatasel = GSEL(GUDATA_SEL, SEL_UPL); 1317 _ucode32sel = GSEL(GUCODE32_SEL, SEL_UPL); 1318 1319 /* setup proc 0's pcb */ 1320 thread0.td_pcb->pcb_flags = 0; /* XXXKSE */ 1321 thread0.td_pcb->pcb_cr3 = KPML4phys; 1322 thread0.td_frame = &proc0_tf; 1323 1324 env = getenv("kernelname"); 1325 if (env != NULL) 1326 strlcpy(kernelname, env, sizeof(kernelname)); 1327 1328 /* Location of kernel stack for locore */ 1329 return ((u_int64_t)thread0.td_pcb); 1330} 1331 1332void 1333cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size) 1334{ 1335 1336 pcpu->pc_acpi_id = 0xffffffff; 1337} 1338 1339/* 1340 * Construct a PCB from a trapframe. This is called from kdb_trap() where 1341 * we want to start a backtrace from the function that caused us to enter 1342 * the debugger. We have the context in the trapframe, but base the trace 1343 * on the PCB. The PCB doesn't have to be perfect, as long as it contains 1344 * enough for a backtrace. 1345 */ 1346void 1347makectx(struct trapframe *tf, struct pcb *pcb) 1348{ 1349 1350 pcb->pcb_r12 = tf->tf_r12; 1351 pcb->pcb_r13 = tf->tf_r13; 1352 pcb->pcb_r14 = tf->tf_r14; 1353 pcb->pcb_r15 = tf->tf_r15; 1354 pcb->pcb_rbp = tf->tf_rbp; 1355 pcb->pcb_rbx = tf->tf_rbx; 1356 pcb->pcb_rip = tf->tf_rip; 1357 pcb->pcb_rsp = (ISPL(tf->tf_cs)) ? 
tf->tf_rsp : (long)(tf + 1) - 8; 1358} 1359 1360int 1361ptrace_set_pc(struct thread *td, unsigned long addr) 1362{ 1363 td->td_frame->tf_rip = addr; 1364 return (0); 1365} 1366 1367int 1368ptrace_single_step(struct thread *td) 1369{ 1370 td->td_frame->tf_rflags |= PSL_T; 1371 return (0); 1372} 1373 1374int 1375ptrace_clear_single_step(struct thread *td) 1376{ 1377 td->td_frame->tf_rflags &= ~PSL_T; 1378 return (0); 1379} 1380 1381int 1382fill_regs(struct thread *td, struct reg *regs) 1383{ 1384 struct pcb *pcb; 1385 struct trapframe *tp; 1386 1387 tp = td->td_frame; 1388 regs->r_r15 = tp->tf_r15; 1389 regs->r_r14 = tp->tf_r14; 1390 regs->r_r13 = tp->tf_r13; 1391 regs->r_r12 = tp->tf_r12; 1392 regs->r_r11 = tp->tf_r11; 1393 regs->r_r10 = tp->tf_r10; 1394 regs->r_r9 = tp->tf_r9; 1395 regs->r_r8 = tp->tf_r8; 1396 regs->r_rdi = tp->tf_rdi; 1397 regs->r_rsi = tp->tf_rsi; 1398 regs->r_rbp = tp->tf_rbp; 1399 regs->r_rbx = tp->tf_rbx; 1400 regs->r_rdx = tp->tf_rdx; 1401 regs->r_rcx = tp->tf_rcx; 1402 regs->r_rax = tp->tf_rax; 1403 regs->r_rip = tp->tf_rip; 1404 regs->r_cs = tp->tf_cs; 1405 regs->r_rflags = tp->tf_rflags; 1406 regs->r_rsp = tp->tf_rsp; 1407 regs->r_ss = tp->tf_ss; 1408 pcb = td->td_pcb; 1409 return (0); 1410} 1411 1412int 1413set_regs(struct thread *td, struct reg *regs) 1414{ 1415 struct pcb *pcb; 1416 struct trapframe *tp; 1417 register_t rflags; 1418 1419 tp = td->td_frame; 1420 rflags = regs->r_rflags & 0xffffffff; 1421 if (!EFL_SECURE(rflags, tp->tf_rflags) || !CS_SECURE(regs->r_cs)) 1422 return (EINVAL); 1423 tp->tf_r15 = regs->r_r15; 1424 tp->tf_r14 = regs->r_r14; 1425 tp->tf_r13 = regs->r_r13; 1426 tp->tf_r12 = regs->r_r12; 1427 tp->tf_r11 = regs->r_r11; 1428 tp->tf_r10 = regs->r_r10; 1429 tp->tf_r9 = regs->r_r9; 1430 tp->tf_r8 = regs->r_r8; 1431 tp->tf_rdi = regs->r_rdi; 1432 tp->tf_rsi = regs->r_rsi; 1433 tp->tf_rbp = regs->r_rbp; 1434 tp->tf_rbx = regs->r_rbx; 1435 tp->tf_rdx = regs->r_rdx; 1436 tp->tf_rcx = regs->r_rcx; 1437 tp->tf_rax = regs->r_rax; 1438 tp->tf_rip = regs->r_rip; 1439 tp->tf_cs = regs->r_cs; 1440 tp->tf_rflags = rflags; 1441 tp->tf_rsp = regs->r_rsp; 1442 tp->tf_ss = regs->r_ss; 1443 pcb = td->td_pcb; 1444 return (0); 1445} 1446 1447/* XXX check all this stuff! 
*/ 1448/* externalize from sv_xmm */ 1449static void 1450fill_fpregs_xmm(struct savefpu *sv_xmm, struct fpreg *fpregs) 1451{ 1452 struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env; 1453 struct envxmm *penv_xmm = &sv_xmm->sv_env; 1454 int i; 1455 1456 /* pcb -> fpregs */ 1457 bzero(fpregs, sizeof(*fpregs)); 1458 1459 /* FPU control/status */ 1460 penv_fpreg->en_cw = penv_xmm->en_cw; 1461 penv_fpreg->en_sw = penv_xmm->en_sw; 1462 penv_fpreg->en_tw = penv_xmm->en_tw; 1463 penv_fpreg->en_opcode = penv_xmm->en_opcode; 1464 penv_fpreg->en_rip = penv_xmm->en_rip; 1465 penv_fpreg->en_rdp = penv_xmm->en_rdp; 1466 penv_fpreg->en_mxcsr = penv_xmm->en_mxcsr; 1467 penv_fpreg->en_mxcsr_mask = penv_xmm->en_mxcsr_mask; 1468 1469 /* FPU registers */ 1470 for (i = 0; i < 8; ++i) 1471 bcopy(sv_xmm->sv_fp[i].fp_acc.fp_bytes, fpregs->fpr_acc[i], 10); 1472 1473 /* SSE registers */ 1474 for (i = 0; i < 16; ++i) 1475 bcopy(sv_xmm->sv_xmm[i].xmm_bytes, fpregs->fpr_xacc[i], 16); 1476} 1477 1478/* internalize from fpregs into sv_xmm */ 1479static void 1480set_fpregs_xmm(struct fpreg *fpregs, struct savefpu *sv_xmm) 1481{ 1482 struct envxmm *penv_xmm = &sv_xmm->sv_env; 1483 struct envxmm *penv_fpreg = (struct envxmm *)&fpregs->fpr_env; 1484 int i; 1485 1486 /* fpregs -> pcb */ 1487 /* FPU control/status */ 1488 penv_xmm->en_cw = penv_fpreg->en_cw; 1489 penv_xmm->en_sw = penv_fpreg->en_sw; 1490 penv_xmm->en_tw = penv_fpreg->en_tw; 1491 penv_xmm->en_opcode = penv_fpreg->en_opcode; 1492 penv_xmm->en_rip = penv_fpreg->en_rip; 1493 penv_xmm->en_rdp = penv_fpreg->en_rdp; 1494 penv_xmm->en_mxcsr = penv_fpreg->en_mxcsr; 1495 penv_xmm->en_mxcsr_mask = penv_fpreg->en_mxcsr_mask; 1496 1497 /* FPU registers */ 1498 for (i = 0; i < 8; ++i) 1499 bcopy(fpregs->fpr_acc[i], sv_xmm->sv_fp[i].fp_acc.fp_bytes, 10); 1500 1501 /* SSE registers */ 1502 for (i = 0; i < 16; ++i) 1503 bcopy(fpregs->fpr_xacc[i], sv_xmm->sv_xmm[i].xmm_bytes, 16); 1504} 1505 1506/* externalize from td->pcb */ 1507int 1508fill_fpregs(struct thread *td, struct fpreg *fpregs) 1509{ 1510 1511 fill_fpregs_xmm(&td->td_pcb->pcb_save, fpregs); 1512 return (0); 1513} 1514 1515/* internalize to td->pcb */ 1516int 1517set_fpregs(struct thread *td, struct fpreg *fpregs) 1518{ 1519 1520 set_fpregs_xmm(fpregs, &td->td_pcb->pcb_save); 1521 return (0); 1522} 1523 1524/* 1525 * Get machine context. 1526 */ 1527int 1528get_mcontext(struct thread *td, mcontext_t *mcp, int flags) 1529{ 1530 struct trapframe *tp; 1531 1532 tp = td->td_frame; 1533 PROC_LOCK(curthread->td_proc); 1534 mcp->mc_onstack = sigonstack(tp->tf_rsp); 1535 PROC_UNLOCK(curthread->td_proc); 1536 mcp->mc_r15 = tp->tf_r15; 1537 mcp->mc_r14 = tp->tf_r14; 1538 mcp->mc_r13 = tp->tf_r13; 1539 mcp->mc_r12 = tp->tf_r12; 1540 mcp->mc_r11 = tp->tf_r11; 1541 mcp->mc_r10 = tp->tf_r10; 1542 mcp->mc_r9 = tp->tf_r9; 1543 mcp->mc_r8 = tp->tf_r8; 1544 mcp->mc_rdi = tp->tf_rdi; 1545 mcp->mc_rsi = tp->tf_rsi; 1546 mcp->mc_rbp = tp->tf_rbp; 1547 mcp->mc_rbx = tp->tf_rbx; 1548 mcp->mc_rcx = tp->tf_rcx; 1549 if (flags & GET_MC_CLEAR_RET) { 1550 mcp->mc_rax = 0; 1551 mcp->mc_rdx = 0; 1552 } else { 1553 mcp->mc_rax = tp->tf_rax; 1554 mcp->mc_rdx = tp->tf_rdx; 1555 } 1556 mcp->mc_rip = tp->tf_rip; 1557 mcp->mc_cs = tp->tf_cs; 1558 mcp->mc_rflags = tp->tf_rflags; 1559 mcp->mc_rsp = tp->tf_rsp; 1560 mcp->mc_ss = tp->tf_ss; 1561 mcp->mc_len = sizeof(*mcp); 1562 get_fpcontext(td, mcp); 1563 return (0); 1564} 1565 1566/* 1567 * Set machine context. 
1568 * 1569 * However, we don't set any but the user modifiable flags, and we won't 1570 * touch the cs selector. 1571 */ 1572int 1573set_mcontext(struct thread *td, const mcontext_t *mcp) 1574{ 1575 struct trapframe *tp; 1576 long rflags; 1577 int ret; 1578 1579 tp = td->td_frame; 1580 if (mcp->mc_len != sizeof(*mcp)) 1581 return (EINVAL); 1582 rflags = (mcp->mc_rflags & PSL_USERCHANGE) | 1583 (tp->tf_rflags & ~PSL_USERCHANGE); 1584 ret = set_fpcontext(td, mcp); 1585 if (ret != 0) 1586 return (ret); 1587 tp->tf_r15 = mcp->mc_r15; 1588 tp->tf_r14 = mcp->mc_r14; 1589 tp->tf_r13 = mcp->mc_r13; 1590 tp->tf_r12 = mcp->mc_r12; 1591 tp->tf_r11 = mcp->mc_r11; 1592 tp->tf_r10 = mcp->mc_r10; 1593 tp->tf_r9 = mcp->mc_r9; 1594 tp->tf_r8 = mcp->mc_r8; 1595 tp->tf_rdi = mcp->mc_rdi; 1596 tp->tf_rsi = mcp->mc_rsi; 1597 tp->tf_rbp = mcp->mc_rbp; 1598 tp->tf_rbx = mcp->mc_rbx; 1599 tp->tf_rdx = mcp->mc_rdx; 1600 tp->tf_rcx = mcp->mc_rcx; 1601 tp->tf_rax = mcp->mc_rax; 1602 tp->tf_rip = mcp->mc_rip; 1603 tp->tf_rflags = rflags; 1604 tp->tf_rsp = mcp->mc_rsp; 1605 tp->tf_ss = mcp->mc_ss; 1606 td->td_pcb->pcb_flags |= PCB_FULLCTX; 1607 return (0); 1608} 1609 1610static void 1611get_fpcontext(struct thread *td, mcontext_t *mcp) 1612{ 1613 1614 mcp->mc_ownedfp = fpugetregs(td, (struct savefpu *)&mcp->mc_fpstate); 1615 mcp->mc_fpformat = fpuformat(); 1616} 1617 1618static int 1619set_fpcontext(struct thread *td, const mcontext_t *mcp) 1620{ 1621 1622 if (mcp->mc_fpformat == _MC_FPFMT_NODEV) 1623 return (0); 1624 else if (mcp->mc_fpformat != _MC_FPFMT_XMM) 1625 return (EINVAL); 1626 else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE) 1627 /* We don't care what state is left in the FPU or PCB. */ 1628 fpstate_drop(td); 1629 else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU || 1630 mcp->mc_ownedfp == _MC_FPOWNED_PCB) { 1631 /* 1632 * XXX we violate the dubious requirement that fpusetregs() 1633 * be called with interrupts disabled. 1634 * XXX obsolete on trap-16 systems? 1635 */ 1636 fpusetregs(td, (struct savefpu *)&mcp->mc_fpstate); 1637 } else 1638 return (EINVAL); 1639 return (0); 1640} 1641 1642void 1643fpstate_drop(struct thread *td) 1644{ 1645 register_t s; 1646 1647 s = intr_disable(); 1648 if (PCPU_GET(fpcurthread) == td) 1649 fpudrop(); 1650 /* 1651 * XXX force a full drop of the fpu. The above only drops it if we 1652 * owned it. 1653 * 1654 * XXX I don't much like fpugetregs()'s semantics of doing a full 1655 * drop. Dropping only to the pcb matches fnsave's behaviour. 1656 * We only need to drop to !PCB_INITDONE in sendsig(). But 1657 * sendsig() is the only caller of fpugetregs()... perhaps we just 1658 * have too many layers. 
1659 */ 1660 curthread->td_pcb->pcb_flags &= ~PCB_FPUINITDONE; 1661 intr_restore(s); 1662} 1663 1664int 1665fill_dbregs(struct thread *td, struct dbreg *dbregs) 1666{ 1667 struct pcb *pcb; 1668 1669 if (td == NULL) { 1670 dbregs->dr[0] = rdr0(); 1671 dbregs->dr[1] = rdr1(); 1672 dbregs->dr[2] = rdr2(); 1673 dbregs->dr[3] = rdr3(); 1674 dbregs->dr[6] = rdr6(); 1675 dbregs->dr[7] = rdr7(); 1676 } else { 1677 pcb = td->td_pcb; 1678 dbregs->dr[0] = pcb->pcb_dr0; 1679 dbregs->dr[1] = pcb->pcb_dr1; 1680 dbregs->dr[2] = pcb->pcb_dr2; 1681 dbregs->dr[3] = pcb->pcb_dr3; 1682 dbregs->dr[6] = pcb->pcb_dr6; 1683 dbregs->dr[7] = pcb->pcb_dr7; 1684 } 1685 dbregs->dr[4] = 0; 1686 dbregs->dr[5] = 0; 1687 dbregs->dr[8] = 0; 1688 dbregs->dr[9] = 0; 1689 dbregs->dr[10] = 0; 1690 dbregs->dr[11] = 0; 1691 dbregs->dr[12] = 0; 1692 dbregs->dr[13] = 0; 1693 dbregs->dr[14] = 0; 1694 dbregs->dr[15] = 0; 1695 return (0); 1696} 1697 1698int 1699set_dbregs(struct thread *td, struct dbreg *dbregs) 1700{ 1701 struct pcb *pcb; 1702 int i; 1703 u_int64_t mask1, mask2; 1704 1705 if (td == NULL) { 1706 load_dr0(dbregs->dr[0]); 1707 load_dr1(dbregs->dr[1]); 1708 load_dr2(dbregs->dr[2]); 1709 load_dr3(dbregs->dr[3]); 1710 load_dr6(dbregs->dr[6]); 1711 load_dr7(dbregs->dr[7]); 1712 } else { 1713 /* 1714 * Don't let an illegal value for dr7 get set. Specifically, 1715 * check for undefined settings. Setting these bit patterns 1716 * result in undefined behaviour and can lead to an unexpected 1717 * TRCTRAP or a general protection fault right here. 1718 */ 1719 for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 8; 1720 i++, mask1 <<= 2, mask2 <<= 2) 1721 if ((dbregs->dr[7] & mask1) == mask2) 1722 return (EINVAL); 1723 1724 pcb = td->td_pcb; 1725 1726 /* 1727 * Don't let a process set a breakpoint that is not within the 1728 * process's address space. If a process could do this, it 1729 * could halt the system by setting a breakpoint in the kernel 1730 * (if ddb was enabled). Thus, we need to check to make sure 1731 * that no breakpoints are being enabled for addresses outside 1732 * process's address space, unless, perhaps, we were called by 1733 * uid 0. 1734 * 1735 * XXX - what about when the watched area of the user's 1736 * address space is written into from within the kernel 1737 * ... wouldn't that still cause a breakpoint to be generated 1738 * from within kernel mode? 
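 * suser() returns 0 when the thread has superuser privilege, so the
 * VM_MAXUSER_ADDRESS checks below apply only to unprivileged processes.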
1739 */ 1740 1741 if (suser(td) != 0) { 1742 if (dbregs->dr[7] & 0x3) { 1743 /* dr0 is enabled */ 1744 if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS) 1745 return (EINVAL); 1746 } 1747 if (dbregs->dr[7] & 0x3<<2) { 1748 /* dr1 is enabled */ 1749 if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS) 1750 return (EINVAL); 1751 } 1752 if (dbregs->dr[7] & 0x3<<4) { 1753 /* dr2 is enabled */ 1754 if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS) 1755 return (EINVAL); 1756 } 1757 if (dbregs->dr[7] & 0x3<<6) { 1758 /* dr3 is enabled */ 1759 if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS) 1760 return (EINVAL); 1761 } 1762 } 1763 1764 pcb->pcb_dr0 = dbregs->dr[0]; 1765 pcb->pcb_dr1 = dbregs->dr[1]; 1766 pcb->pcb_dr2 = dbregs->dr[2]; 1767 pcb->pcb_dr3 = dbregs->dr[3]; 1768 pcb->pcb_dr6 = dbregs->dr[6]; 1769 pcb->pcb_dr7 = dbregs->dr[7]; 1770 1771 pcb->pcb_flags |= PCB_DBREGS; 1772 } 1773 1774 return (0); 1775} 1776 1777void 1778reset_dbregs(void) 1779{ 1780 1781 load_dr7(0); /* Turn off the control bits first */ 1782 load_dr0(0); 1783 load_dr1(0); 1784 load_dr2(0); 1785 load_dr3(0); 1786 load_dr6(0); 1787} 1788 1789/* 1790 * Return > 0 if a hardware breakpoint has been hit, and the 1791 * breakpoint was in user space. Return 0, otherwise. 1792 */ 1793int 1794user_dbreg_trap(void) 1795{ 1796 u_int64_t dr7, dr6; /* debug registers dr6 and dr7 */ 1797 u_int64_t bp; /* breakpoint bits extracted from dr6 */ 1798 int nbp; /* number of breakpoints that triggered */ 1799 caddr_t addr[4]; /* breakpoint addresses */ 1800 int i; 1801 1802 dr7 = rdr7(); 1803 if ((dr7 & 0x000000ff) == 0) { 1804 /* 1805 * all GE and LE bits in the dr7 register are zero, 1806 * thus the trap couldn't have been caused by the 1807 * hardware debug registers 1808 */ 1809 return 0; 1810 } 1811 1812 nbp = 0; 1813 dr6 = rdr6(); 1814 bp = dr6 & 0x0000000f; 1815 1816 if (!bp) { 1817 /* 1818 * None of the breakpoint bits are set meaning this 1819 * trap was not caused by any of the debug registers 1820 */ 1821 return 0; 1822 } 1823 1824 /* 1825 * at least one of the breakpoints were hit, check to see 1826 * which ones and if any of them are user space addresses 1827 */ 1828 1829 if (bp & 0x01) { 1830 addr[nbp++] = (caddr_t)rdr0(); 1831 } 1832 if (bp & 0x02) { 1833 addr[nbp++] = (caddr_t)rdr1(); 1834 } 1835 if (bp & 0x04) { 1836 addr[nbp++] = (caddr_t)rdr2(); 1837 } 1838 if (bp & 0x08) { 1839 addr[nbp++] = (caddr_t)rdr3(); 1840 } 1841 1842 for (i=0; i<nbp; i++) { 1843 if (addr[i] < 1844 (caddr_t)VM_MAXUSER_ADDRESS) { 1845 /* 1846 * addr[i] is in user space 1847 */ 1848 return nbp; 1849 } 1850 } 1851 1852 /* 1853 * None of the breakpoints are in user space. 1854 */ 1855 return 0; 1856} 1857 1858#ifdef KDB 1859 1860/* 1861 * Provide inb() and outb() as functions. They are normally only 1862 * available as macros calling inlined functions, thus cannot be 1863 * called from the debugger. 1864 * 1865 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined. 1866 */ 1867 1868#undef inb 1869#undef outb 1870 1871/* silence compiler warnings */ 1872u_char inb(u_int); 1873void outb(u_int, u_char); 1874 1875u_char 1876inb(u_int port) 1877{ 1878 u_char data; 1879 /* 1880 * We use %%dx and not %1 here because i/o is done at %dx and not at 1881 * %edx, while gcc generates inferior code (movw instead of movl) 1882 * if we tell it to load (u_short) port. 
1883 */ 1884 __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port)); 1885 return (data); 1886} 1887 1888void 1889outb(u_int port, u_char data) 1890{ 1891 u_char al; 1892 /* 1893 * Use an unnecessary assignment to help gcc's register allocator. 1894 * This makes a large difference for gcc-1.40 and a tiny difference 1895 * for gcc-2.6.0. For gcc-1.40, al had to be ``asm("ax")'' for 1896 * best results. gcc-2.6.0 can't handle this. 1897 */ 1898 al = data; 1899 __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port)); 1900} 1901 1902#endif /* KDB */ 1903