/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 * $FreeBSD: head/sys/amd64/amd64/machdep.c 88322 2001-12-20 23:48:31Z jhb $
 */

#include "opt_atalk.h"
#include "opt_compat.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_ipx.h"
#include "opt_isa.h"
#include "opt_maxmem.h"
#include "opt_msgbuf.h"
#include "opt_npx.h"
#include "opt_perfmon.h"
#include "opt_kstack_pages.h"
/* #include "opt_userconfig.h" */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/smp.h>
#include <sys/callout.h>
#include <sys/msgbuf.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/user.h>
#include <sys/exec.h>
#include <sys/cons.h>

#include <ddb/ddb.h>

#include <net/netisr.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/reg.h>
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/bootinfo.h>
#include <machine/md_var.h>
#include <machine/pc/bios.h>
#include <machine/pcb_ext.h>		/* pcb.h included via sys/user.h */
#include <machine/proc.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#ifdef SMP
#include <machine/privatespace.h>
#endif

#include <i386/isa/icu.h>
#include <i386/isa/intr_machdep.h>
#include <isa/rtc.h>
#include <machine/vm86.h>
#include <sys/ptrace.h>
#include <machine/sigframe.h>

extern void init386 __P((int first));
extern void dblfault_handler __P((void));

extern void printcpuinfo(void);	/* XXX header file */
extern void earlysetcpuclass(void);	/* same header file */
extern void finishidentcpu(void);
extern void panicifcpuunsupported(void);
extern void initializecpu(void);
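/*
 * Sanity checks applied to register state coming back from user mode
 * (sigreturn() and set_regs() below): CS_SECURE() requires a %cs
 * selector with user privilege (RPL == SEL_UPL), and EFL_SECURE()
 * requires that only the PSL_USERCHANGE bits of %eflags differ from
 * the previous trapframe value.
 */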
#define CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
#define EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)

static void cpu_startup __P((void *));
#ifdef CPU_ENABLE_SSE
static void set_fpregs_xmm __P((struct save87 *, struct savexmm *));
static void fill_fpregs_xmm __P((struct savexmm *, struct save87 *));
#endif /* CPU_ENABLE_SSE */
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)

int	_udatasel, _ucodesel;
u_int	atdevbase;

#if defined(SWTCH_OPTIM_STATS)
extern int swtch_optim_stats;
SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats,
	CTLFLAG_RD, &swtch_optim_stats, 0, "");
SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count,
	CTLFLAG_RD, &tlb_flush_count, 0, "");
#endif

#ifdef PC98
static int	ispc98 = 1;
#else
static int	ispc98 = 0;
#endif
SYSCTL_INT(_machdep, OID_AUTO, ispc98, CTLFLAG_RD, &ispc98, 0, "");

int physmem = 0;
int cold = 1;

#ifdef COMPAT_43
static void osendsig __P((sig_t catcher, int sig, sigset_t *mask, u_long code));
#endif

static int
sysctl_hw_physmem(SYSCTL_HANDLER_ARGS)
{
	int error = sysctl_handle_int(oidp, 0, ctob(physmem), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_physmem, "IU", "");

static int
sysctl_hw_usermem(SYSCTL_HANDLER_ARGS)
{
	int error = sysctl_handle_int(oidp, 0,
		ctob(physmem - cnt.v_wire_count), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_usermem, "IU", "");

static int
sysctl_hw_availpages(SYSCTL_HANDLER_ARGS)
{
	int error = sysctl_handle_int(oidp, 0,
		i386_btop(avail_end - avail_start), req);
	return (error);
}

SYSCTL_PROC(_hw, OID_AUTO, availpages, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_availpages, "I", "");

int Maxmem = 0;
long dumplo;

vm_offset_t phys_avail[10];

/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)
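/*
 * Illustrative layout of phys_avail[] above (values are hypothetical):
 * a machine with 639K of base memory and 63M of extended memory might
 * end up with
 *	phys_avail[] = { 0x1000, 0x9f000, 0x100000, 0x4000000, 0, 0 }
 * i.e. even entries hold the start and odd entries the (exclusive) end
 * of each usable chunk, with a 0/0 pair terminating the list.
 */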
struct kva_md_info kmi;

static struct trapframe proc0_tf;
#ifndef SMP
static struct pcpu __pcpu;
#endif

struct mtx sched_lock;
struct mtx Giant;
struct mtx icu_lock;

static void
cpu_startup(dummy)
	void *dummy;
{
	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	earlysetcpuclass();
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory = %u (%uK bytes)\n", ptoa(Maxmem),
	    ptoa(Maxmem) / 1024);
	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			unsigned int size1;

			size1 = phys_avail[indx + 1] - phys_avail[indx];
			printf("0x%08x - 0x%08x, %u bytes (%u pages)\n",
			    phys_avail[indx], phys_avail[indx + 1] - 1, size1,
			    size1 / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

#if defined(USERCONFIG)
	userconfig();
	cninit();		/* the preferred console may have changed */
#endif

	printf("avail memory = %u (%uK bytes)\n", ptoa(cnt.v_free_count),
	    ptoa(cnt.v_free_count) / 1024);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();

#ifndef SMP
	/* For SMP, we delay the cpu_setregs() until after SMP startup. */
	cpu_setregs();
#endif
}

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
#ifdef COMPAT_43
static void
osendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct osigframe sf;
	struct osigframe *fp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	psp = p->p_sigacts;
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Allocate and validate space for the signal handler context. */
	if ((p->p_flag & P_ALTSTACK) && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct osigframe *)(p->p_sigstk.ss_sp +
		    p->p_sigstk.ss_size - sizeof(struct osigframe));
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct osigframe *)regs->tf_esp - 1;
	PROC_UNLOCK(p);

	/*
	 * grow_stack() will return 0 if *fp does not fit inside the stack
	 * and the stack can not be grown.
	 * useracc() will return FALSE if access is denied.
	 */
	if (grow_stack(p, (int)fp) == 0 ||
	    !useracc((caddr_t)fp, sizeof(*fp), VM_PROT_WRITE)) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		PROC_LOCK(p);
		SIGACTION(p, SIGILL) = SIG_DFL;
		SIGDELSET(p->p_sigignore, SIGILL);
		SIGDELSET(p->p_sigcatch, SIGILL);
		SIGDELSET(p->p_sigmask, SIGILL);
		psignal(p, SIGILL);
		return;
	}

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
	PROC_LOCK(p);
	if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_arg2 = (register_t)&fp->sf_siginfo;
		sf.sf_siginfo.si_signo = sig;
		sf.sf_siginfo.si_code = code;
		sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_arg2 = code;
		sf.sf_addr = regs->tf_err;
		sf.sf_ahu.sf_handler = catcher;
	}
	PROC_UNLOCK(p);

	/* Save most if not all of trap frame. */
	sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
	sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
	sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
	sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
	sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
	sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
	sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
	sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
	sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
	sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
	sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
	sf.sf_siginfo.si_sc.sc_gs = rgs();
	sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;

	/* Build the signal context to be used by osigreturn(). */
	sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
	SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
	sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
	sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
	sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
	sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
	sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
	sf.sf_siginfo.si_sc.sc_err = regs->tf_err;

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		/* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
		sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
		sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
		sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_siginfo.si_sc.sc_ps =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/* See sendsig() for comments. */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP);
	}

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&sf, fp, sizeof(*fp)) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		PROC_LOCK(p);
		sigexit(td, SIGILL);
		/* NOTREACHED */
	}

	regs->tf_esp = (int)fp;
	regs->tf_eip = PS_STRINGS - szosigcode;
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	load_gs(_udatasel);
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
}
#endif
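/*
 * Processes whose handler was installed through the old 4.3BSD
 * osigaction interface are flagged in ps_osigset; sendsig() below
 * detects this and hands delivery to osendsig() above, which builds
 * the old struct osigframe instead of the ucontext-based sigframe.
 */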
void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct sigframe sf;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	struct sigframe *sfp;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	psp = p->p_sigacts;
#ifdef COMPAT_43
	if (SIGISMEMBER(psp->ps_osigset, sig)) {
		osendsig(catcher, sig, mask, code);
		return;
	}
#endif
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = p->p_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (p->p_flag & P_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));

	/* Allocate and validate space for the signal handler context. */
	if ((p->p_flag & P_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe *)(p->p_sigstk.ss_sp +
		    p->p_sigstk.ss_size - sizeof(struct sigframe));
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct sigframe *)regs->tf_esp - 1;
	PROC_UNLOCK(p);

	/*
	 * grow_stack() will return 0 if *sfp does not fit inside the stack
	 * and the stack can not be grown.
	 * useracc() will return FALSE if access is denied.
	 */
	if (grow_stack(p, (int)sfp) == 0 ||
	    !useracc((caddr_t)sfp, sizeof(*sfp), VM_PROT_WRITE)) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
#ifdef DEBUG
		printf("process %d has trashed its stack\n", p->p_pid);
#endif
		PROC_LOCK(p);
		SIGACTION(p, SIGILL) = SIG_DFL;
		SIGDELSET(p->p_sigignore, SIGILL);
		SIGDELSET(p->p_sigcatch, SIGILL);
		SIGDELSET(p->p_sigmask, SIGILL);
		psignal(p, SIGILL);
		return;
	}

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	PROC_LOCK(p);
	if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill siginfo structure. */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = code;
		sf.sf_si.si_addr = (void *)regs->tf_err;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = code;
		sf.sf_addr = regs->tf_err;
		sf.sf_ahu.sf_handler = catcher;
	}
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * We should never have PSL_T set when returning from vm86
		 * mode.  It may be set here if we deliver a signal before
		 * getting to vm86 mode, so turn it off.
		 *
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP);
	}

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		PROC_LOCK(p);
		sigexit(td, SIGILL);
		/* NOTREACHED */
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
}
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 */
#ifdef COMPAT_43
int
osigreturn(td, uap)
	struct thread *td;
	struct osigreturn_args /* {
		struct osigcontext *sigcntxp;
	} */ *uap;
{
	struct trapframe *regs;
	struct osigcontext *scp;
	struct proc *p = td->td_proc;
	int eflags;

	regs = td->td_frame;
	scp = uap->sigcntxp;
	if (!useracc((caddr_t)scp, sizeof(*scp), VM_PROT_READ))
		return (EFAULT);
	eflags = scp->sc_ps;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(p, SIGBUS, 0);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		tf->tf_vm86_ds = scp->sc_ds;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		if (!CS_SECURE(scp->sc_cs)) {
			trapsignal(p, SIGBUS, T_PROTFLT);
			return (EINVAL);
		}
		regs->tf_ds = scp->sc_ds;
		regs->tf_es = scp->sc_es;
		regs->tf_fs = scp->sc_fs;
	}

	/* Restore remaining registers. */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;

	PROC_LOCK(p);
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
	if (scp->sc_onstack & 1)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	SIGSETOLD(p->p_sigmask, scp->sc_mask);
	SIG_CANTMASK(p->p_sigmask);
	PROC_UNLOCK(p);
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;
	return (EJUSTRETURN);
}
#endif
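/*
 * The old signal trampoline stores the magic value 0x01d516 ("0ldSiG")
 * in sc_trapno before entering the kernel; that is how the check below
 * tells an old osigcontext frame from a new ucontext_t one and routes
 * it to osigreturn().
 */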
int
sigreturn(td, uap)
	struct thread *td;
	struct sigreturn_args /* {
		ucontext_t *sigcntxp;
	} */ *uap;
{
	struct proc *p = td->td_proc;
	struct trapframe *regs;
	ucontext_t *ucp;
	int cs, eflags;

	ucp = uap->sigcntxp;
#ifdef COMPAT_43
	if (!useracc((caddr_t)ucp, sizeof(struct osigcontext), VM_PROT_READ))
		return (EFAULT);
	if (((struct osigcontext *)ucp)->sc_trapno == 0x01d516)
		return (osigreturn(td, (struct osigreturn_args *)uap));
	/*
	 * Since ucp is not an osigcontext but a ucontext_t, we have to
	 * check again if all of it is accessible.  A ucontext_t is
	 * much larger, so instead of just checking for the pointer
	 * being valid for the size of an osigcontext, now check for
	 * it being valid for a whole, new-style ucontext_t.
	 */
#endif
	if (!useracc((caddr_t)ucp, sizeof(*ucp), VM_PROT_READ))
		return (EFAULT);

	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(p, SIGBUS, 0);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			printf("sigreturn: eflags = 0x%x\n", eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			printf("sigreturn: cs = 0x%x\n", cs);
			trapsignal(p, SIGBUS, T_PROTFLT);
			return (EINVAL);
		}

		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

	PROC_LOCK(p);
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
	if (ucp->uc_mcontext.mc_onstack & 1)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	p->p_sigmask = ucp->uc_sigmask;
	SIG_CANTMASK(p->p_sigmask);
	PROC_UNLOCK(p);
	return (EJUSTRETURN);
}
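/*
 * Both flavors of sigreturn() finish with EJUSTRETURN: the syscall
 * return path then leaves the trapframe alone instead of writing a
 * normal return value into %eax, so the register state restored above
 * survives intact back to user mode.
 */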
/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		__asm__ ("hlt");
}

/*
 * Hook to idle the CPU when possible.  This currently only works in
 * the !SMP case, as there is no clean way to ensure that a CPU will be
 * woken when there is work available for it.
 */
static int	cpu_idle_hlt = 1;
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
    &cpu_idle_hlt, 0, "Idle loop HLT enable");

/*
 * Note that we have to be careful here to avoid a race between checking
 * procrunnable() and actually halting.  If we don't do this, we may waste
 * the time between calling hlt and the next interrupt even though there
 * is a runnable process.
 */
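/*
 * The sequence also leans on an i386 detail: `sti' holds interrupts
 * off until the instruction after it has executed, so provided the
 * compiler emits the `hlt' immediately after the `sti' in
 * enable_intr(), no wakeup interrupt can be taken between the two.
 */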
void
cpu_idle(void)
{
#ifndef SMP
	if (cpu_idle_hlt) {
		disable_intr();
		if (procrunnable())
			enable_intr();
		else {
			enable_intr();
			__asm __volatile("hlt");
		}
	}
#endif
}

/*
 * Clear registers on exec
 */
void
setregs(td, entry, stack, ps_strings)
	struct thread *td;
	u_long entry;
	u_long stack;
	u_long ps_strings;
{
	struct trapframe *regs = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	if (td->td_proc->p_md.md_ldt)
		user_ldt_free(td);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = entry;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = ps_strings;

	/* reset %gs as well */
	if (pcb == PCPU_GET(curpcb))
		load_gs(_udatasel);
	else
		pcb->pcb_gs = _udatasel;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == PCPU_GET(curpcb)) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	td->td_pcb->pcb_flags &= ~FP_SOFTFP;

	/*
	 * Arrange to trap the next npx or `fwait' instruction (see npx.c
	 * for why fwait must be trapped at least if there is an npx or an
	 * emulator).  This is mainly to handle the case where npx0 is not
	 * configured, since the npx routines normally set up the trap
	 * otherwise.  It should be done only at boot time, but doing it
	 * here allows modifying `npx_exists' for testing the emulator on
	 * systems with an npx.
	 */
	load_cr0(rcr0() | CR0_MP | CR0_TS);

#ifdef DEV_NPX
	/* Initialize the npx (if any) for the current process. */
	npxinit(__INITIAL_NPXCW__);
#endif

	/*
	 * XXX - Linux emulator
	 * Make sure edx is 0x0 on entry.  Linux binaries depend
	 * on it.
	 */
	td->td_retval[1] = 0;
}
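/*
 * A quick key to the %cr0 bits set below: CR0_TS and CR0_MP make the
 * first FPU/fwait instruction trap so FPU state can be attached lazily;
 * CR0_NE selects native (exception 16) FPU error reporting; CR0_WP
 * makes the kernel honor read-only pages, which copy-on-write depends
 * on; CR0_AM allows EFLAGS.AC alignment checking in user mode.
 */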
void
cpu_setregs(void)
{
	unsigned int cr0;

	cr0 = rcr0();
#ifdef SMP
	cr0 |= CR0_NE;			/* Done by npxinit() */
#endif
	cr0 |= CR0_MP | CR0_TS;		/* Done at every execve() too. */
#ifndef I386_CPU
	cr0 |= CR0_WP | CR0_AM;
#endif
	load_cr0(cr0);
	load_gs(_udatasel);
}

static int
sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS)
{
	int error;
	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
		req);
	if (!error && req->newptr)
		resettodr();
	return (error);
}

SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
	&adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");

SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set,
	CTLFLAG_RW, &disable_rtc_set, 0, "");

SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo,
	CTLFLAG_RD, &bootinfo, bootinfo, "");

SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock,
	CTLFLAG_RW, &wall_cmos_clock, 0, "");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int _default_ldt;
union descriptor gdt[NGDT * MAXCPU];	/* global descriptor table */
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */
#ifdef SMP
/* table descriptors - used to load tables by microprocessor */
struct region_descriptor r_gdt, r_idt;
#endif

int private_tss;			/* flag indicating private tss */

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];

extern struct user *proc0uarea;
extern vm_offset_t proc0kstack;


/* software prototypes -- in more palatable form */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	0x0,			/* segment base address  */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GCODE_SEL	1 Code Descriptor for kernel */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GDATA_SEL	2 Data Descriptor for kernel */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GPRIV_SEL	3 SMP Per-Processor Private Data Descriptor */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GPROC0_SEL	4 Proc 0 Tss Descriptor */
{
	0x0,			/* segment base address */
	sizeof(struct i386tss)-1,/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GLDT_SEL	5 LDT Descriptor */
{	(int) ldt,		/* segment base address  */
	sizeof(ldt)-1,		/* length - all address space */
	SDT_SYSLDT,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GUSERLDT_SEL	6 User LDT Descriptor per process */
{	(int) ldt,		/* segment base address  */
	(512 * sizeof(union descriptor)-1),	/* length */
	SDT_SYSLDT,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GTGATE_SEL	7 Null Descriptor - Placeholder */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
{	0x400,			/* segment base address */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GPANIC_SEL	9 Panic Tss Descriptor */
{	(int) &dblfault_tss,	/* segment base address  */
	sizeof(struct i386tss)-1,/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GBIOSCODE32_SEL 10 BIOS 32-bit interface (32bit Code) */
{	0,			/* segment base address (overwritten)  */
	0xfffff,		/* length */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSCODE16_SEL 11 BIOS 32-bit interface (16bit Code) */
{	0,			/* segment base address (overwritten)  */
	0xfffff,		/* length */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSDATA_SEL 12 BIOS 32-bit interface (Data) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSUTIL_SEL 13 BIOS 16-bit interface (Utility) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSARGS_SEL 14 BIOS 16-bit interface (Arguments) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
};

static struct soft_segment_descriptor ldt_segs[] = {
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Code Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Data Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
};
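/*
 * setidt() below fills in one IDT gate: the handler offset is split
 * into low/high halves, gd_selector names the code segment switched to
 * on entry, gd_type picks trap vs. interrupt gate, and gd_dpl is the
 * lowest privilege level allowed to raise the vector with an explicit
 * `int $n' (SEL_UPL for the breakpoint, overflow and syscall vectors).
 */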
void
setidt(idx, func, typ, dpl, selec)
	int idx;
	inthand_t *func;
	int typ;
	int dpl;
	int selec;
{
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = (int)func;
	ip->gd_selector = selec;
	ip->gd_stkcpy = 0;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((int)func)>>16 ;
}

#define	IDTVEC(name)	__CONCAT(X,name)

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm), IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);

void
sdtossd(sd, ssd)
	struct segment_descriptor *sd;
	struct soft_segment_descriptor *ssd;
{
	ssd->ssd_base  = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type  = sd->sd_type;
	ssd->ssd_dpl   = sd->sd_dpl;
	ssd->ssd_p     = sd->sd_p;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran  = sd->sd_gran;
}

#define PHYSMAP_SIZE	(2 * 8)

/*
 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * If we cannot accurately determine the physical memory map, then use
 * value from the 0xE801 call, and failing that, the RTC.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 */
static void
getmemsize(int first)
{
	int i, physmap_idx, pa_indx;
	u_int basemem, extmem;
	struct vm86frame vmf;
	struct vm86context vmc;
	vm_offset_t pa, physmap[PHYSMAP_SIZE];
	pt_entry_t *pte;
	const char *cp;
	struct bios_smap *smap;

	bzero(&vmf, sizeof(struct vm86frame));
	bzero(physmap, sizeof(physmap));

	/*
	 * Perform "base memory" related probes & setup
	 */
	vm86_intcall(0x12, &vmf);
	basemem = vmf.vmf_ax;
	if (basemem > 640) {
		printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
			basemem);
		basemem = 640;
	}

	/*
	 * XXX if biosbasemem is now < 640, there is a `hole'
	 * between the end of base memory and the start of
	 * ISA memory.  The hole may be empty or it may
	 * contain BIOS code or data.  Map it read/write so
	 * that the BIOS can write to it.  (Memory from 0 to
	 * the physical end of the kernel is mapped read-only
	 * to begin with and then parts of it are remapped.
	 * The parts that aren't remapped form holes that
	 * remain read-only and are unused by the kernel.
	 * The base memory area is below the physical end of
	 * the kernel and right now forms a read-only hole.
	 * The part of it from PAGE_SIZE to
	 * (trunc_page(biosbasemem * 1024) - 1) will be
	 * remapped and used by the kernel later.)
	 *
	 * This code is similar to the code used in
	 * pmap_mapdev, but since no memory needs to be
	 * allocated we simply change the mapping.
	 */
	for (pa = trunc_page(basemem * 1024);
	     pa < ISA_HOLE_START; pa += PAGE_SIZE) {
		pte = vtopte(pa + KERNBASE);
		*pte = pa | PG_RW | PG_V;
	}

	/*
	 * if basemem != 640, map pages r/w into vm86 page table so
	 * that the bios can scribble on it.
	 */
	pte = (pt_entry_t *)vm86paddr;
	for (i = basemem / 4; i < 160; i++)
		pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;
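	/*
	 * Each record returned by the E820 call below is a struct
	 * bios_smap: a 64-bit base, a 64-bit length and a 32-bit type,
	 * where type 0x01 means usable RAM; everything else (reserved,
	 * ACPI reclaim/NVS, etc.) is skipped.
	 */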
	/*
	 * map page 1 R/W into the kernel page table so we can use it
	 * as a buffer.  The kernel will unmap this page later.
	 */
	pte = vtopte(KERNBASE + (1 << PAGE_SHIFT));
	*pte = (1 << PAGE_SHIFT) | PG_RW | PG_V;

	/*
	 * get memory map with INT 15:E820
	 */
	vmc.npages = 0;
	smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
	vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);

	physmap_idx = 0;
	vmf.vmf_ebx = 0;
	do {
		vmf.vmf_eax = 0xE820;
		vmf.vmf_edx = SMAP_SIG;
		vmf.vmf_ecx = sizeof(struct bios_smap);
		i = vm86_datacall(0x15, &vmf, &vmc);
		if (i || vmf.vmf_eax != SMAP_SIG)
			break;
		if (boothowto & RB_VERBOSE)
			printf("SMAP type=%02x base=%08x %08x len=%08x %08x\n",
			    smap->type,
			    *(u_int32_t *)((char *)&smap->base + 4),
			    (u_int32_t)smap->base,
			    *(u_int32_t *)((char *)&smap->length + 4),
			    (u_int32_t)smap->length);

		if (smap->type != 0x01)
			goto next_run;

		if (smap->length == 0)
			goto next_run;

		if (smap->base >= 0xffffffff) {
			printf("%uK of memory above 4GB ignored\n",
			    (u_int)(smap->length / 1024));
			goto next_run;
		}

		for (i = 0; i <= physmap_idx; i += 2) {
			if (smap->base < physmap[i + 1]) {
				if (boothowto & RB_VERBOSE)
					printf(
	"Overlapping or non-monotonic memory region, ignoring second region\n");
				goto next_run;
			}
		}

		if (smap->base == physmap[physmap_idx + 1]) {
			physmap[physmap_idx + 1] += smap->length;
			goto next_run;
		}

		physmap_idx += 2;
		if (physmap_idx == PHYSMAP_SIZE) {
			printf(
		"Too many segments in the physical address map, giving up\n");
			break;
		}
		physmap[physmap_idx] = smap->base;
		physmap[physmap_idx + 1] = smap->base + smap->length;
next_run:
		;
	} while (vmf.vmf_ebx != 0);

	if (physmap[1] != 0)
		goto physmap_done;

	/*
	 * If we failed above, try memory map with INT 15:E801
	 */
	vmf.vmf_ax = 0xE801;
	if (vm86_intcall(0x15, &vmf) == 0) {
		extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
	} else {
#if 0
		vmf.vmf_ah = 0x88;
		vm86_intcall(0x15, &vmf);
		extmem = vmf.vmf_ax;
#else
		/*
		 * Prefer the RTC value for extended memory.
		 */
		extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
#endif
	}

	/*
	 * Special hack for chipsets that still remap the 384k hole when
	 * there's 16MB of memory - this really confuses people that
	 * are trying to use bus mastering ISA controllers with the
	 * "16MB limit"; they only have 16MB, but the remapping puts
	 * them beyond the limit.
	 *
	 * If extended memory is between 15-16MB (16-17MB phys address range),
	 * chop it to 15MB.
	 */
	if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
		extmem = 15 * 1024;

	physmap[0] = 0;
	physmap[1] = basemem * 1024;
	physmap_idx = 2;
	physmap[physmap_idx] = 0x100000;
	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;

physmap_done:
	/*
	 * Now, physmap contains a map of physical memory.
	 */

#ifdef SMP
	/* make hole for AP bootstrap code */
	physmap[1] = mp_bootaddress(physmap[1] / 1024);

	/* look for the MP hardware - needed for apic addresses */
	i386_mp_probe();
#endif

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".  We may adjust this
	 * based on ``hw.physmem'' and the results of the memory test.
	 */
	Maxmem = atop(physmap[physmap_idx + 1]);

#ifdef MAXMEM
	Maxmem = MAXMEM / 4;
#endif
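	/*
	 * For example (a hypothetical setting): hw.physmem="64m" parses
	 * as 64 << 10 << 10 bytes via the fall-through switch below,
	 * capping Maxmem at atop(64MB) = 16384 4K pages.
	 */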
	/*
	 * hw.physmem is a size in bytes; we also allow k, m, and g suffixes
	 * for the appropriate modifiers.  This overrides MAXMEM.
	 */
	if ((cp = getenv("hw.physmem")) != NULL) {
		u_int64_t AllowMem, sanity;
		char *ep;

		sanity = AllowMem = strtouq(cp, &ep, 0);
		if ((ep != cp) && (*ep != 0)) {
			switch(*ep) {
			case 'g':
			case 'G':
				AllowMem <<= 10;
			case 'm':
			case 'M':
				AllowMem <<= 10;
			case 'k':
			case 'K':
				AllowMem <<= 10;
				break;
			default:
				AllowMem = sanity = 0;
			}
			if (AllowMem < sanity)
				AllowMem = 0;
		}
		if (AllowMem == 0)
			printf("Ignoring invalid memory size of '%s'\n", cp);
		else
			Maxmem = atop(AllowMem);
	}

	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
	    (boothowto & RB_VERBOSE))
		printf("Physical memory use set to %uK\n", Maxmem * 4);

	/*
	 * If Maxmem has been increased beyond what the system has detected,
	 * extend the last memory segment to the new limit.
	 */
	if (atop(physmap[physmap_idx + 1]) < Maxmem)
		physmap[physmap_idx + 1] = ptoa(Maxmem);

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(first, 0);

	/*
	 * Size up each available chunk of physical memory.
	 */
	physmap[0] = PAGE_SIZE;		/* mask off page 0 */
	pa_indx = 0;
	phys_avail[pa_indx++] = physmap[0];
	phys_avail[pa_indx] = physmap[0];
#if 0
	pte = vtopte(KERNBASE);
#else
	pte = CMAP1;
#endif

	/*
	 * physmap is in bytes, so when converting to page boundaries,
	 * round up the start address and round down the end address.
	 */
	for (i = 0; i <= physmap_idx; i += 2) {
		vm_offset_t end;

		end = ptoa(Maxmem);
		if (physmap[i + 1] < end)
			end = trunc_page(physmap[i + 1]);
		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
			int tmp, page_bad;
#if 0
			int *ptr = 0;
#else
			int *ptr = (int *)CADDR1;
#endif

			/*
			 * block out kernel memory as not available.
			 */
			if (pa >= 0x100000 && pa < first)
				continue;

			page_bad = FALSE;

			/*
			 * map page into kernel: valid, read/write,non-cacheable
			 */
			*pte = pa | PG_V | PG_RW | PG_N;
			invltlb();

			tmp = *(int *)ptr;
			/*
			 * Test for alternating 1's and 0's
			 */
			*(volatile int *)ptr = 0xaaaaaaaa;
			if (*(volatile int *)ptr != 0xaaaaaaaa) {
				page_bad = TRUE;
			}
			/*
			 * Test for alternating 0's and 1's
			 */
			*(volatile int *)ptr = 0x55555555;
			if (*(volatile int *)ptr != 0x55555555) {
				page_bad = TRUE;
			}
			/*
			 * Test for all 1's
			 */
			*(volatile int *)ptr = 0xffffffff;
			if (*(volatile int *)ptr != 0xffffffff) {
				page_bad = TRUE;
			}
			/*
			 * Test for all 0's
			 */
			*(volatile int *)ptr = 0x0;
			if (*(volatile int *)ptr != 0x0) {
				page_bad = TRUE;
			}
			/*
			 * Restore original value.
			 */
			*(int *)ptr = tmp;
			/*
			 * Adjust array of valid/good pages.
			 */
			if (page_bad == TRUE) {
				continue;
			}
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer.  Otherwise start a new chunk.
			 * Note that "end" points one higher than the last
			 * valid byte, making the range >= start and < end.
			 * If we're also doing a speculative memory
			 * test and we are at or past the end, bump up Maxmem
			 * so that we keep going.  The first bad page
			 * will terminate the loop.
			 */
			if (phys_avail[pa_indx] == pa) {
				phys_avail[pa_indx] += PAGE_SIZE;
			} else {
				pa_indx++;
				if (pa_indx == PHYS_AVAIL_ARRAY_END) {
					printf(
		"Too many holes in the physical address space, giving up\n");
					pa_indx--;
					break;
				}
				phys_avail[pa_indx++] = pa;	/* start */
				phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
			}
			physmem++;
		}
	}
	*pte = 0;
	invltlb();

	/*
	 * XXX
	 * The last chunk must contain at least one page plus the message
	 * buffer to avoid complicating other code (message buffer address
	 * calculation, etc.).
	 */
	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
	    round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
		phys_avail[pa_indx--] = 0;
		phys_avail[pa_indx--] = 0;
	}

	Maxmem = atop(phys_avail[pa_indx]);

	/* Trim off space for the message buffer. */
	phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);

	avail_end = phys_avail[pa_indx];
}

void
init386(first)
	int first;
{
	struct gate_descriptor *gdp;
	int gsel_tss, metadata_missing, off, x;
#ifndef SMP
	/* table descriptors - used to load tables by microprocessor */
	struct region_descriptor r_gdt, r_idt;
#endif
	struct pcpu *pc;

	proc_linkup(&proc0);
	proc0.p_uarea = proc0uarea;
	thread0 = &proc0.p_thread;
	thread0->td_kstack = proc0kstack;
	thread0->td_pcb = (struct pcb *)
	   (thread0->td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	atdevbase = ISA_HOLE_START + KERNBASE;

	metadata_missing = 0;
	if (bootinfo.bi_modulep) {
		preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
		preload_bootstrap_relocate(KERNBASE);
	} else {
		metadata_missing = 1;
	}
	if (envmode == 1)
		kern_envp = static_env;
	else if (bootinfo.bi_envp)
		kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;

	/* Init basic tunables, hz etc */
	init_param1();
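	/*
	 * GPRIV_SEL below is the kernel's window onto the per-CPU data:
	 * its base is pointed at this CPU's pcpu (or SMP private space)
	 * area, which the kernel then reaches through %fs in the
	 * PCPU_GET()/PCPU_SET() accessors.
	 */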
	/*
	 * make gdt memory segments, the code segment goes up to end of the
	 * page with etext in it, the data segment goes to the end of
	 * the address space
	 */
	/*
	 * XXX text protection is temporarily (?) disabled.  The limit was
	 * i386_btop(round_page(etext)) - 1.
	 */
	gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
#ifdef SMP
	pc = &SMP_prvspace[0];
	gdt_segs[GPRIV_SEL].ssd_limit =
		atop(sizeof(struct privatespace) - 1);
#else
	pc = &__pcpu;
	gdt_segs[GPRIV_SEL].ssd_limit =
		atop(sizeof(struct pcpu) - 1);
#endif
	gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
	gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;

	for (x = 0; x < NGDT; x++) {
#ifdef BDE_DEBUGGER
		/* avoid overwriting db entries with APM ones */
		if (x >= GAPMCODE32_SEL && x <= GAPMDATA_SEL)
			continue;
#endif
		ssdtosd(&gdt_segs[x], &gdt[x].sd);
	}

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base =  (int) gdt;
	lgdt(&r_gdt);

	pcpu_init(pc, 0, sizeof(struct pcpu));
	PCPU_SET(prvspace, pc);

	/* setup curproc so that mutexes work */
	PCPU_SET(curthread, thread0);

	LIST_INIT(&thread0->td_contested);

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE);
	mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_RECURSE);
	mtx_init(&proc0.p_mtx, "process lock", MTX_DEF);
	mtx_init(&clock_lock, "clk", MTX_SPIN | MTX_RECURSE);
	mtx_init(&icu_lock, "icu", MTX_SPIN);
	mtx_lock(&Giant);

	/* make ldt memory segments */
	/*
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  And it
	 * should be spelled ...MAX_USER...
	 */
	ldt_segs[LUCODE_SEL].ssd_limit = atop(VM_MAXUSER_ADDRESS - 1);
	ldt_segs[LUDATA_SEL].ssd_limit = atop(VM_MAXUSER_ADDRESS - 1);
	for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
		ssdtosd(&ldt_segs[x], &ldt[x].sd);

	_default_ldt = GSEL(GLDT_SEL, SEL_KPL);
	lldt(_default_ldt);
	PCPU_SET(currentldt, _default_ldt);

	/* exceptions */
	for (x = 0; x < NIDT; x++)
		setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
		    GSEL(GCODE_SEL, SEL_KPL));
	setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(1, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(3, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(8, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
	setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(14, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(15, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(17, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(18, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(19, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(0x80, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));

	r_idt.rd_limit = sizeof(idt0) - 1;
	r_idt.rd_base = (int) idt;
	lidt(&r_idt);
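	/*
	 * A note on the gate types used above: trap gates (SDT_SYS386TGT)
	 * leave interrupts enabled on entry, while the debug, breakpoint
	 * and page-fault vectors use interrupt gates (SDT_SYS386IGT) and
	 * enter with interrupts disabled -- for the page fault this keeps
	 * %cr2 from being overwritten by a nested fault before it is read.
	 */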
	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

	if (metadata_missing)
		printf("WARNING: loader(8) metadata is missing!\n");

#ifdef DEV_ISA
	isa_defaultirq();
#endif

#ifdef DDB
	kdb_init();
	if (boothowto & RB_KDB)
		Debugger("Boot flags requested debugger");
#endif

	finishidentcpu();	/* Final stage of CPU initialization */
	setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	initializecpu();	/* Initialize CPU registers */

	/* make an initial tss so cpu can get interrupt stack on syscall! */
	/* Note: -16 is so we can grow the trapframe if we came from vm86 */
	PCPU_SET(common_tss.tss_esp0, thread0->td_kstack +
	    KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb) - 16);
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	private_tss = 0;
	PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
	ltr(gsel_tss);

	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
	    dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
	    dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_cr3 = (int)IdlePTD;
	dblfault_tss.tss_eip = (int)dblfault_handler;
	dblfault_tss.tss_eflags = PSL_KERNEL;
	dblfault_tss.tss_ds = dblfault_tss.tss_es =
	    dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
	dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);

	vm86_initialize();
	getmemsize(first);
	init_param2(physmem);

	/* now running on new page tables, configured, and u/iom is accessible */

	/* Map the message buffer. */
	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);

	msgbufinit(msgbufp, MSGBUF_SIZE);

	/* make a call gate to reenter kernel with */
	gdp = &ldt[LSYS5CALLS_SEL].gd;

	x = (int) &IDTVEC(lcall_syscall);
	gdp->gd_looffset = x;
	gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
	gdp->gd_stkcpy = 1;
	gdp->gd_type = SDT_SYS386CGT;
	gdp->gd_dpl = SEL_UPL;
	gdp->gd_p = 1;
	gdp->gd_hioffset = x >> 16;

	/* XXX does this work? */
	ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
	ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];

	/* transfer to user mode */

	_ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
	_udatasel = LSEL(LUDATA_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	thread0->td_pcb->pcb_flags = 0; /* XXXKSE */
	thread0->td_pcb->pcb_cr3 = (int)IdlePTD;
	thread0->td_pcb->pcb_ext = 0;
	thread0->td_frame = &proc0_tf;
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
}
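/*
 * Background on the workaround below: on affected Pentiums, a locked
 * invalid-opcode sequence ("lock cmpxchg8b" with a register operand,
 * bytes F0 0F C7 C8) hangs the CPU because the locked IDT access for
 * the invalid-opcode vector never completes.  Relocating the first
 * seven IDT entries to the end of a read-only page turns that access
 * into a page fault, which the kernel can handle instead of wedging.
 */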

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
}

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
static void f00f_hack(void *unused);
SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);

static void
f00f_hack(void *unused)
{
    struct gate_descriptor *new_idt;
#ifndef SMP
    struct region_descriptor r_idt;
#endif
    vm_offset_t tmp;

    if (!has_f00f_bug)
        return;

    GIANT_REQUIRED;

    printf("Intel Pentium detected, installing workaround for F00F bug\n");

    r_idt.rd_limit = sizeof(idt0) - 1;

    tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
    if (tmp == 0)
        panic("kmem_alloc returned 0");
    if (((unsigned int)tmp & (PAGE_SIZE - 1)) != 0)
        panic("kmem_alloc returned non-page-aligned memory");
    /* Put the first seven entries in the lower page */
    new_idt = (struct gate_descriptor *)(tmp + PAGE_SIZE - (7 * 8));
    bcopy(idt, new_idt, sizeof(idt0));
    r_idt.rd_base = (int)new_idt;
    lidt(&r_idt);
    idt = new_idt;
    if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
        VM_PROT_READ, FALSE) != KERN_SUCCESS)
        panic("vm_map_protect failed");
    return;
}
#endif /* defined(I586_CPU) && !NO_F00F_HACK */

int
ptrace_set_pc(struct thread *td, unsigned long addr)
{
    td->td_frame->tf_eip = addr;
    return (0);
}

int
ptrace_single_step(struct thread *td)
{
    td->td_frame->tf_eflags |= PSL_T;
    return (0);
}

int
fill_regs(struct thread *td, struct reg *regs)
{
    struct pcb *pcb;
    struct trapframe *tp;

    tp = td->td_frame;
    regs->r_fs = tp->tf_fs;
    regs->r_es = tp->tf_es;
    regs->r_ds = tp->tf_ds;
    regs->r_edi = tp->tf_edi;
    regs->r_esi = tp->tf_esi;
    regs->r_ebp = tp->tf_ebp;
    regs->r_ebx = tp->tf_ebx;
    regs->r_edx = tp->tf_edx;
    regs->r_ecx = tp->tf_ecx;
    regs->r_eax = tp->tf_eax;
    regs->r_eip = tp->tf_eip;
    regs->r_cs = tp->tf_cs;
    regs->r_eflags = tp->tf_eflags;
    regs->r_esp = tp->tf_esp;
    regs->r_ss = tp->tf_ss;
    pcb = td->td_pcb;
    regs->r_gs = pcb->pcb_gs;
    return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
    struct pcb *pcb;
    struct trapframe *tp;

    tp = td->td_frame;
    if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
        !CS_SECURE(regs->r_cs))
        return (EINVAL);
    tp->tf_fs = regs->r_fs;
    tp->tf_es = regs->r_es;
    tp->tf_ds = regs->r_ds;
    tp->tf_edi = regs->r_edi;
    tp->tf_esi = regs->r_esi;
    tp->tf_ebp = regs->r_ebp;
    tp->tf_ebx = regs->r_ebx;
    tp->tf_edx = regs->r_edx;
    tp->tf_ecx = regs->r_ecx;
    tp->tf_eax = regs->r_eax;
    tp->tf_eip = regs->r_eip;
    tp->tf_cs = regs->r_cs;
    tp->tf_eflags = regs->r_eflags;
    tp->tf_esp = regs->r_esp;
    tp->tf_ss = regs->r_ss;
    pcb = td->td_pcb;
    pcb->pcb_gs = regs->r_gs;
    return (0);
}
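
/*
 * Note on set_regs() above (background, not from the original source):
 * the EFL_SECURE()/CS_SECURE() tests keep a debugger from using
 * ptrace(2) to escalate its target's privileges.  EFL_SECURE() only
 * admits eflags changes within PSL_USERCHANGE, the flags a user may
 * legitimately toggle, so privileged bits such as IOPL stay fixed, and
 * CS_SECURE() insists the new %cs selector still has user (ring 3)
 * privilege.  A hypothetical attempt like
 *
 *     regs.r_cs = GSEL(GCODE_SEL, SEL_KPL);
 *     set_regs(td, &regs);
 *
 * therefore fails with EINVAL instead of handing the traced process a
 * kernel code segment.
 */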

#ifdef CPU_ENABLE_SSE
/*
 * Convert an FXSAVE-format (savexmm) FPU save area to the legacy
 * FSAVE (save87) layout.
 */
static void
fill_fpregs_xmm(sv_xmm, sv_87)
    struct savexmm *sv_xmm;
    struct save87 *sv_87;
{
    register struct env87 *penv_87 = &sv_87->sv_env;
    register struct envxmm *penv_xmm = &sv_xmm->sv_env;
    int i;

    /* FPU control/status */
    penv_87->en_cw = penv_xmm->en_cw;
    penv_87->en_sw = penv_xmm->en_sw;
    penv_87->en_tw = penv_xmm->en_tw;
    penv_87->en_fip = penv_xmm->en_fip;
    penv_87->en_fcs = penv_xmm->en_fcs;
    penv_87->en_opcode = penv_xmm->en_opcode;
    penv_87->en_foo = penv_xmm->en_foo;
    penv_87->en_fos = penv_xmm->en_fos;

    /* FPU registers */
    for (i = 0; i < 8; ++i)
        sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;

    sv_87->sv_ex_sw = sv_xmm->sv_ex_sw;
}

/*
 * Convert a legacy FSAVE (save87) FPU save area to the FXSAVE
 * (savexmm) layout.
 */
static void
set_fpregs_xmm(sv_87, sv_xmm)
    struct save87 *sv_87;
    struct savexmm *sv_xmm;
{
    register struct env87 *penv_87 = &sv_87->sv_env;
    register struct envxmm *penv_xmm = &sv_xmm->sv_env;
    int i;

    /* FPU control/status */
    penv_xmm->en_cw = penv_87->en_cw;
    penv_xmm->en_sw = penv_87->en_sw;
    penv_xmm->en_tw = penv_87->en_tw;
    penv_xmm->en_fip = penv_87->en_fip;
    penv_xmm->en_fcs = penv_87->en_fcs;
    penv_xmm->en_opcode = penv_87->en_opcode;
    penv_xmm->en_foo = penv_87->en_foo;
    penv_xmm->en_fos = penv_87->en_fos;

    /* FPU registers */
    for (i = 0; i < 8; ++i)
        sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];

    sv_xmm->sv_ex_sw = sv_87->sv_ex_sw;
}
#endif /* CPU_ENABLE_SSE */

int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{
#ifdef CPU_ENABLE_SSE
    if (cpu_fxsr) {
        fill_fpregs_xmm(&td->td_pcb->pcb_save.sv_xmm,
            (struct save87 *)fpregs);
        return (0);
    }
#endif /* CPU_ENABLE_SSE */
    bcopy(&td->td_pcb->pcb_save.sv_87, fpregs, sizeof *fpregs);
    return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{
#ifdef CPU_ENABLE_SSE
    if (cpu_fxsr) {
        set_fpregs_xmm((struct save87 *)fpregs,
            &td->td_pcb->pcb_save.sv_xmm);
        return (0);
    }
#endif /* CPU_ENABLE_SSE */
    bcopy(fpregs, &td->td_pcb->pcb_save.sv_87, sizeof *fpregs);
    return (0);
}

int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{
    struct pcb *pcb;

    if (td == NULL) {
        dbregs->dr0 = rdr0();
        dbregs->dr1 = rdr1();
        dbregs->dr2 = rdr2();
        dbregs->dr3 = rdr3();
        dbregs->dr4 = rdr4();
        dbregs->dr5 = rdr5();
        dbregs->dr6 = rdr6();
        dbregs->dr7 = rdr7();
    } else {
        pcb = td->td_pcb;
        dbregs->dr0 = pcb->pcb_dr0;
        dbregs->dr1 = pcb->pcb_dr1;
        dbregs->dr2 = pcb->pcb_dr2;
        dbregs->dr3 = pcb->pcb_dr3;
        dbregs->dr4 = 0;
        dbregs->dr5 = 0;
        dbregs->dr6 = pcb->pcb_dr6;
        dbregs->dr7 = pcb->pcb_dr7;
    }
    return (0);
}
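
/*
 * Layout of %dr7, for the validation loop in set_dbregs() below (per
 * the Intel architecture manuals; this note is not from the original
 * source):
 *
 *     bits  0-7     L0/G0 .. L3/G3 local/global enable bits
 *     bits 16-31    eight 2-bit fields: R/W0, LEN0, .. R/W3, LEN3
 *
 * In each 2-bit field the value 10b is the problem case: as an R/Wn it
 * selects I/O breakpoints, which is undefined unless CR4.DE is set, and
 * as a LENn it is undefined on the i386.  Walking mask1/mask2 up two
 * bits at a time over dr7[16:31] rejects exactly those patterns.
 */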

int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{
    struct pcb *pcb;
    int i;
    u_int32_t mask1, mask2;

    if (td == NULL) {
        load_dr0(dbregs->dr0);
        load_dr1(dbregs->dr1);
        load_dr2(dbregs->dr2);
        load_dr3(dbregs->dr3);
        load_dr4(dbregs->dr4);
        load_dr5(dbregs->dr5);
        load_dr6(dbregs->dr6);
        load_dr7(dbregs->dr7);
    } else {
        /*
         * Don't let an illegal value for dr7 get set.  Specifically,
         * check for undefined settings.  Setting these bit patterns
         * results in undefined behaviour and can lead to an unexpected
         * TRCTRAP.
         */
        for (i = 0, mask1 = 0x3 << 16, mask2 = 0x2 << 16; i < 8;
             i++, mask1 <<= 2, mask2 <<= 2)
            if ((dbregs->dr7 & mask1) == mask2)
                return (EINVAL);

        pcb = td->td_pcb;

        /*
         * Don't let a process set a breakpoint that is not within the
         * process's address space.  If a process could do this, it
         * could halt the system by setting a breakpoint in the kernel
         * (if ddb was enabled).  Thus, we need to check to make sure
         * that no breakpoints are being enabled for addresses outside
         * the process's address space, unless, perhaps, we were called
         * by uid 0.
         *
         * XXX - what about when the watched area of the user's
         * address space is written into from within the kernel
         * ... wouldn't that still cause a breakpoint to be generated
         * from within kernel mode?
         */

        if (suser_td(td) != 0) {
            if (dbregs->dr7 & 0x3) {
                /* dr0 is enabled */
                if (dbregs->dr0 >= VM_MAXUSER_ADDRESS)
                    return (EINVAL);
            }

            if (dbregs->dr7 & (0x3 << 2)) {
                /* dr1 is enabled */
                if (dbregs->dr1 >= VM_MAXUSER_ADDRESS)
                    return (EINVAL);
            }

            if (dbregs->dr7 & (0x3 << 4)) {
                /* dr2 is enabled */
                if (dbregs->dr2 >= VM_MAXUSER_ADDRESS)
                    return (EINVAL);
            }

            if (dbregs->dr7 & (0x3 << 6)) {
                /* dr3 is enabled */
                if (dbregs->dr3 >= VM_MAXUSER_ADDRESS)
                    return (EINVAL);
            }
        }

        pcb->pcb_dr0 = dbregs->dr0;
        pcb->pcb_dr1 = dbregs->dr1;
        pcb->pcb_dr2 = dbregs->dr2;
        pcb->pcb_dr3 = dbregs->dr3;
        pcb->pcb_dr6 = dbregs->dr6;
        pcb->pcb_dr7 = dbregs->dr7;

        pcb->pcb_flags |= PCB_DBREGS;
    }

    return (0);
}

/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0 otherwise.
 */
int
user_dbreg_trap(void)
{
    u_int32_t dr7, dr6;    /* debug registers dr6 and dr7 */
    u_int32_t bp;          /* breakpoint bits extracted from dr6 */
    int nbp;               /* number of breakpoints that triggered */
    caddr_t addr[4];       /* breakpoint addresses */
    int i;

    dr7 = rdr7();
    if ((dr7 & 0x000000ff) == 0) {
        /*
         * All of the enable bits (L0-L3, G0-G3) in the dr7
         * register are zero, thus the trap couldn't have been
         * caused by the hardware debug registers.
         */
        return 0;
    }

    nbp = 0;
    dr6 = rdr6();
    bp = dr6 & 0x0000000f;

    if (!bp) {
        /*
         * None of the breakpoint bits are set, meaning this
         * trap was not caused by any of the debug registers.
         */
        return 0;
    }

    /*
     * At least one of the breakpoints was hit; check which ones
     * and whether any of them are user space addresses.
     */

    if (bp & 0x01) {
        addr[nbp++] = (caddr_t)rdr0();
    }
    if (bp & 0x02) {
        addr[nbp++] = (caddr_t)rdr1();
    }
    if (bp & 0x04) {
        addr[nbp++] = (caddr_t)rdr2();
    }
    if (bp & 0x08) {
        addr[nbp++] = (caddr_t)rdr3();
    }

    for (i = 0; i < nbp; i++) {
        if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
            /*
             * addr[i] is in user space
             */
            return nbp;
        }
    }

    /*
     * None of the breakpoints are in user space.
     */
    return 0;
}
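
/*
 * user_dbreg_trap() is meant for the debug-trap path in the trap
 * handler: when a T_TRCTRAP arrives, the result tells the handler
 * whether the exception belongs to a userland hardware watchpoint
 * (deliver SIGTRAP) or to something the kernel must deal with itself.
 * Sketch of the caller's side (illustrative only, not from this file):
 *
 *     case T_TRCTRAP:
 *         if (user_dbreg_trap()) {
 *             ... deliver SIGTRAP to the current thread ...
 *             break;
 *         }
 *         ...
 */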

#ifndef DDB
void
Debugger(const char *msg)
{
    printf("Debugger(\"%s\") called.\n", msg);
}
#endif /* no DDB */

#include <sys/disklabel.h>

/*
 * Determine the size of the transfer, and make sure it is
 * within the boundaries of the partition.  Adjust transfer
 * if needed, and signal errors or early completion.
 */
int
bounds_check_with_label(struct bio *bp, struct disklabel *lp, int wlabel)
{
    struct partition *p = lp->d_partitions + dkpart(bp->bio_dev);
    int labelsect = lp->d_partitions[0].p_offset;
    int maxsz = p->p_size,
        sz = (bp->bio_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT;

    /* overwriting disk label? */
    /* XXX should also protect bootstrap in first 8K */
    if (bp->bio_blkno + p->p_offset <= LABELSECTOR + labelsect &&
#if LABELSECTOR != 0
        bp->bio_blkno + p->p_offset + sz > LABELSECTOR + labelsect &&
#endif
        (bp->bio_cmd == BIO_WRITE) && wlabel == 0) {
        bp->bio_error = EROFS;
        goto bad;
    }

#if defined(DOSBBSECTOR) && defined(notyet)
    /* overwriting master boot record? */
    if (bp->bio_blkno + p->p_offset <= DOSBBSECTOR &&
        (bp->bio_cmd == BIO_WRITE) && wlabel == 0) {
        bp->bio_error = EROFS;
        goto bad;
    }
#endif

    /* beyond partition? */
    if (bp->bio_blkno < 0 || bp->bio_blkno + sz > maxsz) {
        /* if exactly at end of disk, return an EOF */
        if (bp->bio_blkno == maxsz) {
            bp->bio_resid = bp->bio_bcount;
            return (0);
        }
        /* or truncate if part of it fits */
        sz = maxsz - bp->bio_blkno;
        if (sz <= 0) {
            bp->bio_error = EINVAL;
            goto bad;
        }
        bp->bio_bcount = sz << DEV_BSHIFT;
    }

    bp->bio_pblkno = bp->bio_blkno + p->p_offset;
    return (1);

bad:
    bp->bio_flags |= BIO_ERROR;
    return (-1);
}

#ifdef DDB

/*
 * Provide inb() and outb() as functions.  They are normally only
 * available as macros calling inlined functions, thus cannot be
 * called inside DDB.
 *
 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined.
 */

#undef inb
#undef outb

/* silence compiler warnings */
u_char inb(u_int);
void outb(u_int, u_char);

u_char
inb(u_int port)
{
    u_char data;
    /*
     * We use %%dx and not %1 here because i/o is done at %dx and not at
     * %edx, while gcc generates inferior code (movw instead of movl)
     * if we tell it to load (u_short) port.
     */
    __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
    return (data);
}

void
outb(u_int port, u_char data)
{
    u_char al;
    /*
     * Use an unnecessary assignment to help gcc's register allocator.
     * This makes a large difference for gcc-1.40 and a tiny difference
     * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
     * best results.  gcc-2.6.0 can't handle this.
     */
    al = data;
    __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

#endif /* DDB */
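
/*
 * Usage note (not from the original source): with the out-of-line
 * functions above compiled in, the ddb(4) prompt can poke at I/O ports
 * directly via its call command, e.g. (illustrative):
 *
 *     db> call inb(0x61)
 *     db> call outb(0x80, 0xff)
 *
 * which is impossible with the inline macro versions from
 * <machine/cpufunc.h>, since those leave no symbol to call.
 */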