/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 * $FreeBSD: head/sys/amd64/amd64/machdep.c 82031 2001-08-21 07:20:06Z dillon $
 */

#include "opt_atalk.h"
#include "opt_compat.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_ipx.h"
#include "opt_isa.h"
#include "opt_maxmem.h"
#include "opt_msgbuf.h"
#include "opt_npx.h"
#include "opt_perfmon.h"
/* #include "opt_userconfig.h" */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/smp.h>
#include <sys/callout.h>
#include <sys/msgbuf.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/user.h>
#include <sys/exec.h>
#include <sys/cons.h>

#include <ddb/ddb.h>

#include <net/netisr.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/reg.h>
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/bootinfo.h>
#include <machine/md_var.h>
#include <machine/pc/bios.h>
#include <machine/pcb_ext.h>		/* pcb.h included via sys/user.h */
#include <machine/globals.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif

#include <i386/isa/icu.h>
#include <i386/isa/intr_machdep.h>
#include <isa/rtc.h>
#include <machine/vm86.h>
#include <sys/ptrace.h>
#include <machine/sigframe.h>

extern void init386 __P((int first));
extern void dblfault_handler __P((void));

extern void printcpuinfo(void);	/* XXX header file */
extern void earlysetcpuclass(void);	/* same header file */
extern void finishidentcpu(void);
extern void panicifcpuunsupported(void);
extern void initializecpu(void);

#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)

static void cpu_startup __P((void *));
#ifdef CPU_ENABLE_SSE
static void set_fpregs_xmm __P((struct save87 *, struct savexmm *));
static void fill_fpregs_xmm __P((struct savexmm *, struct save87 *));
#endif /* CPU_ENABLE_SSE */
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)

int	_udatasel, _ucodesel;
u_int	atdevbase;

#if defined(SWTCH_OPTIM_STATS)
extern int swtch_optim_stats;
SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats,
	CTLFLAG_RD, &swtch_optim_stats, 0, "");
SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count,
	CTLFLAG_RD, &tlb_flush_count, 0, "");
#endif

#ifdef PC98
static int	ispc98 = 1;
#else
static int	ispc98 = 0;
#endif
SYSCTL_INT(_machdep, OID_AUTO, ispc98, CTLFLAG_RD, &ispc98, 0, "");

int physmem = 0;
int cold = 1;

#ifdef COMPAT_43
static void osendsig __P((sig_t catcher, int sig, sigset_t *mask, u_long code));
#endif

static int
sysctl_hw_physmem(SYSCTL_HANDLER_ARGS)
{
	int error = sysctl_handle_int(oidp, 0, ctob(physmem), req);
	return (error);
}
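
/*
 * Illustrative note: ctob() converts pages to bytes (physmem <<
 * PAGE_SHIFT on the i386), so with physmem = 16384 4K pages the handler
 * above reports 67108864 via "sysctl hw.physmem".
 */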

SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_physmem, "IU", "");

static int
sysctl_hw_usermem(SYSCTL_HANDLER_ARGS)
{
	int error = sysctl_handle_int(oidp, 0,
		ctob(physmem - cnt.v_wire_count), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_usermem, "IU", "");

static int
sysctl_hw_availpages(SYSCTL_HANDLER_ARGS)
{
	int error = sysctl_handle_int(oidp, 0,
		i386_btop(avail_end - avail_start), req);
	return (error);
}

SYSCTL_PROC(_hw, OID_AUTO, availpages, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_availpages, "I", "");

int Maxmem = 0;
long dumplo;

vm_offset_t phys_avail[10];

/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)

static vm_offset_t buffer_sva, buffer_eva;
vm_offset_t clean_sva, clean_eva;
static vm_offset_t pager_sva, pager_eva;
static struct trapframe proc0_tf;
#ifndef SMP
static struct globaldata __globaldata;
#endif

struct mtx sched_lock;
struct mtx Giant;

static void
cpu_startup(dummy)
	void *dummy;
{
	register unsigned i;
	register caddr_t v;
	vm_offset_t maxaddr;
	vm_size_t size = 0;
	int firstaddr;
	vm_offset_t minaddr;
	int physmem_est;	/* in pages */

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	earlysetcpuclass();
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory  = %u (%uK bytes)\n", ptoa(Maxmem),
	    ptoa(Maxmem) / 1024);
	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			unsigned int size1;

			size1 = phys_avail[indx + 1] - phys_avail[indx];
			printf("0x%08x - 0x%08x, %u bytes (%u pages)\n",
			    phys_avail[indx], phys_avail[indx + 1] - 1, size1,
			    size1 / PAGE_SIZE);
		}
	}

	/*
	 * Calculate callout wheel size
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;
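
	/*
	 * Worked example (illustrative): ncallout = 100 yields the smallest
	 * power of two >= 100, i.e. callwheelsize = 128, callwheelbits = 7
	 * and callwheelmask = 0x7f, so the callout wheel hash in
	 * kern_timeout.c maps a callout expiring at tick t to slot (t & 0x7f).
	 */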

	/*
	 * Allocate space for system data structures.
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */

	/*
	 * Make two passes.  The first pass calculates how much memory is
	 * needed and allocates it.  The second pass assigns virtual
	 * addresses to the various data structures.
	 */
	firstaddr = 0;
again:
	v = (caddr_t)firstaddr;

#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))

	valloc(callout, struct callout, ncallout);
	valloc(callwheel, struct callout_tailq, callwheelsize);

	/*
	 * Discount the physical memory larger than the size of kernel_map
	 * to avoid eating up all of KVA space.
	 */
	if (kernel_map->first_free == NULL) {
		printf("Warning: no free entries in kernel_map.\n");
		physmem_est = physmem;
	} else {
		physmem_est = min(physmem, btoc(kernel_map->max_offset -
		    kernel_map->min_offset));
	}

	/*
	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
	 * For the first 64MB of ram nominally allocate sufficient buffers to
	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
	 * buffers to cover 1/20 of our ram over 64MB.  When auto-sizing
	 * the buffer cache we limit the eventual kva reservation to
	 * maxbcache bytes.
	 *
	 * factor represents the 1/4 x ram conversion.
	 */
	if (nbuf == 0) {
		int factor = 4 * BKVASIZE / PAGE_SIZE;

		nbuf = 50;
		if (physmem_est > 1024)
			nbuf += min((physmem_est - 1024) / factor,
			    16384 / factor);
		if (physmem_est > 16384)
			nbuf += (physmem_est - 16384) * 2 / (factor * 5);

		if (maxbcache && nbuf > maxbcache / BKVASIZE)
			nbuf = maxbcache / BKVASIZE;
	}
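
	/*
	 * Worked example, assuming PAGE_SIZE = 4K and BKVASIZE = 16K so
	 * factor = 16: 128MB of RAM gives physmem_est = 32768 pages, hence
	 * nbuf = 50 + min(31744 / 16, 1024) + (32768 - 16384) * 2 / 80
	 *      = 50 + 1024 + 409 = 1483 buffers,
	 * before the maxbcache and kernel_map caps below are applied.
	 */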

	/*
	 * Do not allow the buffer_map to be more than 1/2 the size of the
	 * kernel_map.
	 */
	if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) /
	    (BKVASIZE * 2)) {
		nbuf = (kernel_map->max_offset - kernel_map->min_offset) /
		    (BKVASIZE * 2);
		printf("Warning: nbufs capped at %d\n", nbuf);
	}

	nswbuf = max(min(nbuf/4, 256), 16);

	valloc(swbuf, struct buf, nswbuf);
	valloc(buf, struct buf, nbuf);
	v = bufhashinit(v);

	/*
	 * End of first pass, size has been calculated so allocate memory
	 */
	if (firstaddr == 0) {
		size = (vm_size_t)(v - firstaddr);
		firstaddr = (int)kmem_alloc(kernel_map, round_page(size));
		if (firstaddr == 0)
			panic("startup: no room for tables");
		goto again;
	}

	/*
	 * End of second pass, addresses have been assigned
	 */
	if ((vm_size_t)(v - firstaddr) != size)
		panic("startup: table size inconsistency");

	clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
	    (nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
	buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
	    (nbuf*BKVASIZE));
	buffer_map->system_map = 1;
	pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
	    (nswbuf*MAXPHYS) + pager_map_size);
	pager_map->system_map = 1;
	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
	    (16*(ARG_MAX+(PAGE_SIZE*3))));

	/*
	 * XXX: Mbuf system machine-specific initializations should
	 * go here, if anywhere.
	 */

	/*
	 * Initialize callouts
	 */
	SLIST_INIT(&callfree);
	for (i = 0; i < ncallout; i++) {
		callout_init(&callout[i], 0);
		callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
	}

	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&callwheel[i]);
	}

	mtx_init(&callout_lock, "callout", MTX_SPIN | MTX_RECURSE);

#if defined(USERCONFIG)
	userconfig();
	cninit();		/* the preferred console may have changed */
#endif

	printf("avail memory = %u (%uK bytes)\n", ptoa(cnt.v_free_count),
	    ptoa(cnt.v_free_count) / 1024);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();

	globaldata_register(GLOBALDATA);
#ifndef SMP
	/* For SMP, we delay the cpu_setregs() until after SMP startup. */
	cpu_setregs();
#endif
}

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
#ifdef COMPAT_43
static void
osendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct osigframe sf;
	struct osigframe *fp;
	struct proc *p;
	struct sigacts *psp;
	struct trapframe *regs;
	int oonstack;

	p = curproc;
	PROC_LOCK(p);
	psp = p->p_sigacts;
	regs = p->p_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Allocate and validate space for the signal handler context. */
	if ((p->p_flag & P_ALTSTACK) && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct osigframe *)(p->p_sigstk.ss_sp +
		    p->p_sigstk.ss_size - sizeof(struct osigframe));
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct osigframe *)regs->tf_esp - 1;
	PROC_UNLOCK(p);

	/*
	 * grow_stack() will return 0 if *fp does not fit inside the stack
	 * and the stack cannot be grown.
	 * useracc() will return FALSE if access is denied.
	 */
	if (grow_stack(p, (int)fp) == 0 ||
	    !useracc((caddr_t)fp, sizeof(*fp), VM_PROT_WRITE)) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		PROC_LOCK(p);
		SIGACTION(p, SIGILL) = SIG_DFL;
		SIGDELSET(p->p_sigignore, SIGILL);
		SIGDELSET(p->p_sigcatch, SIGILL);
		SIGDELSET(p->p_sigmask, SIGILL);
		psignal(p, SIGILL);
		PROC_UNLOCK(p);
		return;
	}

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
	PROC_LOCK(p);
	if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_arg2 = (register_t)&fp->sf_siginfo;
		sf.sf_siginfo.si_signo = sig;
		sf.sf_siginfo.si_code = code;
		sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_arg2 = code;
		sf.sf_addr = regs->tf_err;
		sf.sf_ahu.sf_handler = catcher;
	}
	PROC_UNLOCK(p);

	/* Save most if not all of trap frame. */
	sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
	sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
	sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
	sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
	sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
	sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
	sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
	sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
	sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
	sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
	sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
	sf.sf_siginfo.si_sc.sc_gs = rgs();
	sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;

	/* Build the signal context to be used by osigreturn(). */
	sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
	SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
	sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
	sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
	sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
	sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
	sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
	sf.sf_siginfo.si_sc.sc_err = regs->tf_err;

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		/* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86;

		sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
		sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
		sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
		sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_siginfo.si_sc.sc_ps =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/* See sendsig() for comments. */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP);
	}

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&sf, fp, sizeof(*fp)) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		PROC_LOCK(p);
		sigexit(p, SIGILL);
		/* NOTREACHED */
	}

	regs->tf_esp = (int)fp;
	regs->tf_eip = PS_STRINGS - szosigcode;
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	load_gs(_udatasel);
	regs->tf_ss = _udatasel;
}
#endif
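
/*
 * Sketch of the result (descriptive only): regs->tf_esp now points at
 * the osigframe just written to the user stack, the saved machine
 * context lives in sf_siginfo.si_sc, and regs->tf_eip targets the
 * old-style signal trampoline at PS_STRINGS - szosigcode, which calls
 * the handler and then osigreturn() with that context.
 */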

void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct sigframe sf;
	struct proc *p;
	struct sigacts *psp;
	struct trapframe *regs;
	struct sigframe *sfp;
	int oonstack;

	p = curproc;
	PROC_LOCK(p);
	psp = p->p_sigacts;
#ifdef COMPAT_43
	if (SIGISMEMBER(psp->ps_osigset, sig)) {
		PROC_UNLOCK(p);
		osendsig(catcher, sig, mask, code);
		return;
	}
#endif
	regs = p->p_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = p->p_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (p->p_flag & P_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));

	/* Allocate and validate space for the signal handler context. */
	if ((p->p_flag & P_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe *)(p->p_sigstk.ss_sp +
		    p->p_sigstk.ss_size - sizeof(struct sigframe));
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct sigframe *)regs->tf_esp - 1;
	PROC_UNLOCK(p);

	/*
	 * grow_stack() will return 0 if *sfp does not fit inside the stack
	 * and the stack cannot be grown.
	 * useracc() will return FALSE if access is denied.
	 */
	if (grow_stack(p, (int)sfp) == 0 ||
	    !useracc((caddr_t)sfp, sizeof(*sfp), VM_PROT_WRITE)) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
#ifdef DEBUG
		printf("process %d has trashed its stack\n", p->p_pid);
#endif
		PROC_LOCK(p);
		SIGACTION(p, SIGILL) = SIG_DFL;
		SIGDELSET(p->p_sigignore, SIGILL);
		SIGDELSET(p->p_sigcatch, SIGILL);
		SIGDELSET(p->p_sigmask, SIGILL);
		psignal(p, SIGILL);
		PROC_UNLOCK(p);
		return;
	}

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	PROC_LOCK(p);
	if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill siginfo structure. */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = code;
		sf.sf_si.si_addr = (void *)regs->tf_err;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = code;
		sf.sf_addr = regs->tf_err;
		sf.sf_ahu.sf_handler = catcher;
	}
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * We should never have PSL_T set when returning from vm86
		 * mode.  It may be set here if we deliver a signal before
		 * getting to vm86 mode, so turn it off.
		 *
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP);
	}

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		PROC_LOCK(p);
		sigexit(p, SIGILL);
		/* NOTREACHED */
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 */
#ifdef COMPAT_43
int
osigreturn(p, uap)
	struct proc *p;
	struct osigreturn_args /* {
		struct osigcontext *sigcntxp;
	} */ *uap;
{
	struct trapframe *regs;
	struct osigcontext *scp;
	int eflags;

	regs = p->p_frame;
	scp = uap->sigcntxp;
	if (!useracc((caddr_t)scp, sizeof(*scp), VM_PROT_READ))
		return (EFAULT);
	eflags = scp->sc_ps;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (p->p_addr->u_pcb.pcb_ext == 0)
			return (EINVAL);
		vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(p, SIGBUS, 0);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		tf->tf_vm86_ds = scp->sc_ds;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		if (!CS_SECURE(scp->sc_cs)) {
			trapsignal(p, SIGBUS, T_PROTFLT);
			return (EINVAL);
		}
		regs->tf_ds = scp->sc_ds;
		regs->tf_es = scp->sc_es;
		regs->tf_fs = scp->sc_fs;
	}

	/* Restore remaining registers. */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;

	PROC_LOCK(p);
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
	if (scp->sc_onstack & 1)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	SIGSETOLD(p->p_sigmask, scp->sc_mask);
	SIG_CANTMASK(p->p_sigmask);
	PROC_UNLOCK(p);
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;
	return (EJUSTRETURN);
}
#endif

int
sigreturn(p, uap)
	struct proc *p;
	struct sigreturn_args /* {
		ucontext_t *sigcntxp;
	} */ *uap;
{
	struct trapframe *regs;
	ucontext_t *ucp;
	int cs, eflags;

	ucp = uap->sigcntxp;
#ifdef COMPAT_43
	if (!useracc((caddr_t)ucp, sizeof(struct osigcontext), VM_PROT_READ))
		return (EFAULT);
	if (((struct osigcontext *)ucp)->sc_trapno == 0x01d516)
		return (osigreturn(p, (struct osigreturn_args *)uap));
	/*
	 * Since ucp is not an osigcontext but a ucontext_t, we have to
	 * check again if all of it is accessible.  A ucontext_t is
	 * much larger, so instead of just checking for the pointer
	 * being valid for the size of an osigcontext, now check for
	 * it being valid for a whole, new-style ucontext_t.
	 */
#endif
	if (!useracc((caddr_t)ucp, sizeof(*ucp), VM_PROT_READ))
		return (EFAULT);

	regs = p->p_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (p->p_addr->u_pcb.pcb_ext == 0)
			return (EINVAL);
		vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(p, SIGBUS, 0);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
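		/*
		 * Illustrative example of the check below: a handler that
		 * sets the privileged PSL_IOPL bits in the saved context
		 * changes bits outside PSL_USERCHANGE, so EFL_SECURE()
		 * fails and EINVAL is returned.
		 */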
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			printf("sigreturn: eflags = 0x%x\n", eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			printf("sigreturn: cs = 0x%x\n", cs);
			trapsignal(p, SIGBUS, T_PROTFLT);
			return (EINVAL);
		}

		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

	PROC_LOCK(p);
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
	if (ucp->uc_mcontext.mc_onstack & 1)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	p->p_sigmask = ucp->uc_sigmask;
	SIG_CANTMASK(p->p_sigmask);
	PROC_UNLOCK(p);
	return (EJUSTRETURN);
}

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		__asm__ ("hlt");
}

/*
 * Hook to idle the CPU when possible.  This currently only works in
 * the !SMP case, as there is no clean way to ensure that a CPU will be
 * woken when there is work available for it.
 */
static int	cpu_idle_hlt = 1;
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
    &cpu_idle_hlt, 0, "Idle loop HLT enable");

/*
 * Note that we have to be careful here to avoid a race between checking
 * procrunnable() and actually halting.  If we don't do this, we may waste
 * the time between calling hlt and the next interrupt even though there
 * is a runnable process.
 */
void
cpu_idle(void)
{
#ifndef SMP
	if (cpu_idle_hlt) {
		disable_intr();
		if (procrunnable())
			enable_intr();
		else {
			enable_intr();
			__asm __volatile("hlt");
		}
	}
#endif
}
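
/*
 * Explanatory note: on the i386 a "sti" keeps interrupts masked until
 * after the next instruction, so the enable_intr() immediately followed
 * by "hlt" above closes the window between the procrunnable() check and
 * halting that the comment describes.
 */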

/*
 * Clear registers on exec
 */
void
setregs(p, entry, stack, ps_strings)
	struct proc *p;
	u_long entry;
	u_long stack;
	u_long ps_strings;
{
	struct trapframe *regs = p->p_frame;
	struct pcb *pcb = &p->p_addr->u_pcb;

	if (pcb->pcb_ldt)
		user_ldt_free(pcb);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = entry;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = ps_strings;

	/* reset %gs as well */
	if (pcb == PCPU_GET(curpcb))
		load_gs(_udatasel);
	else
		pcb->pcb_gs = _udatasel;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == PCPU_GET(curpcb)) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	p->p_addr->u_pcb.pcb_flags &= ~FP_SOFTFP;

	/*
	 * Arrange to trap the next npx or `fwait' instruction (see npx.c
	 * for why fwait must be trapped at least if there is an npx or an
	 * emulator).  This is mainly to handle the case where npx0 is not
	 * configured, since the npx routines normally set up the trap
	 * otherwise.  It should be done only at boot time, but doing it
	 * here allows modifying `npx_exists' for testing the emulator on
	 * systems with an npx.
	 */
	load_cr0(rcr0() | CR0_MP | CR0_TS);

#ifdef DEV_NPX
	/* Initialize the npx (if any) for the current process. */
	npxinit(__INITIAL_NPXCW__);
#endif

	/*
	 * XXX - Linux emulator
	 * Make sure edx is 0x0 on entry.  Linux binaries depend
	 * on it.
	 */
	p->p_retval[1] = 0;
}

void
cpu_setregs(void)
{
	unsigned int cr0;

	cr0 = rcr0();
	cr0 |= CR0_NE;			/* Done by npxinit() */
	cr0 |= CR0_MP | CR0_TS;		/* Done at every execve() too. */
#ifndef I386_CPU
	cr0 |= CR0_WP | CR0_AM;
#endif
	load_cr0(cr0);
	load_gs(_udatasel);
}

static int
sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS)
{
	int error;
	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
		req);
	if (!error && req->newptr)
		resettodr();
	return (error);
}

SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
	&adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");

SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set,
	CTLFLAG_RW, &disable_rtc_set, 0, "");

SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo,
	CTLFLAG_RD, &bootinfo, bootinfo, "");

SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock,
	CTLFLAG_RW, &wall_cmos_clock, 0, "");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int _default_ldt;
union descriptor gdt[NGDT * MAXCPU];	/* global descriptor table */
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */
#ifdef SMP
/* table descriptors - used to load tables by microprocessor */
struct region_descriptor r_gdt, r_idt;
#endif

int private_tss;			/* flag indicating private tss */

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];

extern struct user *proc0paddr;


/* software prototypes -- in more palatable form */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	0x0,			/* segment base address  */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GCODE_SEL	1 Code Descriptor for kernel */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GDATA_SEL	2 Data Descriptor for kernel */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GPRIV_SEL	3 SMP Per-Processor Private Data Descriptor */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GPROC0_SEL	4 Proc 0 Tss Descriptor */
{
	0x0,			/* segment base address */
	sizeof(struct i386tss)-1,/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GLDT_SEL	5 LDT Descriptor */
{	(int) ldt,		/* segment base address  */
	sizeof(ldt)-1,		/* length - all address space */
	SDT_SYSLDT,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GUSERLDT_SEL	6 User LDT Descriptor per process */
{	(int) ldt,		/* segment base address  */
	(512 * sizeof(union descriptor)-1),	/* length */
	SDT_SYSLDT,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GTGATE_SEL	7 Null Descriptor - Placeholder */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
{	0x400,			/* segment base address */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GPANIC_SEL	9 Panic Tss Descriptor */
{	(int) &dblfault_tss,	/* segment base address  */
	sizeof(struct i386tss)-1,/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GBIOSCODE32_SEL 10 BIOS 32-bit interface (32bit Code) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSCODE16_SEL 11 BIOS 32-bit interface (16bit Code) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSDATA_SEL 12 BIOS 32-bit interface (Data) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSUTIL_SEL 13 BIOS 16-bit interface (Utility) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSARGS_SEL 14 BIOS 16-bit interface (Arguments) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
};

static struct soft_segment_descriptor ldt_segs[] = {
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Code Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Data Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
};

void
setidt(idx, func, typ, dpl, selec)
	int idx;
	inthand_t *func;
	int typ;
	int dpl;
	int selec;
{
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = (int)func;
	ip->gd_selector = selec;
	ip->gd_stkcpy = 0;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((int)func)>>16 ;
}
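
/*
 * Illustrative example: for a handler at address 0xc0123456, setidt()
 * above stores gd_looffset = 0x3456 and gd_hioffset = 0xc012; the CPU
 * reassembles the full 32-bit offset when the gate is used.
 */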

#define	IDTVEC(name)	__CONCAT(X,name)

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm), IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);

void
sdtossd(sd, ssd)
	struct segment_descriptor *sd;
	struct soft_segment_descriptor *ssd;
{
	ssd->ssd_base  = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type  = sd->sd_type;
	ssd->ssd_dpl   = sd->sd_dpl;
	ssd->ssd_p     = sd->sd_p;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran  = sd->sd_gran;
}

#define PHYSMAP_SIZE	(2 * 8)

/*
 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * If we cannot accurately determine the physical memory map, then use
 * value from the 0xE801 call, and failing that, the RTC.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 */
static void
getmemsize(int first)
{
	int i, physmap_idx, pa_indx;
	u_int basemem, extmem;
	struct vm86frame vmf;
	struct vm86context vmc;
	vm_offset_t pa, physmap[PHYSMAP_SIZE];
	pt_entry_t pte;
	const char *cp;
	struct bios_smap *smap;

	bzero(&vmf, sizeof(struct vm86frame));
	bzero(physmap, sizeof(physmap));

	/*
	 * Perform "base memory" related probes & setup
	 */
	vm86_intcall(0x12, &vmf);
	basemem = vmf.vmf_ax;
	if (basemem > 640) {
		printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
			basemem);
		basemem = 640;
	}

	/*
	 * XXX if biosbasemem is now < 640, there is a `hole'
	 * between the end of base memory and the start of
	 * ISA memory.  The hole may be empty or it may
	 * contain BIOS code or data.  Map it read/write so
	 * that the BIOS can write to it.  (Memory from 0 to
	 * the physical end of the kernel is mapped read-only
	 * to begin with and then parts of it are remapped.
	 * The parts that aren't remapped form holes that
	 * remain read-only and are unused by the kernel.
	 * The base memory area is below the physical end of
	 * the kernel and right now forms a read-only hole.
	 * The part of it from PAGE_SIZE to
	 * (trunc_page(biosbasemem * 1024) - 1) will be
	 * remapped and used by the kernel later.)
	 *
	 * This code is similar to the code used in
	 * pmap_mapdev, but since no memory needs to be
	 * allocated we simply change the mapping.
	 */
	for (pa = trunc_page(basemem * 1024);
	     pa < ISA_HOLE_START; pa += PAGE_SIZE) {
		pte = (pt_entry_t)vtopte(pa + KERNBASE);
		*pte = pa | PG_RW | PG_V;
	}

	/*
	 * if basemem != 640, map pages r/w into vm86 page table so
	 * that the bios can scribble on it.
	 */
	pte = (pt_entry_t)vm86paddr;
	for (i = basemem / 4; i < 160; i++)
		pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;

	/*
	 * map page 1 R/W into the kernel page table so we can use it
	 * as a buffer.  The kernel will unmap this page later.
	 */
	pte = (pt_entry_t)vtopte(KERNBASE + (1 << PAGE_SHIFT));
	*pte = (1 << PAGE_SHIFT) | PG_RW | PG_V;

	/*
	 * get memory map with INT 15:E820
	 */
	vmc.npages = 0;
	smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
	vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);

	physmap_idx = 0;
	vmf.vmf_ebx = 0;
	do {
		vmf.vmf_eax = 0xE820;
		vmf.vmf_edx = SMAP_SIG;
		vmf.vmf_ecx = sizeof(struct bios_smap);
		i = vm86_datacall(0x15, &vmf, &vmc);
		if (i || vmf.vmf_eax != SMAP_SIG)
			break;
		if (boothowto & RB_VERBOSE)
			printf("SMAP type=%02x base=%08x %08x len=%08x %08x\n",
			    smap->type,
			    *(u_int32_t *)((char *)&smap->base + 4),
			    (u_int32_t)smap->base,
			    *(u_int32_t *)((char *)&smap->length + 4),
			    (u_int32_t)smap->length);

		if (smap->type != 0x01)
			goto next_run;

		if (smap->length == 0)
			goto next_run;

		if (smap->base >= 0xffffffff) {
			printf("%uK of memory above 4GB ignored\n",
			    (u_int)(smap->length / 1024));
			goto next_run;
		}

		for (i = 0; i <= physmap_idx; i += 2) {
			if (smap->base < physmap[i + 1]) {
				if (boothowto & RB_VERBOSE)
					printf(
	"Overlapping or non-monotonic memory region, ignoring second region\n");
				goto next_run;
			}
		}

		if (smap->base == physmap[physmap_idx + 1]) {
			physmap[physmap_idx + 1] += smap->length;
			goto next_run;
		}

		physmap_idx += 2;
		if (physmap_idx == PHYSMAP_SIZE) {
			printf(
		"Too many segments in the physical address map, giving up\n");
			break;
		}
		physmap[physmap_idx] = smap->base;
		physmap[physmap_idx + 1] = smap->base + smap->length;
next_run:
		;	/* empty statement; a label must precede a statement */
	} while (vmf.vmf_ebx != 0);

	if (physmap[1] != 0)
		goto physmap_done;

	/*
	 * If we failed above, try memory map with INT 15:E801
	 */
	vmf.vmf_ax = 0xE801;
	if (vm86_intcall(0x15, &vmf) == 0) {
		extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
	} else {
#if 0
		vmf.vmf_ah = 0x88;
		vm86_intcall(0x15, &vmf);
		extmem = vmf.vmf_ax;
#else
		/*
		 * Prefer the RTC value for extended memory.
		 */
		extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
#endif
	}

	/*
	 * Special hack for chipsets that still remap the 384k hole when
	 * there's 16MB of memory - this really confuses people that
	 * are trying to use bus mastering ISA controllers with the
	 * "16MB limit"; they only have 16MB, but the remapping puts
	 * them beyond the limit.
	 *
	 * If extended memory is between 15-16MB (16-17MB phys address range),
	 * chop it to 15MB.
	 */
	if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
		extmem = 15 * 1024;

	physmap[0] = 0;
	physmap[1] = basemem * 1024;
	physmap_idx = 2;
	physmap[physmap_idx] = 0x100000;
	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;

physmap_done:
	/*
	 * Now, physmap contains a map of physical memory.
	 */
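	/*
	 * Typical contents at this point (illustrative): physmap[0] = 0,
	 * physmap[1] = end of base memory (e.g. 0x9fc00), physmap[2] =
	 * 0x100000, physmap[3] = top of RAM, leaving the ISA hole between
	 * 640K and 1MB out of the map.
	 */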

#ifdef SMP
	/* make hole for AP bootstrap code */
	physmap[1] = mp_bootaddress(physmap[1] / 1024);

	/* look for the MP hardware - needed for apic addresses */
	i386_mp_probe();
#endif

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".  We may adjust this
	 * based on ``hw.physmem'' and the results of the memory test.
	 */
	Maxmem = atop(physmap[physmap_idx + 1]);

#ifdef MAXMEM
	Maxmem = MAXMEM / 4;
#endif

	/*
	 * hw.physmem is a size in bytes; we also allow k, m, and g suffixes
	 * for the appropriate modifiers.  This overrides MAXMEM.
	 */
	if ((cp = getenv("hw.physmem")) != NULL) {
		u_int64_t AllowMem, sanity;
		char *ep;

		sanity = AllowMem = strtouq(cp, &ep, 0);
		if ((ep != cp) && (*ep != 0)) {
			switch(*ep) {
			case 'g':
			case 'G':
				AllowMem <<= 10;
				/* FALLTHROUGH */
			case 'm':
			case 'M':
				AllowMem <<= 10;
				/* FALLTHROUGH */
			case 'k':
			case 'K':
				AllowMem <<= 10;
				break;
			default:
				AllowMem = sanity = 0;
			}
			if (AllowMem < sanity)
				AllowMem = 0;
		}
		if (AllowMem == 0)
			printf("Ignoring invalid memory size of '%s'\n", cp);
		else
			Maxmem = atop(AllowMem);
	}

	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
	    (boothowto & RB_VERBOSE))
		printf("Physical memory use set to %uK\n", Maxmem * 4);

	/*
	 * If Maxmem has been increased beyond what the system has detected,
	 * extend the last memory segment to the new limit.
	 */
	if (atop(physmap[physmap_idx + 1]) < Maxmem)
		physmap[physmap_idx + 1] = ptoa(Maxmem);

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(first, 0);

	/*
	 * Size up each available chunk of physical memory.
	 */
	physmap[0] = PAGE_SIZE;		/* mask off page 0 */
	pa_indx = 0;
	phys_avail[pa_indx++] = physmap[0];
	phys_avail[pa_indx] = physmap[0];
#if 0
	pte = (pt_entry_t)vtopte(KERNBASE);
#else
	pte = (pt_entry_t)CMAP1;
#endif

	/*
	 * physmap is in bytes, so when converting to page boundaries,
	 * round up the start address and round down the end address.
	 */
	for (i = 0; i <= physmap_idx; i += 2) {
		vm_offset_t end;

		end = ptoa(Maxmem);
		if (physmap[i + 1] < end)
			end = trunc_page(physmap[i + 1]);
		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
			int tmp, page_bad;
#if 0
			int *ptr = 0;
#else
			int *ptr = (int *)CADDR1;
#endif

			/*
			 * block out kernel memory as not available.
			 */
			if (pa >= 0x100000 && pa < first)
				continue;

			page_bad = FALSE;

			/*
			 * map page into kernel: valid, read/write,non-cacheable
			 */
			*pte = pa | PG_V | PG_RW | PG_N;
			invltlb();

			tmp = *(int *)ptr;
			/*
			 * Test for alternating 1's and 0's
			 */
			*(volatile int *)ptr = 0xaaaaaaaa;
			if (*(volatile int *)ptr != 0xaaaaaaaa) {
				page_bad = TRUE;
			}
			/*
			 * Test for alternating 0's and 1's
			 */
			*(volatile int *)ptr = 0x55555555;
			if (*(volatile int *)ptr != 0x55555555) {
				page_bad = TRUE;
			}
			/*
			 * Test for all 1's
			 */
			*(volatile int *)ptr = 0xffffffff;
			if (*(volatile int *)ptr != 0xffffffff) {
				page_bad = TRUE;
			}
			/*
			 * Test for all 0's
			 */
			*(volatile int *)ptr = 0x0;
			if (*(volatile int *)ptr != 0x0) {
				page_bad = TRUE;
			}
			/*
			 * Restore original value.
			 */
			*(int *)ptr = tmp;

			/*
			 * Adjust array of valid/good pages.
			 */
			if (page_bad == TRUE) {
				continue;
			}
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer.  Otherwise start a new chunk.
			 * Note that "end" points one page past the last
			 * valid page, so the range is >= start and < end.
			 * If we're also doing a speculative memory
			 * test and we are at or past the end, bump up Maxmem
			 * so that we keep going.  The first bad page
			 * will terminate the loop.
			 */
			if (phys_avail[pa_indx] == pa) {
				phys_avail[pa_indx] += PAGE_SIZE;
			} else {
				pa_indx++;
				if (pa_indx == PHYS_AVAIL_ARRAY_END) {
					printf(
		"Too many holes in the physical address space, giving up\n");
					pa_indx--;
					break;
				}
				phys_avail[pa_indx++] = pa;	/* start */
				phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */
			}
			physmem++;
		}
	}
	*pte = 0;
	invltlb();
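
	/*
	 * phys_avail[] now holds (start, end) physical address pairs of
	 * tested, usable memory, terminated by a 0/0 pair; illustratively
	 * { PAGE_SIZE, base_end, first, top_of_ram, 0, 0 } on a simple
	 * machine, with page 0 and the kernel image excluded.
	 */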

	/*
	 * XXX
	 * The last chunk must contain at least one page plus the message
	 * buffer to avoid complicating other code (message buffer address
	 * calculation, etc.).
	 */
	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
	    round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
		phys_avail[pa_indx--] = 0;
		phys_avail[pa_indx--] = 0;
	}

	Maxmem = atop(phys_avail[pa_indx]);

	/* Trim off space for the message buffer. */
	phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);

	avail_end = phys_avail[pa_indx];
}

void
init386(first)
	int first;
{
	struct gate_descriptor *gdp;
	int gsel_tss, metadata_missing, off, x;
#ifndef SMP
	/* table descriptors - used to load tables by microprocessor */
	struct region_descriptor r_gdt, r_idt;
#endif

	proc0.p_addr = proc0paddr;

	atdevbase = ISA_HOLE_START + KERNBASE;

	metadata_missing = 0;
	if (bootinfo.bi_modulep) {
		preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
		preload_bootstrap_relocate(KERNBASE);
	} else {
		metadata_missing = 1;
	}
	if (bootinfo.bi_envp)
		kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;

	/* Init basic tunables, hz etc */
	init_param();

	/*
	 * make gdt memory segments, the code segment goes up to end of the
	 * page with etext in it, the data segment goes to the end of
	 * the address space
	 */
	/*
	 * XXX text protection is temporarily (?) disabled.  The limit was
	 * i386_btop(round_page(etext)) - 1.
	 */
	gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
#ifdef SMP
	gdt_segs[GPRIV_SEL].ssd_limit =
		atop(sizeof(struct privatespace) - 1);
	gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[0];
	gdt_segs[GPROC0_SEL].ssd_base =
		(int) &SMP_prvspace[0].globaldata.gd_common_tss;
	SMP_prvspace[0].globaldata.gd_prvspace = &SMP_prvspace[0].globaldata;
#else
	gdt_segs[GPRIV_SEL].ssd_limit =
		atop(sizeof(struct globaldata) - 1);
	gdt_segs[GPRIV_SEL].ssd_base = (int) &__globaldata;
	gdt_segs[GPROC0_SEL].ssd_base =
		(int) &__globaldata.gd_common_tss;
	__globaldata.gd_prvspace = &__globaldata;
#endif

	for (x = 0; x < NGDT; x++) {
#ifdef BDE_DEBUGGER
		/* avoid overwriting db entries with APM ones */
		if (x >= GAPMCODE32_SEL && x <= GAPMDATA_SEL)
			continue;
#endif
		ssdtosd(&gdt_segs[x], &gdt[x].sd);
	}

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base =  (int) gdt;
	lgdt(&r_gdt);
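
	/*
	 * Selector arithmetic used below (illustrative): GSEL(GCODE_SEL,
	 * SEL_KPL) is (1 << 3) | 0 = 0x08, the kernel code selector that
	 * the IDT gates and %cs are loaded with.
	 */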

	/* setup curproc so that mutexes work */
	PCPU_SET(curproc, &proc0);
	PCPU_SET(spinlocks, NULL);

	LIST_INIT(&proc0.p_contested);

	/*
	 * Initialize mutexes.
	 */
	mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE);
	mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_RECURSE);
	mtx_init(&proc0.p_mtx, "process lock", MTX_DEF);
	mtx_init(&clock_lock, "clk", MTX_SPIN | MTX_RECURSE);
#ifdef SMP
	mtx_init(&imen_mtx, "imen", MTX_SPIN);
#endif
	mtx_lock(&Giant);

	/* make ldt memory segments */
	/*
	 * The data segment limit must not cover the user area because we
	 * don't want the user area to be writable in copyout() etc. (page
	 * level protection is lost in kernel mode on 386's).  Also, we
	 * don't want the user area to be writable directly (page level
	 * protection of the user area is not available on 486's with
	 * CR0_WP set, because there is no user-read/kernel-write mode).
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  And it
	 * should be spelled ...MAX_USER...
	 */
#define VM_END_USER_RW_ADDRESS	VM_MAXUSER_ADDRESS
	/*
	 * The code segment limit has to cover the user area until we move
	 * the signal trampoline out of the user area.  This is safe because
	 * the code segment cannot be written to directly.
	 */
#define VM_END_USER_R_ADDRESS	(VM_END_USER_RW_ADDRESS + UPAGES * PAGE_SIZE)
	ldt_segs[LUCODE_SEL].ssd_limit = atop(VM_END_USER_R_ADDRESS - 1);
	ldt_segs[LUDATA_SEL].ssd_limit = atop(VM_END_USER_RW_ADDRESS - 1);
	for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
		ssdtosd(&ldt_segs[x], &ldt[x].sd);

	_default_ldt = GSEL(GLDT_SEL, SEL_KPL);
	lldt(_default_ldt);
	PCPU_SET(currentldt, _default_ldt);

	/* exceptions */
	for (x = 0; x < NIDT; x++)
		setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
		    GSEL(GCODE_SEL, SEL_KPL));
	setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(1, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(3, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(8, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
	setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(14, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(15, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(17, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(18, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(19, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(0x80, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));

	r_idt.rd_limit = sizeof(idt0) - 1;
	r_idt.rd_base = (int) idt;
	lidt(&r_idt);
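
	/*
	 * Note (added): SDT_SYS386TGT installs a trap gate and
	 * SDT_SYS386IGT an interrupt gate; the latter clears PSL_I on
	 * entry, which is why the debug (1), breakpoint (3) and page
	 * fault (14) vectors use it.  The DPL of SEL_UPL on vectors 3,
	 * 4 and 0x80 is what lets user code reach them via int3, into
	 * and int $0x80.
	 */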

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

	if (metadata_missing)
		printf("WARNING: loader(8) metadata is missing!\n");

#ifdef DEV_ISA
	isa_defaultirq();
#endif

#ifdef DDB
	kdb_init();
	if (boothowto & RB_KDB)
		Debugger("Boot flags requested debugger");
#endif

	finishidentcpu();	/* Final stage of CPU initialization */
	setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	initializecpu();	/* Initialize CPU registers */

	/*
	 * Make an initial tss so the cpu can get an interrupt stack on a
	 * syscall!
	 */
	PCPU_SET(common_tss.tss_esp0,
	    (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16);
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	private_tss = 0;
	PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
	ltr(gsel_tss);

	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
	    dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
	    dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_cr3 = (int)IdlePTD;
	dblfault_tss.tss_eip = (int)dblfault_handler;
	dblfault_tss.tss_eflags = PSL_KERNEL;
	dblfault_tss.tss_ds = dblfault_tss.tss_es =
	    dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
	dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);

	vm86_initialize();
	getmemsize(first);

	/* now running on new page tables, configured, and u/iom is accessible */

	/* Map the message buffer. */
	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);

	msgbufinit(msgbufp, MSGBUF_SIZE);

	/* make a call gate to reenter kernel with */
	gdp = &ldt[LSYS5CALLS_SEL].gd;

	x = (int) &IDTVEC(lcall_syscall);
	gdp->gd_looffset = x;
	gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
	gdp->gd_stkcpy = 1;
	gdp->gd_type = SDT_SYS386CGT;
	gdp->gd_dpl = SEL_UPL;
	gdp->gd_p = 1;
	gdp->gd_hioffset = x >> 16;

	/* XXX does this work? */
	ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
	ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];

	/* transfer to user mode */

	_ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
	_udatasel = LSEL(LUDATA_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	proc0.p_addr->u_pcb.pcb_flags = 0;
	proc0.p_addr->u_pcb.pcb_cr3 = (int)IdlePTD;
	proc0.p_addr->u_pcb.pcb_ext = 0;
	proc0.p_frame = &proc0_tf;
}
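
/*
 * Illustrative sketch (added; the helper is hypothetical): the call
 * gate built at the end of init386() splits the 32-bit handler address
 * across the gd_looffset and gd_hioffset bitfields.  The round trip
 * looks like this:
 */
static __inline u_int
gate_offset_roundtrip(u_int handler)
{
	u_short lo = handler & 0xffff;	/* as stored in gd_looffset */
	u_short hi = handler >> 16;	/* as stored in gd_hioffset */

	return (((u_int)hi << 16) | lo);	/* always equals handler */
}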

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
static void f00f_hack(void *unused);
SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);

static void
f00f_hack(void *unused)
{
	struct gate_descriptor *new_idt;
#ifndef SMP
	struct region_descriptor r_idt;
#endif
	vm_offset_t tmp;

	if (!has_f00f_bug)
		return;

	GIANT_REQUIRED;

	printf("Intel Pentium detected, installing workaround for F00F bug\n");

	r_idt.rd_limit = sizeof(idt0) - 1;

	tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
	if (tmp == 0)
		panic("kmem_alloc returned 0");
	if (((unsigned int)tmp & (PAGE_SIZE-1)) != 0)
		panic("kmem_alloc returned non-page-aligned memory");
	/* Put the first seven entries in the lower page */
	new_idt = (struct gate_descriptor*)(tmp + PAGE_SIZE - (7*8));
	bcopy(idt, new_idt, sizeof(idt0));
	r_idt.rd_base = (int)new_idt;
	lidt(&r_idt);
	idt = new_idt;
	if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
	    VM_PROT_READ, FALSE) != KERN_SUCCESS)
		panic("vm_map_protect failed");
	return;
}
#endif /* defined(I586_CPU) && !defined(NO_F00F_HACK) */

int
ptrace_set_pc(p, addr)
	struct proc *p;
	unsigned long addr;
{
	p->p_frame->tf_eip = addr;
	return (0);
}

int
ptrace_single_step(p)
	struct proc *p;
{
	p->p_frame->tf_eflags |= PSL_T;
	return (0);
}

int
fill_regs(p, regs)
	struct proc *p;
	struct reg *regs;
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = p->p_frame;
	regs->r_fs = tp->tf_fs;
	regs->r_es = tp->tf_es;
	regs->r_ds = tp->tf_ds;
	regs->r_edi = tp->tf_edi;
	regs->r_esi = tp->tf_esi;
	regs->r_ebp = tp->tf_ebp;
	regs->r_ebx = tp->tf_ebx;
	regs->r_edx = tp->tf_edx;
	regs->r_ecx = tp->tf_ecx;
	regs->r_eax = tp->tf_eax;
	regs->r_eip = tp->tf_eip;
	regs->r_cs = tp->tf_cs;
	regs->r_eflags = tp->tf_eflags;
	regs->r_esp = tp->tf_esp;
	regs->r_ss = tp->tf_ss;
	pcb = &p->p_addr->u_pcb;
	regs->r_gs = pcb->pcb_gs;
	return (0);
}

int
set_regs(p, regs)
	struct proc *p;
	struct reg *regs;
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = p->p_frame;
	if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
	    !CS_SECURE(regs->r_cs))
		return (EINVAL);
	tp->tf_fs = regs->r_fs;
	tp->tf_es = regs->r_es;
	tp->tf_ds = regs->r_ds;
	tp->tf_edi = regs->r_edi;
	tp->tf_esi = regs->r_esi;
	tp->tf_ebp = regs->r_ebp;
	tp->tf_ebx = regs->r_ebx;
	tp->tf_edx = regs->r_edx;
	tp->tf_ecx = regs->r_ecx;
	tp->tf_eax = regs->r_eax;
	tp->tf_eip = regs->r_eip;
	tp->tf_cs = regs->r_cs;
	tp->tf_eflags = regs->r_eflags;
	tp->tf_esp = regs->r_esp;
	tp->tf_ss = regs->r_ss;
	pcb = &p->p_addr->u_pcb;
	pcb->pcb_gs = regs->r_gs;
	return (0);
}
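
/*
 * Illustrative sketch (added; the helper is hypothetical): set_regs()
 * above refuses register sets that could raise privilege.  Spelled
 * out, EFL_SECURE() passes only when every bit that differs between
 * the new and old eflags lies within PSL_USERCHANGE, and CS_SECURE()
 * insists the new code selector requests user privilege:
 */
static __inline int
regs_would_be_rejected(u_int new_efl, u_int old_efl, u_int new_cs)
{
	return ((((new_efl ^ old_efl) & ~PSL_USERCHANGE) != 0) ||
	    ISPL(new_cs) != SEL_UPL);
}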

#ifdef CPU_ENABLE_SSE
static void
fill_fpregs_xmm(sv_xmm, sv_87)
	struct savexmm *sv_xmm;
	struct save87 *sv_87;
{
	register struct env87 *penv_87 = &sv_87->sv_env;
	register struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* FPU control/status */
	penv_87->en_cw = penv_xmm->en_cw;
	penv_87->en_sw = penv_xmm->en_sw;
	penv_87->en_tw = penv_xmm->en_tw;
	penv_87->en_fip = penv_xmm->en_fip;
	penv_87->en_fcs = penv_xmm->en_fcs;
	penv_87->en_opcode = penv_xmm->en_opcode;
	penv_87->en_foo = penv_xmm->en_foo;
	penv_87->en_fos = penv_xmm->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;

	sv_87->sv_ex_sw = sv_xmm->sv_ex_sw;
}

static void
set_fpregs_xmm(sv_87, sv_xmm)
	struct save87 *sv_87;
	struct savexmm *sv_xmm;
{
	register struct env87 *penv_87 = &sv_87->sv_env;
	register struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* FPU control/status */
	penv_xmm->en_cw = penv_87->en_cw;
	penv_xmm->en_sw = penv_87->en_sw;
	penv_xmm->en_tw = penv_87->en_tw;
	penv_xmm->en_fip = penv_87->en_fip;
	penv_xmm->en_fcs = penv_87->en_fcs;
	penv_xmm->en_opcode = penv_87->en_opcode;
	penv_xmm->en_foo = penv_87->en_foo;
	penv_xmm->en_fos = penv_87->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];

	sv_xmm->sv_ex_sw = sv_87->sv_ex_sw;
}
#endif /* CPU_ENABLE_SSE */

int
fill_fpregs(p, fpregs)
	struct proc *p;
	struct fpreg *fpregs;
{
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr) {
		fill_fpregs_xmm(&p->p_addr->u_pcb.pcb_save.sv_xmm,
		    (struct save87 *)fpregs);
		return (0);
	}
#endif /* CPU_ENABLE_SSE */
	bcopy(&p->p_addr->u_pcb.pcb_save.sv_87, fpregs, sizeof *fpregs);
	return (0);
}

int
set_fpregs(p, fpregs)
	struct proc *p;
	struct fpreg *fpregs;
{
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr) {
		set_fpregs_xmm((struct save87 *)fpregs,
		    &p->p_addr->u_pcb.pcb_save.sv_xmm);
		return (0);
	}
#endif /* CPU_ENABLE_SSE */
	bcopy(fpregs, &p->p_addr->u_pcb.pcb_save.sv_87, sizeof *fpregs);
	return (0);
}

int
fill_dbregs(p, dbregs)
	struct proc *p;
	struct dbreg *dbregs;
{
	struct pcb *pcb;

	if (p == NULL) {
		dbregs->dr0 = rdr0();
		dbregs->dr1 = rdr1();
		dbregs->dr2 = rdr2();
		dbregs->dr3 = rdr3();
		dbregs->dr4 = rdr4();
		dbregs->dr5 = rdr5();
		dbregs->dr6 = rdr6();
		dbregs->dr7 = rdr7();
	} else {
		pcb = &p->p_addr->u_pcb;
		dbregs->dr0 = pcb->pcb_dr0;
		dbregs->dr1 = pcb->pcb_dr1;
		dbregs->dr2 = pcb->pcb_dr2;
		dbregs->dr3 = pcb->pcb_dr3;
		dbregs->dr4 = 0;
		dbregs->dr5 = 0;
		dbregs->dr6 = pcb->pcb_dr6;
		dbregs->dr7 = pcb->pcb_dr7;
	}
	return (0);
}
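
/*
 * Illustrative sketch (added; the function is hypothetical): passing a
 * NULL proc pointer to fill_dbregs() above snapshots the live debug
 * registers rather than a process's saved copies:
 */
static void
dbreg_snapshot_example(void)
{
	struct dbreg d;

	(void)fill_dbregs(NULL, &d);	/* always returns 0 */
	printf("dr6=%#x dr7=%#x\n", d.dr6, d.dr7);
}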
2306 */ 2307 for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 8; 2308 i++, mask1 <<= 2, mask2 <<= 2) 2309 if ((dbregs->dr7 & mask1) == mask2) 2310 return (EINVAL); 2311 2312 pcb = &p->p_addr->u_pcb; 2313 2314 /* 2315 * Don't let a process set a breakpoint that is not within the 2316 * process's address space. If a process could do this, it 2317 * could halt the system by setting a breakpoint in the kernel 2318 * (if ddb was enabled). Thus, we need to check to make sure 2319 * that no breakpoints are being enabled for addresses outside 2320 * process's address space, unless, perhaps, we were called by 2321 * uid 0. 2322 * 2323 * XXX - what about when the watched area of the user's 2324 * address space is written into from within the kernel 2325 * ... wouldn't that still cause a breakpoint to be generated 2326 * from within kernel mode? 2327 */ 2328 2329 if (suser(p) != 0) { 2330 if (dbregs->dr7 & 0x3) { 2331 /* dr0 is enabled */ 2332 if (dbregs->dr0 >= VM_MAXUSER_ADDRESS) 2333 return (EINVAL); 2334 } 2335 2336 if (dbregs->dr7 & (0x3<<2)) { 2337 /* dr1 is enabled */ 2338 if (dbregs->dr1 >= VM_MAXUSER_ADDRESS) 2339 return (EINVAL); 2340 } 2341 2342 if (dbregs->dr7 & (0x3<<4)) { 2343 /* dr2 is enabled */ 2344 if (dbregs->dr2 >= VM_MAXUSER_ADDRESS) 2345 return (EINVAL); 2346 } 2347 2348 if (dbregs->dr7 & (0x3<<6)) { 2349 /* dr3 is enabled */ 2350 if (dbregs->dr3 >= VM_MAXUSER_ADDRESS) 2351 return (EINVAL); 2352 } 2353 } 2354 2355 pcb->pcb_dr0 = dbregs->dr0; 2356 pcb->pcb_dr1 = dbregs->dr1; 2357 pcb->pcb_dr2 = dbregs->dr2; 2358 pcb->pcb_dr3 = dbregs->dr3; 2359 pcb->pcb_dr6 = dbregs->dr6; 2360 pcb->pcb_dr7 = dbregs->dr7; 2361 2362 pcb->pcb_flags |= PCB_DBREGS; 2363 } 2364 2365 return (0); 2366} 2367 2368/* 2369 * Return > 0 if a hardware breakpoint has been hit, and the 2370 * breakpoint was in user space. Return 0, otherwise. 2371 */ 2372int 2373user_dbreg_trap(void) 2374{ 2375 u_int32_t dr7, dr6; /* debug registers dr6 and dr7 */ 2376 u_int32_t bp; /* breakpoint bits extracted from dr6 */ 2377 int nbp; /* number of breakpoints that triggered */ 2378 caddr_t addr[4]; /* breakpoint addresses */ 2379 int i; 2380 2381 dr7 = rdr7(); 2382 if ((dr7 & 0x000000ff) == 0) { 2383 /* 2384 * all GE and LE bits in the dr7 register are zero, 2385 * thus the trap couldn't have been caused by the 2386 * hardware debug registers 2387 */ 2388 return 0; 2389 } 2390 2391 nbp = 0; 2392 dr6 = rdr6(); 2393 bp = dr6 & 0x0000000f; 2394 2395 if (!bp) { 2396 /* 2397 * None of the breakpoint bits are set meaning this 2398 * trap was not caused by any of the debug registers 2399 */ 2400 return 0; 2401 } 2402 2403 /* 2404 * at least one of the breakpoints were hit, check to see 2405 * which ones and if any of them are user space addresses 2406 */ 2407 2408 if (bp & 0x01) { 2409 addr[nbp++] = (caddr_t)rdr0(); 2410 } 2411 if (bp & 0x02) { 2412 addr[nbp++] = (caddr_t)rdr1(); 2413 } 2414 if (bp & 0x04) { 2415 addr[nbp++] = (caddr_t)rdr2(); 2416 } 2417 if (bp & 0x08) { 2418 addr[nbp++] = (caddr_t)rdr3(); 2419 } 2420 2421 for (i=0; i<nbp; i++) { 2422 if (addr[i] < 2423 (caddr_t)VM_MAXUSER_ADDRESS) { 2424 /* 2425 * addr[i] is in user space 2426 */ 2427 return nbp; 2428 } 2429 } 2430 2431 /* 2432 * None of the breakpoints are in user space. 
2433 */ 2434 return 0; 2435} 2436 2437 2438#ifndef DDB 2439void 2440Debugger(const char *msg) 2441{ 2442 printf("Debugger(\"%s\") called.\n", msg); 2443} 2444#endif /* no DDB */ 2445 2446#include <sys/disklabel.h> 2447 2448/* 2449 * Determine the size of the transfer, and make sure it is 2450 * within the boundaries of the partition. Adjust transfer 2451 * if needed, and signal errors or early completion. 2452 */ 2453int 2454bounds_check_with_label(struct bio *bp, struct disklabel *lp, int wlabel) 2455{ 2456 struct partition *p = lp->d_partitions + dkpart(bp->bio_dev); 2457 int labelsect = lp->d_partitions[0].p_offset; 2458 int maxsz = p->p_size, 2459 sz = (bp->bio_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT; 2460 2461 /* overwriting disk label ? */ 2462 /* XXX should also protect bootstrap in first 8K */ 2463 if (bp->bio_blkno + p->p_offset <= LABELSECTOR + labelsect && 2464#if LABELSECTOR != 0 2465 bp->bio_blkno + p->p_offset + sz > LABELSECTOR + labelsect && 2466#endif 2467 (bp->bio_cmd == BIO_WRITE) && wlabel == 0) { 2468 bp->bio_error = EROFS; 2469 goto bad; 2470 } 2471 2472#if defined(DOSBBSECTOR) && defined(notyet) 2473 /* overwriting master boot record? */ 2474 if (bp->bio_blkno + p->p_offset <= DOSBBSECTOR && 2475 (bp->bio_cmd == BIO_WRITE) && wlabel == 0) { 2476 bp->bio_error = EROFS; 2477 goto bad; 2478 } 2479#endif 2480 2481 /* beyond partition? */ 2482 if (bp->bio_blkno < 0 || bp->bio_blkno + sz > maxsz) { 2483 /* if exactly at end of disk, return an EOF */ 2484 if (bp->bio_blkno == maxsz) { 2485 bp->bio_resid = bp->bio_bcount; 2486 return(0); 2487 } 2488 /* or truncate if part of it fits */ 2489 sz = maxsz - bp->bio_blkno; 2490 if (sz <= 0) { 2491 bp->bio_error = EINVAL; 2492 goto bad; 2493 } 2494 bp->bio_bcount = sz << DEV_BSHIFT; 2495 } 2496 2497 bp->bio_pblkno = bp->bio_blkno + p->p_offset; 2498 return(1); 2499 2500bad: 2501 bp->bio_flags |= BIO_ERROR; 2502 return(-1); 2503} 2504 2505#ifdef DDB 2506 2507/* 2508 * Provide inb() and outb() as functions. They are normally only 2509 * available as macros calling inlined functions, thus cannot be 2510 * called inside DDB. 2511 * 2512 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined. 2513 */ 2514 2515#undef inb 2516#undef outb 2517 2518/* silence compiler warnings */ 2519u_char inb(u_int); 2520void outb(u_int, u_char); 2521 2522u_char 2523inb(u_int port) 2524{ 2525 u_char data; 2526 /* 2527 * We use %%dx and not %1 here because i/o is done at %dx and not at 2528 * %edx, while gcc generates inferior code (movw instead of movl) 2529 * if we tell it to load (u_short) port. 2530 */ 2531 __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port)); 2532 return (data); 2533} 2534 2535void 2536outb(u_int port, u_char data) 2537{ 2538 u_char al; 2539 /* 2540 * Use an unnecessary assignment to help gcc's register allocator. 2541 * This make a large difference for gcc-1.40 and a tiny difference 2542 * for gcc-2.6.0. For gcc-1.40, al had to be ``asm("ax")'' for 2543 * best results. gcc-2.6.0 can't handle this. 2544 */ 2545 al = data; 2546 __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port)); 2547} 2548 2549#endif /* DDB */ 2550