machdep.c revision 82157
1/*- 2 * Copyright (c) 1992 Terrence R. Lambert. 3 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to Berkeley by 7 * William Jolitz. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by the University of 20 * California, Berkeley and its contributors. 21 * 4. Neither the name of the University nor the names of its contributors 22 * may be used to endorse or promote products derived from this software 23 * without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 
36 * 37 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 38 * $FreeBSD: head/sys/amd64/amd64/machdep.c 82157 2001-08-23 01:25:21Z peter $ 39 */ 40 41#include "opt_atalk.h" 42#include "opt_compat.h" 43#include "opt_cpu.h" 44#include "opt_ddb.h" 45#include "opt_inet.h" 46#include "opt_ipx.h" 47#include "opt_isa.h" 48#include "opt_maxmem.h" 49#include "opt_msgbuf.h" 50#include "opt_npx.h" 51#include "opt_perfmon.h" 52/* #include "opt_userconfig.h" */ 53 54#include <sys/param.h> 55#include <sys/systm.h> 56#include <sys/sysproto.h> 57#include <sys/signalvar.h> 58#include <sys/kernel.h> 59#include <sys/ktr.h> 60#include <sys/linker.h> 61#include <sys/lock.h> 62#include <sys/malloc.h> 63#include <sys/mutex.h> 64#include <sys/pcpu.h> 65#include <sys/proc.h> 66#include <sys/bio.h> 67#include <sys/buf.h> 68#include <sys/reboot.h> 69#include <sys/smp.h> 70#include <sys/callout.h> 71#include <sys/msgbuf.h> 72#include <sys/sysent.h> 73#include <sys/sysctl.h> 74#include <sys/vmmeter.h> 75#include <sys/bus.h> 76#include <sys/eventhandler.h> 77 78#include <vm/vm.h> 79#include <vm/vm_param.h> 80#include <sys/lock.h> 81#include <vm/vm_kern.h> 82#include <vm/vm_object.h> 83#include <vm/vm_page.h> 84#include <vm/vm_map.h> 85#include <vm/vm_pager.h> 86#include <vm/vm_extern.h> 87 88#include <sys/user.h> 89#include <sys/exec.h> 90#include <sys/cons.h> 91 92#include <ddb/ddb.h> 93 94#include <net/netisr.h> 95 96#include <machine/cpu.h> 97#include <machine/cputypes.h> 98#include <machine/reg.h> 99#include <machine/clock.h> 100#include <machine/specialreg.h> 101#include <machine/bootinfo.h> 102#include <machine/md_var.h> 103#include <machine/pc/bios.h> 104#include <machine/pcb_ext.h> /* pcb.h included via sys/user.h */ 105#include <machine/globals.h> 106#ifdef PERFMON 107#include <machine/perfmon.h> 108#endif 109 110#include <i386/isa/icu.h> 111#include <i386/isa/intr_machdep.h> 112#include <isa/rtc.h> 113#include <machine/vm86.h> 114#include <sys/ptrace.h> 115#include <machine/sigframe.h> 116 117extern void init386 __P((int first)); 118extern void dblfault_handler __P((void)); 119 120extern void printcpuinfo(void); /* XXX header file */ 121extern void earlysetcpuclass(void); /* same header file */ 122extern void finishidentcpu(void); 123extern void panicifcpuunsupported(void); 124extern void initializecpu(void); 125 126#define CS_SECURE(cs) (ISPL(cs) == SEL_UPL) 127#define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0) 128 129static void cpu_startup __P((void *)); 130#ifdef CPU_ENABLE_SSE 131static void set_fpregs_xmm __P((struct save87 *, struct savexmm *)); 132static void fill_fpregs_xmm __P((struct savexmm *, struct save87 *)); 133#endif /* CPU_ENABLE_SSE */ 134SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL) 135 136int _udatasel, _ucodesel; 137u_int atdevbase; 138 139#if defined(SWTCH_OPTIM_STATS) 140extern int swtch_optim_stats; 141SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats, 142 CTLFLAG_RD, &swtch_optim_stats, 0, ""); 143SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count, 144 CTLFLAG_RD, &tlb_flush_count, 0, ""); 145#endif 146 147#ifdef PC98 148static int ispc98 = 1; 149#else 150static int ispc98 = 0; 151#endif 152SYSCTL_INT(_machdep, OID_AUTO, ispc98, CTLFLAG_RD, &ispc98, 0, ""); 153 154int physmem = 0; 155int cold = 1; 156 157#ifdef COMPAT_43 158static void osendsig __P((sig_t catcher, int sig, sigset_t *mask, u_long code)); 159#endif 160 161static int 162sysctl_hw_physmem(SYSCTL_HANDLER_ARGS) 163{ 164 int error = sysctl_handle_int(oidp, 0, ctob(physmem), req); 165 return (error); 166} 
167 168SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_INT|CTLFLAG_RD, 169 0, 0, sysctl_hw_physmem, "IU", ""); 170 171static int 172sysctl_hw_usermem(SYSCTL_HANDLER_ARGS) 173{ 174 int error = sysctl_handle_int(oidp, 0, 175 ctob(physmem - cnt.v_wire_count), req); 176 return (error); 177} 178 179SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD, 180 0, 0, sysctl_hw_usermem, "IU", ""); 181 182static int 183sysctl_hw_availpages(SYSCTL_HANDLER_ARGS) 184{ 185 int error = sysctl_handle_int(oidp, 0, 186 i386_btop(avail_end - avail_start), req); 187 return (error); 188} 189 190SYSCTL_PROC(_hw, OID_AUTO, availpages, CTLTYPE_INT|CTLFLAG_RD, 191 0, 0, sysctl_hw_availpages, "I", ""); 192 193int Maxmem = 0; 194long dumplo; 195 196vm_offset_t phys_avail[10]; 197 198/* must be 2 less so 0 0 can signal end of chunks */ 199#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2) 200 201struct kva_md_info kmi; 202 203static struct trapframe proc0_tf; 204#ifndef SMP 205static struct globaldata __globaldata; 206#endif 207 208struct mtx sched_lock; 209struct mtx Giant; 210 211static void 212cpu_startup(dummy) 213 void *dummy; 214{ 215 /* 216 * Good {morning,afternoon,evening,night}. 217 */ 218 earlysetcpuclass(); 219 startrtclock(); 220 printcpuinfo(); 221 panicifcpuunsupported(); 222#ifdef PERFMON 223 perfmon_init(); 224#endif 225 printf("real memory = %u (%uK bytes)\n", ptoa(Maxmem), 226 ptoa(Maxmem) / 1024); 227 /* 228 * Display any holes after the first chunk of extended memory. 229 */ 230 if (bootverbose) { 231 int indx; 232 233 printf("Physical memory chunk(s):\n"); 234 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) { 235 unsigned int size1; 236 237 size1 = phys_avail[indx + 1] - phys_avail[indx]; 238 printf("0x%08x - 0x%08x, %u bytes (%u pages)\n", 239 phys_avail[indx], phys_avail[indx + 1] - 1, size1, 240 size1 / PAGE_SIZE); 241 } 242 } 243 244 vm_ksubmap_init(&kmi); 245 246#if 0 247 /* 248 * Calculate callout wheel size 249 */ 250 for (callwheelsize = 1, callwheelbits = 0; 251 callwheelsize < ncallout; 252 callwheelsize <<= 1, ++callwheelbits) 253 ; 254 callwheelmask = callwheelsize - 1; 255 256 /* 257 * Allocate space for system data structures. 258 * The first available kernel virtual address is in "v". 259 * As pages of kernel virtual memory are allocated, "v" is incremented. 260 * As pages of memory are allocated and cleared, 261 * "firstaddr" is incremented. 262 * An index into the kernel page table corresponding to the 263 * virtual memory address maintained in "v" is kept in "mapaddr". 264 */ 265 266 /* 267 * Make two passes. The first pass calculates how much memory is 268 * needed and allocates it. The second pass assigns virtual 269 * addresses to the various data structures. 270 */ 271 firstaddr = 0; 272again: 273 v = (caddr_t)firstaddr; 274 275#define valloc(name, type, num) \ 276 (name) = (type *)v; v = (caddr_t)((name)+(num)) 277#define valloclim(name, type, num, lim) \ 278 (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num))) 279 280 valloc(callout, struct callout, ncallout); 281 valloc(callwheel, struct callout_tailq, callwheelsize); 282 283 /* 284 * Discount the physical memory larger than the size of kernel_map 285 * to avoid eating up all of KVA space. 
286 */ 287 if (kernel_map->first_free == NULL) { 288 printf("Warning: no free entries in kernel_map.\n"); 289 physmem_est = physmem; 290 } else { 291 physmem_est = min(physmem, btoc(kernel_map->max_offset - 292 kernel_map->min_offset)); 293 } 294 295 /* 296 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE. 297 * For the first 64MB of ram nominally allocate sufficient buffers to 298 * cover 1/4 of our ram. Beyond the first 64MB allocate additional 299 * buffers to cover 1/20 of our ram over 64MB. When auto-sizing 300 * the buffer cache we limit the eventual kva reservation to 301 * maxbcache bytes. 302 * 303 * factor represents the 1/4 x ram conversion. 304 */ 305 if (nbuf == 0) { 306 int factor = 4 * BKVASIZE / PAGE_SIZE; 307 308 nbuf = 50; 309 if (physmem_est > 1024) 310 nbuf += min((physmem_est - 1024) / factor, 311 16384 / factor); 312 if (physmem_est > 16384) 313 nbuf += (physmem_est - 16384) * 2 / (factor * 5); 314 315 if (maxbcache && nbuf > maxbcache / BKVASIZE) 316 nbuf = maxbcache / BKVASIZE; 317 } 318 319 /* 320 * Do not allow the buffer_map to be more then 1/2 the size of the 321 * kernel_map. 322 */ 323 if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) / 324 (BKVASIZE * 2)) { 325 nbuf = (kernel_map->max_offset - kernel_map->min_offset) / 326 (BKVASIZE * 2); 327 printf("Warning: nbufs capped at %d\n", nbuf); 328 } 329 330 nswbuf = max(min(nbuf/4, 256), 16); 331 332 valloc(swbuf, struct buf, nswbuf); 333 valloc(buf, struct buf, nbuf); 334 v = bufhashinit(v); 335 336 /* 337 * End of first pass, size has been calculated so allocate memory 338 */ 339 if (firstaddr == 0) { 340 size = (vm_size_t)(v - firstaddr); 341 firstaddr = (int)kmem_alloc(kernel_map, round_page(size)); 342 if (firstaddr == 0) 343 panic("startup: no room for tables"); 344 goto again; 345 } 346 347 /* 348 * End of second pass, addresses have been assigned 349 */ 350 if ((vm_size_t)(v - firstaddr) != size) 351 panic("startup: table size inconsistency"); 352 353 clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva, 354 (nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size); 355 buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva, 356 (nbuf*BKVASIZE)); 357 buffer_map->system_map = 1; 358 pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva, 359 (nswbuf*MAXPHYS) + pager_map_size); 360 pager_map->system_map = 1; 361 exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, 362 (16*(ARG_MAX+(PAGE_SIZE*3)))); 363 364 /* 365 * XXX: Mbuf system machine-specific initializations should 366 * go here, if anywhere. 367 */ 368 369 /* 370 * Initialize callouts 371 */ 372 SLIST_INIT(&callfree); 373 for (i = 0; i < ncallout; i++) { 374 callout_init(&callout[i], 0); 375 callout[i].c_flags = CALLOUT_LOCAL_ALLOC; 376 SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle); 377 } 378 379 for (i = 0; i < callwheelsize; i++) { 380 TAILQ_INIT(&callwheel[i]); 381 } 382 383 mtx_init(&callout_lock, "callout", MTX_SPIN | MTX_RECURSE); 384#endif 385 386#if defined(USERCONFIG) 387 userconfig(); 388 cninit(); /* the preferred console may have changed */ 389#endif 390 391 printf("avail memory = %u (%uK bytes)\n", ptoa(cnt.v_free_count), 392 ptoa(cnt.v_free_count) / 1024); 393 394 /* 395 * Set up buffers, so they can be used to read disk labels. 396 */ 397 bufinit(); 398 vm_pager_bufferinit(); 399 400 globaldata_register(GLOBALDATA); 401#ifndef SMP 402 /* For SMP, we delay the cpu_setregs() until after SMP startup. */ 403 cpu_setregs(); 404#endif 405} 406 407/* 408 * Send an interrupt to process. 
409 * 410 * Stack is set up to allow sigcode stored 411 * at top to call routine, followed by kcall 412 * to sigreturn routine below. After sigreturn 413 * resets the signal mask, the stack, and the 414 * frame pointer, it returns to the user 415 * specified pc, psl. 416 */ 417#ifdef COMPAT_43 418static void 419osendsig(catcher, sig, mask, code) 420 sig_t catcher; 421 int sig; 422 sigset_t *mask; 423 u_long code; 424{ 425 struct osigframe sf; 426 struct osigframe *fp; 427 struct proc *p; 428 struct sigacts *psp; 429 struct trapframe *regs; 430 int oonstack; 431 432 p = curproc; 433 PROC_LOCK(p); 434 psp = p->p_sigacts; 435 regs = p->p_frame; 436 oonstack = sigonstack(regs->tf_esp); 437 438 /* Allocate and validate space for the signal handler context. */ 439 if ((p->p_flag & P_ALTSTACK) && !oonstack && 440 SIGISMEMBER(psp->ps_sigonstack, sig)) { 441 fp = (struct osigframe *)(p->p_sigstk.ss_sp + 442 p->p_sigstk.ss_size - sizeof(struct osigframe)); 443#if defined(COMPAT_43) || defined(COMPAT_SUNOS) 444 p->p_sigstk.ss_flags |= SS_ONSTACK; 445#endif 446 } else 447 fp = (struct osigframe *)regs->tf_esp - 1; 448 PROC_UNLOCK(p); 449 450 /* 451 * grow_stack() will return 0 if *fp does not fit inside the stack 452 * and the stack can not be grown. 453 * useracc() will return FALSE if access is denied. 454 */ 455 if (grow_stack(p, (int)fp) == 0 || 456 !useracc((caddr_t)fp, sizeof(*fp), VM_PROT_WRITE)) { 457 /* 458 * Process has trashed its stack; give it an illegal 459 * instruction to halt it in its tracks. 460 */ 461 PROC_LOCK(p); 462 SIGACTION(p, SIGILL) = SIG_DFL; 463 SIGDELSET(p->p_sigignore, SIGILL); 464 SIGDELSET(p->p_sigcatch, SIGILL); 465 SIGDELSET(p->p_sigmask, SIGILL); 466 psignal(p, SIGILL); 467 PROC_UNLOCK(p); 468 return; 469 } 470 471 /* Translate the signal if appropriate. */ 472 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize) 473 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; 474 475 /* Build the argument list for the signal handler. */ 476 sf.sf_signum = sig; 477 sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc; 478 PROC_LOCK(p); 479 if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) { 480 /* Signal handler installed with SA_SIGINFO. */ 481 sf.sf_arg2 = (register_t)&fp->sf_siginfo; 482 sf.sf_siginfo.si_signo = sig; 483 sf.sf_siginfo.si_code = code; 484 sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher; 485 } else { 486 /* Old FreeBSD-style arguments. */ 487 sf.sf_arg2 = code; 488 sf.sf_addr = regs->tf_err; 489 sf.sf_ahu.sf_handler = catcher; 490 } 491 PROC_UNLOCK(p); 492 493 /* Save most if not all of trap frame. */ 494 sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax; 495 sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx; 496 sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx; 497 sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx; 498 sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi; 499 sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi; 500 sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs; 501 sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds; 502 sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss; 503 sf.sf_siginfo.si_sc.sc_es = regs->tf_es; 504 sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs; 505 sf.sf_siginfo.si_sc.sc_gs = rgs(); 506 sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp; 507 508 /* Build the signal context to be used by osigreturn(). */ 509 sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 
1 : 0; 510 SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask); 511 sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp; 512 sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp; 513 sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip; 514 sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags; 515 sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno; 516 sf.sf_siginfo.si_sc.sc_err = regs->tf_err; 517 518 /* 519 * If we're a vm86 process, we want to save the segment registers. 520 * We also change eflags to be our emulated eflags, not the actual 521 * eflags. 522 */ 523 if (regs->tf_eflags & PSL_VM) { 524 /* XXX confusing names: `tf' isn't a trapframe; `regs' is. */ 525 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 526 struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 527 528 sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs; 529 sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs; 530 sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es; 531 sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds; 532 533 if (vm86->vm86_has_vme == 0) 534 sf.sf_siginfo.si_sc.sc_ps = 535 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) | 536 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); 537 538 /* See sendsig() for comments. */ 539 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP); 540 } 541 542 /* Copy the sigframe out to the user's stack. */ 543 if (copyout(&sf, fp, sizeof(*fp)) != 0) { 544 /* 545 * Something is wrong with the stack pointer. 546 * ...Kill the process. 547 */ 548 PROC_LOCK(p); 549 sigexit(p, SIGILL); 550 /* NOTREACHED */ 551 } 552 553 regs->tf_esp = (int)fp; 554 regs->tf_eip = PS_STRINGS - szosigcode; 555 regs->tf_cs = _ucodesel; 556 regs->tf_ds = _udatasel; 557 regs->tf_es = _udatasel; 558 regs->tf_fs = _udatasel; 559 load_gs(_udatasel); 560 regs->tf_ss = _udatasel; 561} 562#endif 563 564void 565sendsig(catcher, sig, mask, code) 566 sig_t catcher; 567 int sig; 568 sigset_t *mask; 569 u_long code; 570{ 571 struct sigframe sf; 572 struct proc *p; 573 struct sigacts *psp; 574 struct trapframe *regs; 575 struct sigframe *sfp; 576 int oonstack; 577 578 p = curproc; 579 PROC_LOCK(p); 580 psp = p->p_sigacts; 581#ifdef COMPAT_43 582 if (SIGISMEMBER(psp->ps_osigset, sig)) { 583 PROC_UNLOCK(p); 584 osendsig(catcher, sig, mask, code); 585 return; 586 } 587#endif 588 regs = p->p_frame; 589 oonstack = sigonstack(regs->tf_esp); 590 591 /* Save user context. */ 592 bzero(&sf, sizeof(sf)); 593 sf.sf_uc.uc_sigmask = *mask; 594 sf.sf_uc.uc_stack = p->p_sigstk; 595 sf.sf_uc.uc_stack.ss_flags = (p->p_flag & P_ALTSTACK) 596 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE; 597 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0; 598 sf.sf_uc.uc_mcontext.mc_gs = rgs(); 599 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs)); 600 601 /* Allocate and validate space for the signal handler context. */ 602 if ((p->p_flag & P_ALTSTACK) != 0 && !oonstack && 603 SIGISMEMBER(psp->ps_sigonstack, sig)) { 604 sfp = (struct sigframe *)(p->p_sigstk.ss_sp + 605 p->p_sigstk.ss_size - sizeof(struct sigframe)); 606#if defined(COMPAT_43) || defined(COMPAT_SUNOS) 607 p->p_sigstk.ss_flags |= SS_ONSTACK; 608#endif 609 } else 610 sfp = (struct sigframe *)regs->tf_esp - 1; 611 PROC_UNLOCK(p); 612 613 /* 614 * grow_stack() will return 0 if *sfp does not fit inside the stack 615 * and the stack can not be grown. 616 * useracc() will return FALSE if access is denied. 617 */ 618 if (grow_stack(p, (int)sfp) == 0 || 619 !useracc((caddr_t)sfp, sizeof(*sfp), VM_PROT_WRITE)) { 620 /* 621 * Process has trashed its stack; give it an illegal 622 * instruction to halt it in its tracks. 
623 */ 624#ifdef DEBUG 625 printf("process %d has trashed its stack\n", p->p_pid); 626#endif 627 PROC_LOCK(p); 628 SIGACTION(p, SIGILL) = SIG_DFL; 629 SIGDELSET(p->p_sigignore, SIGILL); 630 SIGDELSET(p->p_sigcatch, SIGILL); 631 SIGDELSET(p->p_sigmask, SIGILL); 632 psignal(p, SIGILL); 633 PROC_UNLOCK(p); 634 return; 635 } 636 637 /* Translate the signal if appropriate. */ 638 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize) 639 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; 640 641 /* Build the argument list for the signal handler. */ 642 sf.sf_signum = sig; 643 sf.sf_ucontext = (register_t)&sfp->sf_uc; 644 PROC_LOCK(p); 645 if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) { 646 /* Signal handler installed with SA_SIGINFO. */ 647 sf.sf_siginfo = (register_t)&sfp->sf_si; 648 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; 649 650 /* Fill siginfo structure. */ 651 sf.sf_si.si_signo = sig; 652 sf.sf_si.si_code = code; 653 sf.sf_si.si_addr = (void *)regs->tf_err; 654 } else { 655 /* Old FreeBSD-style arguments. */ 656 sf.sf_siginfo = code; 657 sf.sf_addr = regs->tf_err; 658 sf.sf_ahu.sf_handler = catcher; 659 } 660 PROC_UNLOCK(p); 661 662 /* 663 * If we're a vm86 process, we want to save the segment registers. 664 * We also change eflags to be our emulated eflags, not the actual 665 * eflags. 666 */ 667 if (regs->tf_eflags & PSL_VM) { 668 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 669 struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 670 671 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs; 672 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs; 673 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es; 674 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds; 675 676 if (vm86->vm86_has_vme == 0) 677 sf.sf_uc.uc_mcontext.mc_eflags = 678 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) | 679 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); 680 681 /* 682 * We should never have PSL_T set when returning from vm86 683 * mode. It may be set here if we deliver a signal before 684 * getting to vm86 mode, so turn it off. 685 * 686 * Clear PSL_NT to inhibit T_TSSFLT faults on return from 687 * syscalls made by the signal handler. This just avoids 688 * wasting time for our lazy fixup of such faults. PSL_NT 689 * does nothing in vm86 mode, but vm86 programs can set it 690 * almost legitimately in probes for old cpu types. 691 */ 692 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP); 693 } 694 695 /* Copy the sigframe out to the user's stack. */ 696 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) { 697 /* 698 * Something is wrong with the stack pointer. 699 * ...Kill the process. 700 */ 701 PROC_LOCK(p); 702 sigexit(p, SIGILL); 703 /* NOTREACHED */ 704 } 705 706 regs->tf_esp = (int)sfp; 707 regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode); 708 regs->tf_cs = _ucodesel; 709 regs->tf_ds = _udatasel; 710 regs->tf_es = _udatasel; 711 regs->tf_fs = _udatasel; 712 regs->tf_ss = _udatasel; 713} 714 715/* 716 * System call to cleanup state after a signal 717 * has been taken. Reset signal mask and 718 * stack state from context left by sendsig (above). 719 * Return to previous pc and psl as specified by 720 * context left by sendsig. Check carefully to 721 * make sure that the user has not modified the 722 * state to gain improper privileges. 
723 */ 724#ifdef COMPAT_43 725int 726osigreturn(p, uap) 727 struct proc *p; 728 struct osigreturn_args /* { 729 struct osigcontext *sigcntxp; 730 } */ *uap; 731{ 732 struct trapframe *regs; 733 struct osigcontext *scp; 734 int eflags; 735 736 regs = p->p_frame; 737 scp = uap->sigcntxp; 738 if (!useracc((caddr_t)scp, sizeof(*scp), VM_PROT_READ)) 739 return (EFAULT); 740 eflags = scp->sc_ps; 741 if (eflags & PSL_VM) { 742 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 743 struct vm86_kernel *vm86; 744 745 /* 746 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't 747 * set up the vm86 area, and we can't enter vm86 mode. 748 */ 749 if (p->p_addr->u_pcb.pcb_ext == 0) 750 return (EINVAL); 751 vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 752 if (vm86->vm86_inited == 0) 753 return (EINVAL); 754 755 /* Go back to user mode if both flags are set. */ 756 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) 757 trapsignal(p, SIGBUS, 0); 758 759 if (vm86->vm86_has_vme) { 760 eflags = (tf->tf_eflags & ~VME_USERCHANGE) | 761 (eflags & VME_USERCHANGE) | PSL_VM; 762 } else { 763 vm86->vm86_eflags = eflags; /* save VIF, VIP */ 764 eflags = (tf->tf_eflags & ~VM_USERCHANGE) | 765 (eflags & VM_USERCHANGE) | PSL_VM; 766 } 767 tf->tf_vm86_ds = scp->sc_ds; 768 tf->tf_vm86_es = scp->sc_es; 769 tf->tf_vm86_fs = scp->sc_fs; 770 tf->tf_vm86_gs = scp->sc_gs; 771 tf->tf_ds = _udatasel; 772 tf->tf_es = _udatasel; 773 tf->tf_fs = _udatasel; 774 } else { 775 /* 776 * Don't allow users to change privileged or reserved flags. 777 */ 778 /* 779 * XXX do allow users to change the privileged flag PSL_RF. 780 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers 781 * should sometimes set it there too. tf_eflags is kept in 782 * the signal context during signal handling and there is no 783 * other place to remember it, so the PSL_RF bit may be 784 * corrupted by the signal handler without us knowing. 785 * Corruption of the PSL_RF bit at worst causes one more or 786 * one less debugger trap, so allowing it is fairly harmless. 787 */ 788 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) { 789 return (EINVAL); 790 } 791 792 /* 793 * Don't allow users to load a valid privileged %cs. Let the 794 * hardware check for invalid selectors, excess privilege in 795 * other selectors, invalid %eip's and invalid %esp's. 796 */ 797 if (!CS_SECURE(scp->sc_cs)) { 798 trapsignal(p, SIGBUS, T_PROTFLT); 799 return (EINVAL); 800 } 801 regs->tf_ds = scp->sc_ds; 802 regs->tf_es = scp->sc_es; 803 regs->tf_fs = scp->sc_fs; 804 } 805 806 /* Restore remaining registers. 
*/ 807 regs->tf_eax = scp->sc_eax; 808 regs->tf_ebx = scp->sc_ebx; 809 regs->tf_ecx = scp->sc_ecx; 810 regs->tf_edx = scp->sc_edx; 811 regs->tf_esi = scp->sc_esi; 812 regs->tf_edi = scp->sc_edi; 813 regs->tf_cs = scp->sc_cs; 814 regs->tf_ss = scp->sc_ss; 815 regs->tf_isp = scp->sc_isp; 816 817 PROC_LOCK(p); 818#if defined(COMPAT_43) || defined(COMPAT_SUNOS) 819 if (scp->sc_onstack & 1) 820 p->p_sigstk.ss_flags |= SS_ONSTACK; 821 else 822 p->p_sigstk.ss_flags &= ~SS_ONSTACK; 823#endif 824 825 SIGSETOLD(p->p_sigmask, scp->sc_mask); 826 SIG_CANTMASK(p->p_sigmask); 827 PROC_UNLOCK(p); 828 regs->tf_ebp = scp->sc_fp; 829 regs->tf_esp = scp->sc_sp; 830 regs->tf_eip = scp->sc_pc; 831 regs->tf_eflags = eflags; 832 return (EJUSTRETURN); 833} 834#endif 835 836int 837sigreturn(p, uap) 838 struct proc *p; 839 struct sigreturn_args /* { 840 ucontext_t *sigcntxp; 841 } */ *uap; 842{ 843 struct trapframe *regs; 844 ucontext_t *ucp; 845 int cs, eflags; 846 847 ucp = uap->sigcntxp; 848#ifdef COMPAT_43 849 if (!useracc((caddr_t)ucp, sizeof(struct osigcontext), VM_PROT_READ)) 850 return (EFAULT); 851 if (((struct osigcontext *)ucp)->sc_trapno == 0x01d516) 852 return (osigreturn(p, (struct osigreturn_args *)uap)); 853 /* 854 * Since ucp is not an osigcontext but a ucontext_t, we have to 855 * check again if all of it is accessible. A ucontext_t is 856 * much larger, so instead of just checking for the pointer 857 * being valid for the size of an osigcontext, now check for 858 * it being valid for a whole, new-style ucontext_t. 859 */ 860#endif 861 if (!useracc((caddr_t)ucp, sizeof(*ucp), VM_PROT_READ)) 862 return (EFAULT); 863 864 regs = p->p_frame; 865 eflags = ucp->uc_mcontext.mc_eflags; 866 if (eflags & PSL_VM) { 867 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 868 struct vm86_kernel *vm86; 869 870 /* 871 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't 872 * set up the vm86 area, and we can't enter vm86 mode. 873 */ 874 if (p->p_addr->u_pcb.pcb_ext == 0) 875 return (EINVAL); 876 vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 877 if (vm86->vm86_inited == 0) 878 return (EINVAL); 879 880 /* Go back to user mode if both flags are set. */ 881 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) 882 trapsignal(p, SIGBUS, 0); 883 884 if (vm86->vm86_has_vme) { 885 eflags = (tf->tf_eflags & ~VME_USERCHANGE) | 886 (eflags & VME_USERCHANGE) | PSL_VM; 887 } else { 888 vm86->vm86_eflags = eflags; /* save VIF, VIP */ 889 eflags = (tf->tf_eflags & ~VM_USERCHANGE) | 890 (eflags & VM_USERCHANGE) | PSL_VM; 891 } 892 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe)); 893 tf->tf_eflags = eflags; 894 tf->tf_vm86_ds = tf->tf_ds; 895 tf->tf_vm86_es = tf->tf_es; 896 tf->tf_vm86_fs = tf->tf_fs; 897 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs; 898 tf->tf_ds = _udatasel; 899 tf->tf_es = _udatasel; 900 tf->tf_fs = _udatasel; 901 } else { 902 /* 903 * Don't allow users to change privileged or reserved flags. 904 */ 905 /* 906 * XXX do allow users to change the privileged flag PSL_RF. 907 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers 908 * should sometimes set it there too. tf_eflags is kept in 909 * the signal context during signal handling and there is no 910 * other place to remember it, so the PSL_RF bit may be 911 * corrupted by the signal handler without us knowing. 912 * Corruption of the PSL_RF bit at worst causes one more or 913 * one less debugger trap, so allowing it is fairly harmless. 
914 */ 915 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) { 916 printf("sigreturn: eflags = 0x%x\n", eflags); 917 return (EINVAL); 918 } 919 920 /* 921 * Don't allow users to load a valid privileged %cs. Let the 922 * hardware check for invalid selectors, excess privilege in 923 * other selectors, invalid %eip's and invalid %esp's. 924 */ 925 cs = ucp->uc_mcontext.mc_cs; 926 if (!CS_SECURE(cs)) { 927 printf("sigreturn: cs = 0x%x\n", cs); 928 trapsignal(p, SIGBUS, T_PROTFLT); 929 return (EINVAL); 930 } 931 932 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs)); 933 } 934 935 PROC_LOCK(p); 936#if defined(COMPAT_43) || defined(COMPAT_SUNOS) 937 if (ucp->uc_mcontext.mc_onstack & 1) 938 p->p_sigstk.ss_flags |= SS_ONSTACK; 939 else 940 p->p_sigstk.ss_flags &= ~SS_ONSTACK; 941#endif 942 943 p->p_sigmask = ucp->uc_sigmask; 944 SIG_CANTMASK(p->p_sigmask); 945 PROC_UNLOCK(p); 946 return (EJUSTRETURN); 947} 948 949/* 950 * Machine dependent boot() routine 951 * 952 * I haven't seen anything to put here yet 953 * Possibly some stuff might be grafted back here from boot() 954 */ 955void 956cpu_boot(int howto) 957{ 958} 959 960/* 961 * Shutdown the CPU as much as possible 962 */ 963void 964cpu_halt(void) 965{ 966 for (;;) 967 __asm__ ("hlt"); 968} 969 970/* 971 * Hook to idle the CPU when possible. This currently only works in 972 * the !SMP case, as there is no clean way to ensure that a CPU will be 973 * woken when there is work available for it. 974 */ 975static int cpu_idle_hlt = 1; 976SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW, 977 &cpu_idle_hlt, 0, "Idle loop HLT enable"); 978 979/* 980 * Note that we have to be careful here to avoid a race between checking 981 * procrunnable() and actually halting. If we don't do this, we may waste 982 * the time between calling hlt and the next interrupt even though there 983 * is a runnable process. 984 */ 985void 986cpu_idle(void) 987{ 988#ifndef SMP 989 if (cpu_idle_hlt) { 990 disable_intr(); 991 if (procrunnable()) 992 enable_intr(); 993 else { 994 enable_intr(); 995 __asm __volatile("hlt"); 996 } 997 } 998#endif 999} 1000 1001/* 1002 * Clear registers on exec 1003 */ 1004void 1005setregs(p, entry, stack, ps_strings) 1006 struct proc *p; 1007 u_long entry; 1008 u_long stack; 1009 u_long ps_strings; 1010{ 1011 struct trapframe *regs = p->p_frame; 1012 struct pcb *pcb = &p->p_addr->u_pcb; 1013 1014 if (pcb->pcb_ldt) 1015 user_ldt_free(pcb); 1016 1017 bzero((char *)regs, sizeof(struct trapframe)); 1018 regs->tf_eip = entry; 1019 regs->tf_esp = stack; 1020 regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T); 1021 regs->tf_ss = _udatasel; 1022 regs->tf_ds = _udatasel; 1023 regs->tf_es = _udatasel; 1024 regs->tf_fs = _udatasel; 1025 regs->tf_cs = _ucodesel; 1026 1027 /* PS_STRINGS value for BSD/OS binaries. It is 0 for non-BSD/OS. */ 1028 regs->tf_ebx = ps_strings; 1029 1030 /* reset %gs as well */ 1031 if (pcb == PCPU_GET(curpcb)) 1032 load_gs(_udatasel); 1033 else 1034 pcb->pcb_gs = _udatasel; 1035 1036 /* 1037 * Reset the hardware debug registers if they were in use. 1038 * They won't have any meaning for the newly exec'd process. 1039 */ 1040 if (pcb->pcb_flags & PCB_DBREGS) { 1041 pcb->pcb_dr0 = 0; 1042 pcb->pcb_dr1 = 0; 1043 pcb->pcb_dr2 = 0; 1044 pcb->pcb_dr3 = 0; 1045 pcb->pcb_dr6 = 0; 1046 pcb->pcb_dr7 = 0; 1047 if (pcb == PCPU_GET(curpcb)) { 1048 /* 1049 * Clear the debug registers on the running 1050 * CPU, otherwise they will end up affecting 1051 * the next process we switch to. 
1052 */ 1053 reset_dbregs(); 1054 } 1055 pcb->pcb_flags &= ~PCB_DBREGS; 1056 } 1057 1058 /* 1059 * Initialize the math emulator (if any) for the current process. 1060 * Actually, just clear the bit that says that the emulator has 1061 * been initialized. Initialization is delayed until the process 1062 * traps to the emulator (if it is done at all) mainly because 1063 * emulators don't provide an entry point for initialization. 1064 */ 1065 p->p_addr->u_pcb.pcb_flags &= ~FP_SOFTFP; 1066 1067 /* 1068 * Arrange to trap the next npx or `fwait' instruction (see npx.c 1069 * for why fwait must be trapped at least if there is an npx or an 1070 * emulator). This is mainly to handle the case where npx0 is not 1071 * configured, since the npx routines normally set up the trap 1072 * otherwise. It should be done only at boot time, but doing it 1073 * here allows modifying `npx_exists' for testing the emulator on 1074 * systems with an npx. 1075 */ 1076 load_cr0(rcr0() | CR0_MP | CR0_TS); 1077 1078#ifdef DEV_NPX 1079 /* Initialize the npx (if any) for the current process. */ 1080 npxinit(__INITIAL_NPXCW__); 1081#endif 1082 1083 /* 1084 * XXX - Linux emulator 1085 * Make sure sure edx is 0x0 on entry. Linux binaries depend 1086 * on it. 1087 */ 1088 p->p_retval[1] = 0; 1089} 1090 1091void 1092cpu_setregs(void) 1093{ 1094 unsigned int cr0; 1095 1096 cr0 = rcr0(); 1097 cr0 |= CR0_NE; /* Done by npxinit() */ 1098 cr0 |= CR0_MP | CR0_TS; /* Done at every execve() too. */ 1099#ifndef I386_CPU 1100 cr0 |= CR0_WP | CR0_AM; 1101#endif 1102 load_cr0(cr0); 1103 load_gs(_udatasel); 1104} 1105 1106static int 1107sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS) 1108{ 1109 int error; 1110 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, 1111 req); 1112 if (!error && req->newptr) 1113 resettodr(); 1114 return (error); 1115} 1116 1117SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW, 1118 &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", ""); 1119 1120SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set, 1121 CTLFLAG_RW, &disable_rtc_set, 0, ""); 1122 1123SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo, 1124 CTLFLAG_RD, &bootinfo, bootinfo, ""); 1125 1126SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock, 1127 CTLFLAG_RW, &wall_cmos_clock, 0, ""); 1128 1129/* 1130 * Initialize 386 and configure to run kernel 1131 */ 1132 1133/* 1134 * Initialize segments & interrupt table 1135 */ 1136 1137int _default_ldt; 1138union descriptor gdt[NGDT * MAXCPU]; /* global descriptor table */ 1139static struct gate_descriptor idt0[NIDT]; 1140struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */ 1141union descriptor ldt[NLDT]; /* local descriptor table */ 1142#ifdef SMP 1143/* table descriptors - used to load tables by microp */ 1144struct region_descriptor r_gdt, r_idt; 1145#endif 1146 1147int private_tss; /* flag indicating private tss */ 1148 1149#if defined(I586_CPU) && !defined(NO_F00F_HACK) 1150extern int has_f00f_bug; 1151#endif 1152 1153static struct i386tss dblfault_tss; 1154static char dblfault_stack[PAGE_SIZE]; 1155 1156extern struct user *proc0paddr; 1157 1158 1159/* software prototypes -- in more palatable form */ 1160struct soft_segment_descriptor gdt_segs[] = { 1161/* GNULL_SEL 0 Null Descriptor */ 1162{ 0x0, /* segment base address */ 1163 0x0, /* length */ 1164 0, /* segment type */ 1165 0, /* segment descriptor priority level */ 1166 0, /* segment descriptor present */ 1167 0, 0, 1168 0, /* default 32 vs 16 bit size */ 1169 0 /* limit granularity (byte/page units)*/ }, 
1170/* GCODE_SEL 1 Code Descriptor for kernel */ 1171{ 0x0, /* segment base address */ 1172 0xfffff, /* length - all address space */ 1173 SDT_MEMERA, /* segment type */ 1174 0, /* segment descriptor priority level */ 1175 1, /* segment descriptor present */ 1176 0, 0, 1177 1, /* default 32 vs 16 bit size */ 1178 1 /* limit granularity (byte/page units)*/ }, 1179/* GDATA_SEL 2 Data Descriptor for kernel */ 1180{ 0x0, /* segment base address */ 1181 0xfffff, /* length - all address space */ 1182 SDT_MEMRWA, /* segment type */ 1183 0, /* segment descriptor priority level */ 1184 1, /* segment descriptor present */ 1185 0, 0, 1186 1, /* default 32 vs 16 bit size */ 1187 1 /* limit granularity (byte/page units)*/ }, 1188/* GPRIV_SEL 3 SMP Per-Processor Private Data Descriptor */ 1189{ 0x0, /* segment base address */ 1190 0xfffff, /* length - all address space */ 1191 SDT_MEMRWA, /* segment type */ 1192 0, /* segment descriptor priority level */ 1193 1, /* segment descriptor present */ 1194 0, 0, 1195 1, /* default 32 vs 16 bit size */ 1196 1 /* limit granularity (byte/page units)*/ }, 1197/* GPROC0_SEL 4 Proc 0 Tss Descriptor */ 1198{ 1199 0x0, /* segment base address */ 1200 sizeof(struct i386tss)-1,/* length - all address space */ 1201 SDT_SYS386TSS, /* segment type */ 1202 0, /* segment descriptor priority level */ 1203 1, /* segment descriptor present */ 1204 0, 0, 1205 0, /* unused - default 32 vs 16 bit size */ 1206 0 /* limit granularity (byte/page units)*/ }, 1207/* GLDT_SEL 5 LDT Descriptor */ 1208{ (int) ldt, /* segment base address */ 1209 sizeof(ldt)-1, /* length - all address space */ 1210 SDT_SYSLDT, /* segment type */ 1211 SEL_UPL, /* segment descriptor priority level */ 1212 1, /* segment descriptor present */ 1213 0, 0, 1214 0, /* unused - default 32 vs 16 bit size */ 1215 0 /* limit granularity (byte/page units)*/ }, 1216/* GUSERLDT_SEL 6 User LDT Descriptor per process */ 1217{ (int) ldt, /* segment base address */ 1218 (512 * sizeof(union descriptor)-1), /* length */ 1219 SDT_SYSLDT, /* segment type */ 1220 0, /* segment descriptor priority level */ 1221 1, /* segment descriptor present */ 1222 0, 0, 1223 0, /* unused - default 32 vs 16 bit size */ 1224 0 /* limit granularity (byte/page units)*/ }, 1225/* GTGATE_SEL 7 Null Descriptor - Placeholder */ 1226{ 0x0, /* segment base address */ 1227 0x0, /* length - all address space */ 1228 0, /* segment type */ 1229 0, /* segment descriptor priority level */ 1230 0, /* segment descriptor present */ 1231 0, 0, 1232 0, /* default 32 vs 16 bit size */ 1233 0 /* limit granularity (byte/page units)*/ }, 1234/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */ 1235{ 0x400, /* segment base address */ 1236 0xfffff, /* length */ 1237 SDT_MEMRWA, /* segment type */ 1238 0, /* segment descriptor priority level */ 1239 1, /* segment descriptor present */ 1240 0, 0, 1241 1, /* default 32 vs 16 bit size */ 1242 1 /* limit granularity (byte/page units)*/ }, 1243/* GPANIC_SEL 9 Panic Tss Descriptor */ 1244{ (int) &dblfault_tss, /* segment base address */ 1245 sizeof(struct i386tss)-1,/* length - all address space */ 1246 SDT_SYS386TSS, /* segment type */ 1247 0, /* segment descriptor priority level */ 1248 1, /* segment descriptor present */ 1249 0, 0, 1250 0, /* unused - default 32 vs 16 bit size */ 1251 0 /* limit granularity (byte/page units)*/ }, 1252/* GBIOSCODE32_SEL 10 BIOS 32-bit interface (32bit Code) */ 1253{ 0, /* segment base address (overwritten) */ 1254 0xfffff, /* length */ 1255 SDT_MEMERA, /* segment 
type */ 1256 0, /* segment descriptor priority level */ 1257 1, /* segment descriptor present */ 1258 0, 0, 1259 0, /* default 32 vs 16 bit size */ 1260 1 /* limit granularity (byte/page units)*/ }, 1261/* GBIOSCODE16_SEL 11 BIOS 32-bit interface (16bit Code) */ 1262{ 0, /* segment base address (overwritten) */ 1263 0xfffff, /* length */ 1264 SDT_MEMERA, /* segment type */ 1265 0, /* segment descriptor priority level */ 1266 1, /* segment descriptor present */ 1267 0, 0, 1268 0, /* default 32 vs 16 bit size */ 1269 1 /* limit granularity (byte/page units)*/ }, 1270/* GBIOSDATA_SEL 12 BIOS 32-bit interface (Data) */ 1271{ 0, /* segment base address (overwritten) */ 1272 0xfffff, /* length */ 1273 SDT_MEMRWA, /* segment type */ 1274 0, /* segment descriptor priority level */ 1275 1, /* segment descriptor present */ 1276 0, 0, 1277 1, /* default 32 vs 16 bit size */ 1278 1 /* limit granularity (byte/page units)*/ }, 1279/* GBIOSUTIL_SEL 13 BIOS 16-bit interface (Utility) */ 1280{ 0, /* segment base address (overwritten) */ 1281 0xfffff, /* length */ 1282 SDT_MEMRWA, /* segment type */ 1283 0, /* segment descriptor priority level */ 1284 1, /* segment descriptor present */ 1285 0, 0, 1286 0, /* default 32 vs 16 bit size */ 1287 1 /* limit granularity (byte/page units)*/ }, 1288/* GBIOSARGS_SEL 14 BIOS 16-bit interface (Arguments) */ 1289{ 0, /* segment base address (overwritten) */ 1290 0xfffff, /* length */ 1291 SDT_MEMRWA, /* segment type */ 1292 0, /* segment descriptor priority level */ 1293 1, /* segment descriptor present */ 1294 0, 0, 1295 0, /* default 32 vs 16 bit size */ 1296 1 /* limit granularity (byte/page units)*/ }, 1297}; 1298 1299static struct soft_segment_descriptor ldt_segs[] = { 1300 /* Null Descriptor - overwritten by call gate */ 1301{ 0x0, /* segment base address */ 1302 0x0, /* length - all address space */ 1303 0, /* segment type */ 1304 0, /* segment descriptor priority level */ 1305 0, /* segment descriptor present */ 1306 0, 0, 1307 0, /* default 32 vs 16 bit size */ 1308 0 /* limit granularity (byte/page units)*/ }, 1309 /* Null Descriptor - overwritten by call gate */ 1310{ 0x0, /* segment base address */ 1311 0x0, /* length - all address space */ 1312 0, /* segment type */ 1313 0, /* segment descriptor priority level */ 1314 0, /* segment descriptor present */ 1315 0, 0, 1316 0, /* default 32 vs 16 bit size */ 1317 0 /* limit granularity (byte/page units)*/ }, 1318 /* Null Descriptor - overwritten by call gate */ 1319{ 0x0, /* segment base address */ 1320 0x0, /* length - all address space */ 1321 0, /* segment type */ 1322 0, /* segment descriptor priority level */ 1323 0, /* segment descriptor present */ 1324 0, 0, 1325 0, /* default 32 vs 16 bit size */ 1326 0 /* limit granularity (byte/page units)*/ }, 1327 /* Code Descriptor for user */ 1328{ 0x0, /* segment base address */ 1329 0xfffff, /* length - all address space */ 1330 SDT_MEMERA, /* segment type */ 1331 SEL_UPL, /* segment descriptor priority level */ 1332 1, /* segment descriptor present */ 1333 0, 0, 1334 1, /* default 32 vs 16 bit size */ 1335 1 /* limit granularity (byte/page units)*/ }, 1336 /* Null Descriptor - overwritten by call gate */ 1337{ 0x0, /* segment base address */ 1338 0x0, /* length - all address space */ 1339 0, /* segment type */ 1340 0, /* segment descriptor priority level */ 1341 0, /* segment descriptor present */ 1342 0, 0, 1343 0, /* default 32 vs 16 bit size */ 1344 0 /* limit granularity (byte/page units)*/ }, 1345 /* Data Descriptor for user */ 1346{ 0x0, /* segment base 
address */ 1347 0xfffff, /* length - all address space */ 1348 SDT_MEMRWA, /* segment type */ 1349 SEL_UPL, /* segment descriptor priority level */ 1350 1, /* segment descriptor present */ 1351 0, 0, 1352 1, /* default 32 vs 16 bit size */ 1353 1 /* limit granularity (byte/page units)*/ }, 1354}; 1355 1356void 1357setidt(idx, func, typ, dpl, selec) 1358 int idx; 1359 inthand_t *func; 1360 int typ; 1361 int dpl; 1362 int selec; 1363{ 1364 struct gate_descriptor *ip; 1365 1366 ip = idt + idx; 1367 ip->gd_looffset = (int)func; 1368 ip->gd_selector = selec; 1369 ip->gd_stkcpy = 0; 1370 ip->gd_xx = 0; 1371 ip->gd_type = typ; 1372 ip->gd_dpl = dpl; 1373 ip->gd_p = 1; 1374 ip->gd_hioffset = ((int)func)>>16 ; 1375} 1376 1377#define IDTVEC(name) __CONCAT(X,name) 1378 1379extern inthand_t 1380 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl), 1381 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm), 1382 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot), 1383 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align), 1384 IDTVEC(xmm), IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall); 1385 1386void 1387sdtossd(sd, ssd) 1388 struct segment_descriptor *sd; 1389 struct soft_segment_descriptor *ssd; 1390{ 1391 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase; 1392 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit; 1393 ssd->ssd_type = sd->sd_type; 1394 ssd->ssd_dpl = sd->sd_dpl; 1395 ssd->ssd_p = sd->sd_p; 1396 ssd->ssd_def32 = sd->sd_def32; 1397 ssd->ssd_gran = sd->sd_gran; 1398} 1399 1400#define PHYSMAP_SIZE (2 * 8) 1401 1402/* 1403 * Populate the (physmap) array with base/bound pairs describing the 1404 * available physical memory in the system, then test this memory and 1405 * build the phys_avail array describing the actually-available memory. 1406 * 1407 * If we cannot accurately determine the physical memory map, then use 1408 * value from the 0xE801 call, and failing that, the RTC. 1409 * 1410 * Total memory size may be set by the kernel environment variable 1411 * hw.physmem or the compile-time define MAXMEM. 1412 */ 1413static void 1414getmemsize(int first) 1415{ 1416 int i, physmap_idx, pa_indx; 1417 u_int basemem, extmem; 1418 struct vm86frame vmf; 1419 struct vm86context vmc; 1420 vm_offset_t pa, physmap[PHYSMAP_SIZE]; 1421 pt_entry_t pte; 1422 const char *cp; 1423 struct bios_smap *smap; 1424 1425 bzero(&vmf, sizeof(struct vm86frame)); 1426 bzero(physmap, sizeof(physmap)); 1427 1428 /* 1429 * Perform "base memory" related probes & setup 1430 */ 1431 vm86_intcall(0x12, &vmf); 1432 basemem = vmf.vmf_ax; 1433 if (basemem > 640) { 1434 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n", 1435 basemem); 1436 basemem = 640; 1437 } 1438 1439 /* 1440 * XXX if biosbasemem is now < 640, there is a `hole' 1441 * between the end of base memory and the start of 1442 * ISA memory. The hole may be empty or it may 1443 * contain BIOS code or data. Map it read/write so 1444 * that the BIOS can write to it. (Memory from 0 to 1445 * the physical end of the kernel is mapped read-only 1446 * to begin with and then parts of it are remapped. 1447 * The parts that aren't remapped form holes that 1448 * remain read-only and are unused by the kernel. 1449 * The base memory area is below the physical end of 1450 * the kernel and right now forms a read-only hole. 1451 * The part of it from PAGE_SIZE to 1452 * (trunc_page(biosbasemem * 1024) - 1) will be 1453 * remapped and used by the kernel later.) 
1454 * 1455 * This code is similar to the code used in 1456 * pmap_mapdev, but since no memory needs to be 1457 * allocated we simply change the mapping. 1458 */ 1459 for (pa = trunc_page(basemem * 1024); 1460 pa < ISA_HOLE_START; pa += PAGE_SIZE) { 1461 pte = (pt_entry_t)vtopte(pa + KERNBASE); 1462 *pte = pa | PG_RW | PG_V; 1463 } 1464 1465 /* 1466 * if basemem != 640, map pages r/w into vm86 page table so 1467 * that the bios can scribble on it. 1468 */ 1469 pte = (pt_entry_t)vm86paddr; 1470 for (i = basemem / 4; i < 160; i++) 1471 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U; 1472 1473 /* 1474 * map page 1 R/W into the kernel page table so we can use it 1475 * as a buffer. The kernel will unmap this page later. 1476 */ 1477 pte = (pt_entry_t)vtopte(KERNBASE + (1 << PAGE_SHIFT)); 1478 *pte = (1 << PAGE_SHIFT) | PG_RW | PG_V; 1479 1480 /* 1481 * get memory map with INT 15:E820 1482 */ 1483 vmc.npages = 0; 1484 smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT)); 1485 vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di); 1486 1487 physmap_idx = 0; 1488 vmf.vmf_ebx = 0; 1489 do { 1490 vmf.vmf_eax = 0xE820; 1491 vmf.vmf_edx = SMAP_SIG; 1492 vmf.vmf_ecx = sizeof(struct bios_smap); 1493 i = vm86_datacall(0x15, &vmf, &vmc); 1494 if (i || vmf.vmf_eax != SMAP_SIG) 1495 break; 1496 if (boothowto & RB_VERBOSE) 1497 printf("SMAP type=%02x base=%08x %08x len=%08x %08x\n", 1498 smap->type, 1499 *(u_int32_t *)((char *)&smap->base + 4), 1500 (u_int32_t)smap->base, 1501 *(u_int32_t *)((char *)&smap->length + 4), 1502 (u_int32_t)smap->length); 1503 1504 if (smap->type != 0x01) 1505 goto next_run; 1506 1507 if (smap->length == 0) 1508 goto next_run; 1509 1510 if (smap->base >= 0xffffffff) { 1511 printf("%uK of memory above 4GB ignored\n", 1512 (u_int)(smap->length / 1024)); 1513 goto next_run; 1514 } 1515 1516 for (i = 0; i <= physmap_idx; i += 2) { 1517 if (smap->base < physmap[i + 1]) { 1518 if (boothowto & RB_VERBOSE) 1519 printf( 1520 "Overlapping or non-montonic memory region, ignoring second region\n"); 1521 goto next_run; 1522 } 1523 } 1524 1525 if (smap->base == physmap[physmap_idx + 1]) { 1526 physmap[physmap_idx + 1] += smap->length; 1527 goto next_run; 1528 } 1529 1530 physmap_idx += 2; 1531 if (physmap_idx == PHYSMAP_SIZE) { 1532 printf( 1533 "Too many segments in the physical address map, giving up\n"); 1534 break; 1535 } 1536 physmap[physmap_idx] = smap->base; 1537 physmap[physmap_idx + 1] = smap->base + smap->length; 1538next_run: 1539 } while (vmf.vmf_ebx != 0); 1540 1541 if (physmap[1] != 0) 1542 goto physmap_done; 1543 1544 /* 1545 * If we failed above, try memory map with INT 15:E801 1546 */ 1547 vmf.vmf_ax = 0xE801; 1548 if (vm86_intcall(0x15, &vmf) == 0) { 1549 extmem = vmf.vmf_cx + vmf.vmf_dx * 64; 1550 } else { 1551#if 0 1552 vmf.vmf_ah = 0x88; 1553 vm86_intcall(0x15, &vmf); 1554 extmem = vmf.vmf_ax; 1555#else 1556 /* 1557 * Prefer the RTC value for extended memory. 1558 */ 1559 extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8); 1560#endif 1561 } 1562 1563 /* 1564 * Special hack for chipsets that still remap the 384k hole when 1565 * there's 16MB of memory - this really confuses people that 1566 * are trying to use bus mastering ISA controllers with the 1567 * "16MB limit"; they only have 16MB, but the remapping puts 1568 * them beyond the limit. 1569 * 1570 * If extended memory is between 15-16MB (16-17MB phys address range), 1571 * chop it to 15MB. 
1572 */ 1573 if ((extmem > 15 * 1024) && (extmem < 16 * 1024)) 1574 extmem = 15 * 1024; 1575 1576 physmap[0] = 0; 1577 physmap[1] = basemem * 1024; 1578 physmap_idx = 2; 1579 physmap[physmap_idx] = 0x100000; 1580 physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024; 1581 1582physmap_done: 1583 /* 1584 * Now, physmap contains a map of physical memory. 1585 */ 1586 1587#ifdef SMP 1588 /* make hole for AP bootstrap code */ 1589 physmap[1] = mp_bootaddress(physmap[1] / 1024); 1590 1591 /* look for the MP hardware - needed for apic addresses */ 1592 i386_mp_probe(); 1593#endif 1594 1595 /* 1596 * Maxmem isn't the "maximum memory", it's one larger than the 1597 * highest page of the physical address space. It should be 1598 * called something like "Maxphyspage". We may adjust this 1599 * based on ``hw.physmem'' and the results of the memory test. 1600 */ 1601 Maxmem = atop(physmap[physmap_idx + 1]); 1602 1603#ifdef MAXMEM 1604 Maxmem = MAXMEM / 4; 1605#endif 1606 1607 /* 1608 * hw.maxmem is a size in bytes; we also allow k, m, and g suffixes 1609 * for the appropriate modifiers. This overrides MAXMEM. 1610 */ 1611 if ((cp = getenv("hw.physmem")) != NULL) { 1612 u_int64_t AllowMem, sanity; 1613 char *ep; 1614 1615 sanity = AllowMem = strtouq(cp, &ep, 0); 1616 if ((ep != cp) && (*ep != 0)) { 1617 switch(*ep) { 1618 case 'g': 1619 case 'G': 1620 AllowMem <<= 10; 1621 case 'm': 1622 case 'M': 1623 AllowMem <<= 10; 1624 case 'k': 1625 case 'K': 1626 AllowMem <<= 10; 1627 break; 1628 default: 1629 AllowMem = sanity = 0; 1630 } 1631 if (AllowMem < sanity) 1632 AllowMem = 0; 1633 } 1634 if (AllowMem == 0) 1635 printf("Ignoring invalid memory size of '%s'\n", cp); 1636 else 1637 Maxmem = atop(AllowMem); 1638 } 1639 1640 if (atop(physmap[physmap_idx + 1]) != Maxmem && 1641 (boothowto & RB_VERBOSE)) 1642 printf("Physical memory use set to %uK\n", Maxmem * 4); 1643 1644 /* 1645 * If Maxmem has been increased beyond what the system has detected, 1646 * extend the last memory segment to the new limit. 1647 */ 1648 if (atop(physmap[physmap_idx + 1]) < Maxmem) 1649 physmap[physmap_idx + 1] = ptoa(Maxmem); 1650 1651 /* call pmap initialization to make new kernel address space */ 1652 pmap_bootstrap(first, 0); 1653 1654 /* 1655 * Size up each available chunk of physical memory. 1656 */ 1657 physmap[0] = PAGE_SIZE; /* mask off page 0 */ 1658 pa_indx = 0; 1659 phys_avail[pa_indx++] = physmap[0]; 1660 phys_avail[pa_indx] = physmap[0]; 1661#if 0 1662 pte = (pt_entry_t)vtopte(KERNBASE); 1663#else 1664 pte = (pt_entry_t)CMAP1; 1665#endif 1666 1667 /* 1668 * physmap is in bytes, so when converting to page boundaries, 1669 * round up the start address and round down the end address. 1670 */ 1671 for (i = 0; i <= physmap_idx; i += 2) { 1672 vm_offset_t end; 1673 1674 end = ptoa(Maxmem); 1675 if (physmap[i + 1] < end) 1676 end = trunc_page(physmap[i + 1]); 1677 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) { 1678 int tmp, page_bad; 1679#if 0 1680 int *ptr = 0; 1681#else 1682 int *ptr = (int *)CADDR1; 1683#endif 1684 1685 /* 1686 * block out kernel memory as not available. 
1687 */ 1688 if (pa >= 0x100000 && pa < first) 1689 continue; 1690 1691 page_bad = FALSE; 1692 1693 /* 1694 * map page into kernel: valid, read/write,non-cacheable 1695 */ 1696 *pte = pa | PG_V | PG_RW | PG_N; 1697 invltlb(); 1698 1699 tmp = *(int *)ptr; 1700 /* 1701 * Test for alternating 1's and 0's 1702 */ 1703 *(volatile int *)ptr = 0xaaaaaaaa; 1704 if (*(volatile int *)ptr != 0xaaaaaaaa) { 1705 page_bad = TRUE; 1706 } 1707 /* 1708 * Test for alternating 0's and 1's 1709 */ 1710 *(volatile int *)ptr = 0x55555555; 1711 if (*(volatile int *)ptr != 0x55555555) { 1712 page_bad = TRUE; 1713 } 1714 /* 1715 * Test for all 1's 1716 */ 1717 *(volatile int *)ptr = 0xffffffff; 1718 if (*(volatile int *)ptr != 0xffffffff) { 1719 page_bad = TRUE; 1720 } 1721 /* 1722 * Test for all 0's 1723 */ 1724 *(volatile int *)ptr = 0x0; 1725 if (*(volatile int *)ptr != 0x0) { 1726 page_bad = TRUE; 1727 } 1728 /* 1729 * Restore original value. 1730 */ 1731 *(int *)ptr = tmp; 1732 1733 /* 1734 * Adjust array of valid/good pages. 1735 */ 1736 if (page_bad == TRUE) { 1737 continue; 1738 } 1739 /* 1740 * If this good page is a continuation of the 1741 * previous set of good pages, then just increase 1742 * the end pointer. Otherwise start a new chunk. 1743 * Note that "end" points one higher than end, 1744 * making the range >= start and < end. 1745 * If we're also doing a speculative memory 1746 * test and we at or past the end, bump up Maxmem 1747 * so that we keep going. The first bad page 1748 * will terminate the loop. 1749 */ 1750 if (phys_avail[pa_indx] == pa) { 1751 phys_avail[pa_indx] += PAGE_SIZE; 1752 } else { 1753 pa_indx++; 1754 if (pa_indx == PHYS_AVAIL_ARRAY_END) { 1755 printf( 1756 "Too many holes in the physical address space, giving up\n"); 1757 pa_indx--; 1758 break; 1759 } 1760 phys_avail[pa_indx++] = pa; /* start */ 1761 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */ 1762 } 1763 physmem++; 1764 } 1765 } 1766 *pte = 0; 1767 invltlb(); 1768 1769 /* 1770 * XXX 1771 * The last chunk must contain at least one page plus the message 1772 * buffer to avoid complicating other code (message buffer address 1773 * calculation, etc.). 1774 */ 1775 while (phys_avail[pa_indx - 1] + PAGE_SIZE + 1776 round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) { 1777 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]); 1778 phys_avail[pa_indx--] = 0; 1779 phys_avail[pa_indx--] = 0; 1780 } 1781 1782 Maxmem = atop(phys_avail[pa_indx]); 1783 1784 /* Trim off space for the message buffer. 
*/ 1785 phys_avail[pa_indx] -= round_page(MSGBUF_SIZE); 1786 1787 avail_end = phys_avail[pa_indx]; 1788} 1789 1790void 1791init386(first) 1792 int first; 1793{ 1794 struct gate_descriptor *gdp; 1795 int gsel_tss, metadata_missing, off, x; 1796#ifndef SMP 1797 /* table descriptors - used to load tables by microp */ 1798 struct region_descriptor r_gdt, r_idt; 1799#endif 1800 1801 proc0.p_addr = proc0paddr; 1802 1803 atdevbase = ISA_HOLE_START + KERNBASE; 1804 1805 metadata_missing = 0; 1806 if (bootinfo.bi_modulep) { 1807 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE; 1808 preload_bootstrap_relocate(KERNBASE); 1809 } else { 1810 metadata_missing = 1; 1811 } 1812 if (bootinfo.bi_envp) 1813 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE; 1814 1815 /* Init basic tunables, hz etc */ 1816 init_param(); 1817 1818 /* 1819 * make gdt memory segments, the code segment goes up to end of the 1820 * page with etext in it, the data segment goes to the end of 1821 * the address space 1822 */ 1823 /* 1824 * XXX text protection is temporarily (?) disabled. The limit was 1825 * i386_btop(round_page(etext)) - 1. 1826 */ 1827 gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1); 1828 gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1); 1829#ifdef SMP 1830 gdt_segs[GPRIV_SEL].ssd_limit = 1831 atop(sizeof(struct privatespace) - 1); 1832 gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[0]; 1833 gdt_segs[GPROC0_SEL].ssd_base = 1834 (int) &SMP_prvspace[0].globaldata.gd_common_tss; 1835 SMP_prvspace[0].globaldata.gd_prvspace = &SMP_prvspace[0].globaldata; 1836#else 1837 gdt_segs[GPRIV_SEL].ssd_limit = 1838 atop(sizeof(struct globaldata) - 1); 1839 gdt_segs[GPRIV_SEL].ssd_base = (int) &__globaldata; 1840 gdt_segs[GPROC0_SEL].ssd_base = 1841 (int) &__globaldata.gd_common_tss; 1842 __globaldata.gd_prvspace = &__globaldata; 1843#endif 1844 1845 for (x = 0; x < NGDT; x++) { 1846#ifdef BDE_DEBUGGER 1847 /* avoid overwriting db entries with APM ones */ 1848 if (x >= GAPMCODE32_SEL && x <= GAPMDATA_SEL) 1849 continue; 1850#endif 1851 ssdtosd(&gdt_segs[x], &gdt[x].sd); 1852 } 1853 1854 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 1855 r_gdt.rd_base = (int) gdt; 1856 lgdt(&r_gdt); 1857 1858 /* setup curproc so that mutexes work */ 1859 PCPU_SET(curproc, &proc0); 1860 PCPU_SET(spinlocks, NULL); 1861 1862 LIST_INIT(&proc0.p_contested); 1863 1864 /* 1865 * Initialize mutexes. 1866 */ 1867 mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE); 1868 mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_RECURSE); 1869 mtx_init(&proc0.p_mtx, "process lock", MTX_DEF); 1870 mtx_init(&clock_lock, "clk", MTX_SPIN | MTX_RECURSE); 1871#ifdef SMP 1872 mtx_init(&imen_mtx, "imen", MTX_SPIN); 1873#endif 1874 mtx_lock(&Giant); 1875 1876 /* make ldt memory segments */ 1877 /* 1878 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max. And it 1879 * should be spelled ...MAX_USER... 
	/* make ldt memory segments */
	/*
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  And it
	 * should be spelled ...MAX_USER...
	 */
	ldt_segs[LUCODE_SEL].ssd_limit = atop(VM_MAXUSER_ADDRESS - 1);
	ldt_segs[LUDATA_SEL].ssd_limit = atop(VM_MAXUSER_ADDRESS - 1);
	for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
		ssdtosd(&ldt_segs[x], &ldt[x].sd);

	_default_ldt = GSEL(GLDT_SEL, SEL_KPL);
	lldt(_default_ldt);
	PCPU_SET(currentldt, _default_ldt);

	/*
	 * Exceptions.  SDT_SYS386TGT installs a trap gate (interrupts
	 * stay enabled on entry); SDT_SYS386IGT installs an interrupt
	 * gate, which clears PSL_I, so the debug, breakpoint and page
	 * fault vectors are entered with interrupts disabled.
	 */
	for (x = 0; x < NIDT; x++)
		setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
		    GSEL(GCODE_SEL, SEL_KPL));
	setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(1, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(3, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(8, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
	setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(14, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(15, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(17, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(18, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(19, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(0x80, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));

	r_idt.rd_limit = sizeof(idt0) - 1;
	r_idt.rd_base = (int) idt;
	lidt(&r_idt);

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

	if (metadata_missing)
		printf("WARNING: loader(8) metadata is missing!\n");

#ifdef DEV_ISA
	isa_defaultirq();
#endif

#ifdef DDB
	kdb_init();
	if (boothowto & RB_KDB)
		Debugger("Boot flags requested debugger");
#endif

	finishidentcpu();	/* Final stage of CPU initialization */
	setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	initializecpu();	/* Initialize CPU registers */
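	/*
	 * tss_esp0/tss_ss0 are what the CPU loads on a user -> kernel
	 * privilege transition, so they must point at the top of proc0's
	 * kernel stack (its UPAGES).  The 16 bytes left free at the top
	 * appear to be reserved so a normal trapframe can be grown into
	 * a vm86 trapframe (the extra segment register slots) in place.
	 */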
	/* make an initial tss so cpu can get interrupt stack on syscall! */
	PCPU_SET(common_tss.tss_esp0,
	    (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16);
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	private_tss = 0;
	PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
	ltr(gsel_tss);

	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
	    dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
	    dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_cr3 = (int)IdlePTD;
	dblfault_tss.tss_eip = (int)dblfault_handler;
	dblfault_tss.tss_eflags = PSL_KERNEL;
	dblfault_tss.tss_ds = dblfault_tss.tss_es =
	    dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
	dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);

	vm86_initialize();
	getmemsize(first);

	/* now running on new page tables, configured, and u/iom is accessible */

	/* Map the message buffer. */
	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);

	msgbufinit(msgbufp, MSGBUF_SIZE);

	/* make a call gate to reenter kernel with */
	gdp = &ldt[LSYS5CALLS_SEL].gd;

	x = (int) &IDTVEC(lcall_syscall);
	gdp->gd_looffset = x;
	gdp->gd_selector = GSEL(GCODE_SEL, SEL_KPL);
	gdp->gd_stkcpy = 1;
	gdp->gd_type = SDT_SYS386CGT;
	gdp->gd_dpl = SEL_UPL;
	gdp->gd_p = 1;
	gdp->gd_hioffset = x >> 16;
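	/*
	 * LSYS5CALLS_SEL is LDT entry 0, so the selector seen from user
	 * mode is 7 (index 0, TI=1, RPL=3): this call gate is the entry
	 * point for "lcall $7,$0" style (SysV/iBCS2) system calls.
	 * gd_stkcpy makes the CPU copy one dword of the caller's stack
	 * across the privilege switch.  The same gate is reused for the
	 * BSD/OS and Solaris/x86 entries just below.
	 */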
	/* XXX does this work? */
	ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
	ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];

	/* transfer to user mode */

	_ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
	_udatasel = LSEL(LUDATA_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	proc0.p_addr->u_pcb.pcb_flags = 0;
	proc0.p_addr->u_pcb.pcb_cr3 = (int)IdlePTD;
	proc0.p_addr->u_pcb.pcb_ext = 0;
	proc0.p_frame = &proc0_tf;
}

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
static void f00f_hack(void *unused);
SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);

static void
f00f_hack(void *unused)
{
	struct gate_descriptor *new_idt;
#ifndef SMP
	struct region_descriptor r_idt;
#endif
	vm_offset_t tmp;

	if (!has_f00f_bug)
		return;

	GIANT_REQUIRED;

	printf("Intel Pentium detected, installing workaround for F00F bug\n");

	r_idt.rd_limit = sizeof(idt0) - 1;

	tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
	if (tmp == 0)
		panic("kmem_alloc returned 0");
	if (((unsigned int)tmp & (PAGE_SIZE-1)) != 0)
		panic("kmem_alloc returned non-page-aligned memory");
	/* Put the first seven entries in the lower page */
	new_idt = (struct gate_descriptor*)(tmp + PAGE_SIZE - (7*8));
	bcopy(idt, new_idt, sizeof(idt0));
	r_idt.rd_base = (int)new_idt;
	lidt(&r_idt);
	idt = new_idt;
	if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
	    VM_PROT_READ, FALSE) != KERN_SUCCESS)
		panic("vm_map_protect failed");
	return;
}
#endif /* defined(I586_CPU) && !defined(NO_F00F_HACK) */

int
ptrace_set_pc(p, addr)
	struct proc *p;
	unsigned long addr;
{
	p->p_frame->tf_eip = addr;
	return (0);
}

int
ptrace_single_step(p)
	struct proc *p;
{
	p->p_frame->tf_eflags |= PSL_T;
	return (0);
}

int
fill_regs(p, regs)
	struct proc *p;
	struct reg *regs;
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = p->p_frame;
	regs->r_fs = tp->tf_fs;
	regs->r_es = tp->tf_es;
	regs->r_ds = tp->tf_ds;
	regs->r_edi = tp->tf_edi;
	regs->r_esi = tp->tf_esi;
	regs->r_ebp = tp->tf_ebp;
	regs->r_ebx = tp->tf_ebx;
	regs->r_edx = tp->tf_edx;
	regs->r_ecx = tp->tf_ecx;
	regs->r_eax = tp->tf_eax;
	regs->r_eip = tp->tf_eip;
	regs->r_cs = tp->tf_cs;
	regs->r_eflags = tp->tf_eflags;
	regs->r_esp = tp->tf_esp;
	regs->r_ss = tp->tf_ss;
	pcb = &p->p_addr->u_pcb;
	regs->r_gs = pcb->pcb_gs;
	return (0);
}

int
set_regs(p, regs)
	struct proc *p;
	struct reg *regs;
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = p->p_frame;
	if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
	    !CS_SECURE(regs->r_cs))
		return (EINVAL);
	tp->tf_fs = regs->r_fs;
	tp->tf_es = regs->r_es;
	tp->tf_ds = regs->r_ds;
	tp->tf_edi = regs->r_edi;
	tp->tf_esi = regs->r_esi;
	tp->tf_ebp = regs->r_ebp;
	tp->tf_ebx = regs->r_ebx;
	tp->tf_edx = regs->r_edx;
	tp->tf_ecx = regs->r_ecx;
	tp->tf_eax = regs->r_eax;
	tp->tf_eip = regs->r_eip;
	tp->tf_cs = regs->r_cs;
	tp->tf_eflags = regs->r_eflags;
	tp->tf_esp = regs->r_esp;
	tp->tf_ss = regs->r_ss;
	pcb = &p->p_addr->u_pcb;
	pcb->pcb_gs = regs->r_gs;
	return (0);
}

#ifdef CPU_ENABLE_SSE
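/*
 * On SSE-capable CPUs the FPU state is saved with fxsave into a
 * struct savexmm, but the ptrace(2)/core struct fpreg still uses the
 * legacy fsave layout (struct save87).  The two helpers below convert
 * between the layouts so that fill_fpregs()/set_fpregs() can present
 * a consistent format to debuggers.
 */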
static void
fill_fpregs_xmm(sv_xmm, sv_87)
	struct savexmm *sv_xmm;
	struct save87 *sv_87;
{
	register struct env87 *penv_87 = &sv_87->sv_env;
	register struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* FPU control/status */
	penv_87->en_cw = penv_xmm->en_cw;
	penv_87->en_sw = penv_xmm->en_sw;
	penv_87->en_tw = penv_xmm->en_tw;
	penv_87->en_fip = penv_xmm->en_fip;
	penv_87->en_fcs = penv_xmm->en_fcs;
	penv_87->en_opcode = penv_xmm->en_opcode;
	penv_87->en_foo = penv_xmm->en_foo;
	penv_87->en_fos = penv_xmm->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;

	sv_87->sv_ex_sw = sv_xmm->sv_ex_sw;
}

static void
set_fpregs_xmm(sv_87, sv_xmm)
	struct save87 *sv_87;
	struct savexmm *sv_xmm;
{
	register struct env87 *penv_87 = &sv_87->sv_env;
	register struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* FPU control/status */
	penv_xmm->en_cw = penv_87->en_cw;
	penv_xmm->en_sw = penv_87->en_sw;
	penv_xmm->en_tw = penv_87->en_tw;
	penv_xmm->en_fip = penv_87->en_fip;
	penv_xmm->en_fcs = penv_87->en_fcs;
	penv_xmm->en_opcode = penv_87->en_opcode;
	penv_xmm->en_foo = penv_87->en_foo;
	penv_xmm->en_fos = penv_87->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];

	sv_xmm->sv_ex_sw = sv_87->sv_ex_sw;
}
#endif /* CPU_ENABLE_SSE */

int
fill_fpregs(p, fpregs)
	struct proc *p;
	struct fpreg *fpregs;
{
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr) {
		fill_fpregs_xmm(&p->p_addr->u_pcb.pcb_save.sv_xmm,
		    (struct save87 *)fpregs);
		return (0);
	}
#endif /* CPU_ENABLE_SSE */
	bcopy(&p->p_addr->u_pcb.pcb_save.sv_87, fpregs, sizeof *fpregs);
	return (0);
}

int
set_fpregs(p, fpregs)
	struct proc *p;
	struct fpreg *fpregs;
{
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr) {
		set_fpregs_xmm((struct save87 *)fpregs,
		    &p->p_addr->u_pcb.pcb_save.sv_xmm);
		return (0);
	}
#endif /* CPU_ENABLE_SSE */
	bcopy(fpregs, &p->p_addr->u_pcb.pcb_save.sv_87, sizeof *fpregs);
	return (0);
}

int
fill_dbregs(p, dbregs)
	struct proc *p;
	struct dbreg *dbregs;
{
	struct pcb *pcb;

	if (p == NULL) {
		/* No process given: read the live debug registers. */
		dbregs->dr0 = rdr0();
		dbregs->dr1 = rdr1();
		dbregs->dr2 = rdr2();
		dbregs->dr3 = rdr3();
		dbregs->dr4 = rdr4();
		dbregs->dr5 = rdr5();
		dbregs->dr6 = rdr6();
		dbregs->dr7 = rdr7();
	} else {
		pcb = &p->p_addr->u_pcb;
		dbregs->dr0 = pcb->pcb_dr0;
		dbregs->dr1 = pcb->pcb_dr1;
		dbregs->dr2 = pcb->pcb_dr2;
		dbregs->dr3 = pcb->pcb_dr3;
		dbregs->dr4 = 0;
		dbregs->dr5 = 0;
		dbregs->dr6 = pcb->pcb_dr6;
		dbregs->dr7 = pcb->pcb_dr7;
	}
	return (0);
}

int
set_dbregs(p, dbregs)
	struct proc *p;
	struct dbreg *dbregs;
{
	struct pcb *pcb;
	int i;
	u_int32_t mask1, mask2;

	if (p == NULL) {
		load_dr0(dbregs->dr0);
		load_dr1(dbregs->dr1);
		load_dr2(dbregs->dr2);
		load_dr3(dbregs->dr3);
		load_dr4(dbregs->dr4);
		load_dr5(dbregs->dr5);
		load_dr6(dbregs->dr6);
		load_dr7(dbregs->dr7);
	} else {
		/*
		 * Don't let an illegal value for dr7 get set.  Specifically,
		 * check for undefined settings.  Setting these bit patterns
		 * results in undefined behaviour and can lead to an
		 * unexpected TRCTRAP.
		 */
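		/*
		 * dr7 bits 16-31 hold eight 2-bit fields: the R/Wn and
		 * LENn controls for the four breakpoints.  The loop below
		 * rejects the value 10b in any of those fields; that
		 * encoding is reserved/undefined on these CPUs (for R/W
		 * it means I/O breakpoints, which require CR4.DE, and
		 * for LEN it is simply undefined).
		 */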
		for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 8;
		     i++, mask1 <<= 2, mask2 <<= 2)
			if ((dbregs->dr7 & mask1) == mask2)
				return (EINVAL);

		pcb = &p->p_addr->u_pcb;

		/*
		 * Don't let a process set a breakpoint that is not within
		 * the process's address space.  If a process could do this,
		 * it could halt the system by setting a breakpoint in the
		 * kernel (if ddb was enabled).  Thus, we need to check to
		 * make sure that no breakpoints are being enabled for
		 * addresses outside the process's address space, unless,
		 * perhaps, we were called by uid 0.
		 *
		 * XXX - what about when the watched area of the user's
		 * address space is written into from within the kernel
		 * ... wouldn't that still cause a breakpoint to be generated
		 * from within kernel mode?
		 */

		if (suser(p) != 0) {
			if (dbregs->dr7 & 0x3) {
				/* dr0 is enabled */
				if (dbregs->dr0 >= VM_MAXUSER_ADDRESS)
					return (EINVAL);
			}

			if (dbregs->dr7 & (0x3<<2)) {
				/* dr1 is enabled */
				if (dbregs->dr1 >= VM_MAXUSER_ADDRESS)
					return (EINVAL);
			}

			if (dbregs->dr7 & (0x3<<4)) {
				/* dr2 is enabled */
				if (dbregs->dr2 >= VM_MAXUSER_ADDRESS)
					return (EINVAL);
			}

			if (dbregs->dr7 & (0x3<<6)) {
				/* dr3 is enabled */
				if (dbregs->dr3 >= VM_MAXUSER_ADDRESS)
					return (EINVAL);
			}
		}

		pcb->pcb_dr0 = dbregs->dr0;
		pcb->pcb_dr1 = dbregs->dr1;
		pcb->pcb_dr2 = dbregs->dr2;
		pcb->pcb_dr3 = dbregs->dr3;
		pcb->pcb_dr6 = dbregs->dr6;
		pcb->pcb_dr7 = dbregs->dr7;

		pcb->pcb_flags |= PCB_DBREGS;
	}

	return (0);
}

/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0, otherwise.
 */
int
user_dbreg_trap(void)
{
	u_int32_t dr7, dr6;	/* debug registers dr6 and dr7 */
	u_int32_t bp;		/* breakpoint bits extracted from dr6 */
	int nbp;		/* number of breakpoints that triggered */
	caddr_t addr[4];	/* breakpoint addresses */
	int i;

	dr7 = rdr7();
	if ((dr7 & 0x000000ff) == 0) {
		/*
		 * All of the local and global enable bits (L0-L3, G0-G3)
		 * in the dr7 register are zero, thus the trap couldn't
		 * have been caused by the hardware debug registers.
		 */
		return 0;
	}

	nbp = 0;
	dr6 = rdr6();
	bp = dr6 & 0x0000000f;

	if (!bp) {
		/*
		 * None of the breakpoint bits are set, meaning this
		 * trap was not caused by any of the debug registers.
		 */
		return 0;
	}

	/*
	 * At least one of the breakpoints was hit; check to see
	 * which ones and whether any of them are user space addresses.
	 */

	if (bp & 0x01) {
		addr[nbp++] = (caddr_t)rdr0();
	}
	if (bp & 0x02) {
		addr[nbp++] = (caddr_t)rdr1();
	}
	if (bp & 0x04) {
		addr[nbp++] = (caddr_t)rdr2();
	}
	if (bp & 0x08) {
		addr[nbp++] = (caddr_t)rdr3();
	}

	for (i = 0; i < nbp; i++) {
		if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
			/*
			 * addr[i] is in user space
			 */
			return nbp;
		}
	}

	/*
	 * None of the breakpoints are in user space.
	 */
	return 0;
}

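/*
 * Stub used when DDB is not compiled in, so that code which calls
 * Debugger() still links; the request is just logged.
 */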
#ifndef DDB
void
Debugger(const char *msg)
{
	printf("Debugger(\"%s\") called.\n", msg);
}
#endif /* no DDB */

#include <sys/disklabel.h>

/*
 * Determine the size of the transfer, and make sure it is
 * within the boundaries of the partition.  Adjust transfer
 * if needed, and signal errors or early completion.
 */
int
bounds_check_with_label(struct bio *bp, struct disklabel *lp, int wlabel)
{
	struct partition *p = lp->d_partitions + dkpart(bp->bio_dev);
	int labelsect = lp->d_partitions[0].p_offset;
	int maxsz = p->p_size,
	    sz = (bp->bio_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT;

	/* overwriting disk label ? */
	/* XXX should also protect bootstrap in first 8K */
	if (bp->bio_blkno + p->p_offset <= LABELSECTOR + labelsect &&
#if LABELSECTOR != 0
	    bp->bio_blkno + p->p_offset + sz > LABELSECTOR + labelsect &&
#endif
	    (bp->bio_cmd == BIO_WRITE) && wlabel == 0) {
		bp->bio_error = EROFS;
		goto bad;
	}

#if defined(DOSBBSECTOR) && defined(notyet)
	/* overwriting master boot record? */
	if (bp->bio_blkno + p->p_offset <= DOSBBSECTOR &&
	    (bp->bio_cmd == BIO_WRITE) && wlabel == 0) {
		bp->bio_error = EROFS;
		goto bad;
	}
#endif

	/* beyond partition? */
	if (bp->bio_blkno < 0 || bp->bio_blkno + sz > maxsz) {
		/* if exactly at end of disk, return an EOF */
		if (bp->bio_blkno == maxsz) {
			bp->bio_resid = bp->bio_bcount;
			return (0);
		}
		/* or truncate if part of it fits */
		sz = maxsz - bp->bio_blkno;
		if (sz <= 0) {
			bp->bio_error = EINVAL;
			goto bad;
		}
		bp->bio_bcount = sz << DEV_BSHIFT;
	}

	bp->bio_pblkno = bp->bio_blkno + p->p_offset;
	return (1);

bad:
	bp->bio_flags |= BIO_ERROR;
	return (-1);
}

#ifdef DDB

/*
 * Provide inb() and outb() as functions.  They are normally only
 * available as macros calling inlined functions, thus cannot be
 * called inside DDB.
 *
 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined.
 */

#undef inb
#undef outb

/* silence compiler warnings */
u_char inb(u_int);
void outb(u_int, u_char);

u_char
inb(u_int port)
{
	u_char data;
	/*
	 * We use %%dx and not %1 here because i/o is done at %dx and not at
	 * %edx, while gcc generates inferior code (movw instead of movl)
	 * if we tell it to load (u_short) port.
	 */
	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

void
outb(u_int port, u_char data)
{
	u_char al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

#endif /* DDB */