machdep.c revision 82393
1/*- 2 * Copyright (c) 1992 Terrence R. Lambert. 3 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to Berkeley by 7 * William Jolitz. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by the University of 20 * California, Berkeley and its contributors. 21 * 4. Neither the name of the University nor the names of its contributors 22 * may be used to endorse or promote products derived from this software 23 * without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 
36 * 37 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 38 * $FreeBSD: head/sys/amd64/amd64/machdep.c 82393 2001-08-27 05:11:53Z peter $ 39 */ 40 41#include "opt_atalk.h" 42#include "opt_compat.h" 43#include "opt_cpu.h" 44#include "opt_ddb.h" 45#include "opt_inet.h" 46#include "opt_ipx.h" 47#include "opt_isa.h" 48#include "opt_maxmem.h" 49#include "opt_msgbuf.h" 50#include "opt_npx.h" 51#include "opt_perfmon.h" 52#include "opt_upages.h" 53/* #include "opt_userconfig.h" */ 54 55#include <sys/param.h> 56#include <sys/systm.h> 57#include <sys/sysproto.h> 58#include <sys/signalvar.h> 59#include <sys/kernel.h> 60#include <sys/ktr.h> 61#include <sys/linker.h> 62#include <sys/lock.h> 63#include <sys/malloc.h> 64#include <sys/mutex.h> 65#include <sys/pcpu.h> 66#include <sys/proc.h> 67#include <sys/bio.h> 68#include <sys/buf.h> 69#include <sys/reboot.h> 70#include <sys/smp.h> 71#include <sys/callout.h> 72#include <sys/msgbuf.h> 73#include <sys/sysent.h> 74#include <sys/sysctl.h> 75#include <sys/vmmeter.h> 76#include <sys/bus.h> 77#include <sys/eventhandler.h> 78 79#include <vm/vm.h> 80#include <vm/vm_param.h> 81#include <sys/lock.h> 82#include <vm/vm_kern.h> 83#include <vm/vm_object.h> 84#include <vm/vm_page.h> 85#include <vm/vm_map.h> 86#include <vm/vm_pager.h> 87#include <vm/vm_extern.h> 88 89#include <sys/user.h> 90#include <sys/exec.h> 91#include <sys/cons.h> 92 93#include <ddb/ddb.h> 94 95#include <net/netisr.h> 96 97#include <machine/cpu.h> 98#include <machine/cputypes.h> 99#include <machine/reg.h> 100#include <machine/clock.h> 101#include <machine/specialreg.h> 102#include <machine/bootinfo.h> 103#include <machine/md_var.h> 104#include <machine/pc/bios.h> 105#include <machine/pcb_ext.h> /* pcb.h included via sys/user.h */ 106#include <machine/globals.h> 107#ifdef PERFMON 108#include <machine/perfmon.h> 109#endif 110#ifdef SMP 111#include <machine/privatespace.h> 112#endif 113 114#include <i386/isa/icu.h> 115#include <i386/isa/intr_machdep.h> 116#include <isa/rtc.h> 117#include <machine/vm86.h> 118#include <sys/ptrace.h> 119#include <machine/sigframe.h> 120 121extern void init386 __P((int first)); 122extern void dblfault_handler __P((void)); 123 124extern void printcpuinfo(void); /* XXX header file */ 125extern void earlysetcpuclass(void); /* same header file */ 126extern void finishidentcpu(void); 127extern void panicifcpuunsupported(void); 128extern void initializecpu(void); 129 130#define CS_SECURE(cs) (ISPL(cs) == SEL_UPL) 131#define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0) 132 133static void cpu_startup __P((void *)); 134#ifdef CPU_ENABLE_SSE 135static void set_fpregs_xmm __P((struct save87 *, struct savexmm *)); 136static void fill_fpregs_xmm __P((struct savexmm *, struct save87 *)); 137#endif /* CPU_ENABLE_SSE */ 138SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL) 139 140int _udatasel, _ucodesel; 141u_int atdevbase; 142 143#if defined(SWTCH_OPTIM_STATS) 144extern int swtch_optim_stats; 145SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats, 146 CTLFLAG_RD, &swtch_optim_stats, 0, ""); 147SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count, 148 CTLFLAG_RD, &tlb_flush_count, 0, ""); 149#endif 150 151#ifdef PC98 152static int ispc98 = 1; 153#else 154static int ispc98 = 0; 155#endif 156SYSCTL_INT(_machdep, OID_AUTO, ispc98, CTLFLAG_RD, &ispc98, 0, ""); 157 158int physmem = 0; 159int cold = 1; 160 161#ifdef COMPAT_43 162static void osendsig __P((sig_t catcher, int sig, sigset_t *mask, u_long code)); 163#endif 164 165static int 166sysctl_hw_physmem(SYSCTL_HANDLER_ARGS) 167{ 
168 int error = sysctl_handle_int(oidp, 0, ctob(physmem), req); 169 return (error); 170} 171 172SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_INT|CTLFLAG_RD, 173 0, 0, sysctl_hw_physmem, "IU", ""); 174 175static int 176sysctl_hw_usermem(SYSCTL_HANDLER_ARGS) 177{ 178 int error = sysctl_handle_int(oidp, 0, 179 ctob(physmem - cnt.v_wire_count), req); 180 return (error); 181} 182 183SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD, 184 0, 0, sysctl_hw_usermem, "IU", ""); 185 186static int 187sysctl_hw_availpages(SYSCTL_HANDLER_ARGS) 188{ 189 int error = sysctl_handle_int(oidp, 0, 190 i386_btop(avail_end - avail_start), req); 191 return (error); 192} 193 194SYSCTL_PROC(_hw, OID_AUTO, availpages, CTLTYPE_INT|CTLFLAG_RD, 195 0, 0, sysctl_hw_availpages, "I", ""); 196 197int Maxmem = 0; 198long dumplo; 199 200vm_offset_t phys_avail[10]; 201 202/* must be 2 less so 0 0 can signal end of chunks */ 203#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2) 204 205struct kva_md_info kmi; 206 207static struct trapframe proc0_tf; 208#ifndef SMP 209static struct globaldata __globaldata; 210#endif 211 212struct mtx sched_lock; 213struct mtx Giant; 214 215static void 216cpu_startup(dummy) 217 void *dummy; 218{ 219 /* 220 * Good {morning,afternoon,evening,night}. 221 */ 222 earlysetcpuclass(); 223 startrtclock(); 224 printcpuinfo(); 225 panicifcpuunsupported(); 226#ifdef PERFMON 227 perfmon_init(); 228#endif 229 printf("real memory = %u (%uK bytes)\n", ptoa(Maxmem), 230 ptoa(Maxmem) / 1024); 231 /* 232 * Display any holes after the first chunk of extended memory. 233 */ 234 if (bootverbose) { 235 int indx; 236 237 printf("Physical memory chunk(s):\n"); 238 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) { 239 unsigned int size1; 240 241 size1 = phys_avail[indx + 1] - phys_avail[indx]; 242 printf("0x%08x - 0x%08x, %u bytes (%u pages)\n", 243 phys_avail[indx], phys_avail[indx + 1] - 1, size1, 244 size1 / PAGE_SIZE); 245 } 246 } 247 248 vm_ksubmap_init(&kmi); 249 250#if 0 251 /* 252 * Calculate callout wheel size 253 */ 254 for (callwheelsize = 1, callwheelbits = 0; 255 callwheelsize < ncallout; 256 callwheelsize <<= 1, ++callwheelbits) 257 ; 258 callwheelmask = callwheelsize - 1; 259 260 /* 261 * Allocate space for system data structures. 262 * The first available kernel virtual address is in "v". 263 * As pages of kernel virtual memory are allocated, "v" is incremented. 264 * As pages of memory are allocated and cleared, 265 * "firstaddr" is incremented. 266 * An index into the kernel page table corresponding to the 267 * virtual memory address maintained in "v" is kept in "mapaddr". 268 */ 269 270 /* 271 * Make two passes. The first pass calculates how much memory is 272 * needed and allocates it. The second pass assigns virtual 273 * addresses to the various data structures. 274 */ 275 firstaddr = 0; 276again: 277 v = (caddr_t)firstaddr; 278 279#define valloc(name, type, num) \ 280 (name) = (type *)v; v = (caddr_t)((name)+(num)) 281#define valloclim(name, type, num, lim) \ 282 (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num))) 283 284 valloc(callout, struct callout, ncallout); 285 valloc(callwheel, struct callout_tailq, callwheelsize); 286 287 /* 288 * Discount the physical memory larger than the size of kernel_map 289 * to avoid eating up all of KVA space. 
290 */ 291 if (kernel_map->first_free == NULL) { 292 printf("Warning: no free entries in kernel_map.\n"); 293 physmem_est = physmem; 294 } else { 295 physmem_est = min(physmem, btoc(kernel_map->max_offset - 296 kernel_map->min_offset)); 297 } 298 299 /* 300 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE. 301 * For the first 64MB of ram nominally allocate sufficient buffers to 302 * cover 1/4 of our ram. Beyond the first 64MB allocate additional 303 * buffers to cover 1/20 of our ram over 64MB. When auto-sizing 304 * the buffer cache we limit the eventual kva reservation to 305 * maxbcache bytes. 306 * 307 * factor represents the 1/4 x ram conversion. 308 */ 309 if (nbuf == 0) { 310 int factor = 4 * BKVASIZE / PAGE_SIZE; 311 312 nbuf = 50; 313 if (physmem_est > 1024) 314 nbuf += min((physmem_est - 1024) / factor, 315 16384 / factor); 316 if (physmem_est > 16384) 317 nbuf += (physmem_est - 16384) * 2 / (factor * 5); 318 319 if (maxbcache && nbuf > maxbcache / BKVASIZE) 320 nbuf = maxbcache / BKVASIZE; 321 } 322 323 /* 324 * Do not allow the buffer_map to be more then 1/2 the size of the 325 * kernel_map. 326 */ 327 if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) / 328 (BKVASIZE * 2)) { 329 nbuf = (kernel_map->max_offset - kernel_map->min_offset) / 330 (BKVASIZE * 2); 331 printf("Warning: nbufs capped at %d\n", nbuf); 332 } 333 334 nswbuf = max(min(nbuf/4, 256), 16); 335 336 valloc(swbuf, struct buf, nswbuf); 337 valloc(buf, struct buf, nbuf); 338 v = bufhashinit(v); 339 340 /* 341 * End of first pass, size has been calculated so allocate memory 342 */ 343 if (firstaddr == 0) { 344 size = (vm_size_t)(v - firstaddr); 345 firstaddr = (int)kmem_alloc(kernel_map, round_page(size)); 346 if (firstaddr == 0) 347 panic("startup: no room for tables"); 348 goto again; 349 } 350 351 /* 352 * End of second pass, addresses have been assigned 353 */ 354 if ((vm_size_t)(v - firstaddr) != size) 355 panic("startup: table size inconsistency"); 356 357 clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva, 358 (nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size); 359 buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva, 360 (nbuf*BKVASIZE)); 361 buffer_map->system_map = 1; 362 pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva, 363 (nswbuf*MAXPHYS) + pager_map_size); 364 pager_map->system_map = 1; 365 exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, 366 (16*(ARG_MAX+(PAGE_SIZE*3)))); 367 368 /* 369 * XXX: Mbuf system machine-specific initializations should 370 * go here, if anywhere. 371 */ 372 373 /* 374 * Initialize callouts 375 */ 376 SLIST_INIT(&callfree); 377 for (i = 0; i < ncallout; i++) { 378 callout_init(&callout[i], 0); 379 callout[i].c_flags = CALLOUT_LOCAL_ALLOC; 380 SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle); 381 } 382 383 for (i = 0; i < callwheelsize; i++) { 384 TAILQ_INIT(&callwheel[i]); 385 } 386 387 mtx_init(&callout_lock, "callout", MTX_SPIN | MTX_RECURSE); 388#endif 389 390#if defined(USERCONFIG) 391 userconfig(); 392 cninit(); /* the preferred console may have changed */ 393#endif 394 395 printf("avail memory = %u (%uK bytes)\n", ptoa(cnt.v_free_count), 396 ptoa(cnt.v_free_count) / 1024); 397 398 /* 399 * Set up buffers, so they can be used to read disk labels. 400 */ 401 bufinit(); 402 vm_pager_bufferinit(); 403 404 globaldata_register(GLOBALDATA); 405#ifndef SMP 406 /* For SMP, we delay the cpu_setregs() until after SMP startup. */ 407 cpu_setregs(); 408#endif 409} 410 411/* 412 * Send an interrupt to process. 
413 * 414 * Stack is set up to allow sigcode stored 415 * at top to call routine, followed by kcall 416 * to sigreturn routine below. After sigreturn 417 * resets the signal mask, the stack, and the 418 * frame pointer, it returns to the user 419 * specified pc, psl. 420 */ 421#ifdef COMPAT_43 422static void 423osendsig(catcher, sig, mask, code) 424 sig_t catcher; 425 int sig; 426 sigset_t *mask; 427 u_long code; 428{ 429 struct osigframe sf; 430 struct osigframe *fp; 431 struct proc *p; 432 struct sigacts *psp; 433 struct trapframe *regs; 434 int oonstack; 435 436 p = curproc; 437 PROC_LOCK(p); 438 psp = p->p_sigacts; 439 regs = p->p_frame; 440 oonstack = sigonstack(regs->tf_esp); 441 442 /* Allocate and validate space for the signal handler context. */ 443 if ((p->p_flag & P_ALTSTACK) && !oonstack && 444 SIGISMEMBER(psp->ps_sigonstack, sig)) { 445 fp = (struct osigframe *)(p->p_sigstk.ss_sp + 446 p->p_sigstk.ss_size - sizeof(struct osigframe)); 447#if defined(COMPAT_43) || defined(COMPAT_SUNOS) 448 p->p_sigstk.ss_flags |= SS_ONSTACK; 449#endif 450 } else 451 fp = (struct osigframe *)regs->tf_esp - 1; 452 PROC_UNLOCK(p); 453 454 /* 455 * grow_stack() will return 0 if *fp does not fit inside the stack 456 * and the stack can not be grown. 457 * useracc() will return FALSE if access is denied. 458 */ 459 if (grow_stack(p, (int)fp) == 0 || 460 !useracc((caddr_t)fp, sizeof(*fp), VM_PROT_WRITE)) { 461 /* 462 * Process has trashed its stack; give it an illegal 463 * instruction to halt it in its tracks. 464 */ 465 PROC_LOCK(p); 466 SIGACTION(p, SIGILL) = SIG_DFL; 467 SIGDELSET(p->p_sigignore, SIGILL); 468 SIGDELSET(p->p_sigcatch, SIGILL); 469 SIGDELSET(p->p_sigmask, SIGILL); 470 psignal(p, SIGILL); 471 PROC_UNLOCK(p); 472 return; 473 } 474 475 /* Translate the signal if appropriate. */ 476 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize) 477 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; 478 479 /* Build the argument list for the signal handler. */ 480 sf.sf_signum = sig; 481 sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc; 482 PROC_LOCK(p); 483 if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) { 484 /* Signal handler installed with SA_SIGINFO. */ 485 sf.sf_arg2 = (register_t)&fp->sf_siginfo; 486 sf.sf_siginfo.si_signo = sig; 487 sf.sf_siginfo.si_code = code; 488 sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher; 489 } else { 490 /* Old FreeBSD-style arguments. */ 491 sf.sf_arg2 = code; 492 sf.sf_addr = regs->tf_err; 493 sf.sf_ahu.sf_handler = catcher; 494 } 495 PROC_UNLOCK(p); 496 497 /* Save most if not all of trap frame. */ 498 sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax; 499 sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx; 500 sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx; 501 sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx; 502 sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi; 503 sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi; 504 sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs; 505 sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds; 506 sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss; 507 sf.sf_siginfo.si_sc.sc_es = regs->tf_es; 508 sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs; 509 sf.sf_siginfo.si_sc.sc_gs = rgs(); 510 sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp; 511 512 /* Build the signal context to be used by osigreturn(). */ 513 sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 
1 : 0; 514 SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask); 515 sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp; 516 sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp; 517 sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip; 518 sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags; 519 sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno; 520 sf.sf_siginfo.si_sc.sc_err = regs->tf_err; 521 522 /* 523 * If we're a vm86 process, we want to save the segment registers. 524 * We also change eflags to be our emulated eflags, not the actual 525 * eflags. 526 */ 527 if (regs->tf_eflags & PSL_VM) { 528 /* XXX confusing names: `tf' isn't a trapframe; `regs' is. */ 529 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 530 struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 531 532 sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs; 533 sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs; 534 sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es; 535 sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds; 536 537 if (vm86->vm86_has_vme == 0) 538 sf.sf_siginfo.si_sc.sc_ps = 539 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) | 540 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); 541 542 /* See sendsig() for comments. */ 543 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP); 544 } 545 546 /* Copy the sigframe out to the user's stack. */ 547 if (copyout(&sf, fp, sizeof(*fp)) != 0) { 548 /* 549 * Something is wrong with the stack pointer. 550 * ...Kill the process. 551 */ 552 PROC_LOCK(p); 553 sigexit(p, SIGILL); 554 /* NOTREACHED */ 555 } 556 557 regs->tf_esp = (int)fp; 558 regs->tf_eip = PS_STRINGS - szosigcode; 559 regs->tf_cs = _ucodesel; 560 regs->tf_ds = _udatasel; 561 regs->tf_es = _udatasel; 562 regs->tf_fs = _udatasel; 563 load_gs(_udatasel); 564 regs->tf_ss = _udatasel; 565} 566#endif 567 568void 569sendsig(catcher, sig, mask, code) 570 sig_t catcher; 571 int sig; 572 sigset_t *mask; 573 u_long code; 574{ 575 struct sigframe sf; 576 struct proc *p; 577 struct sigacts *psp; 578 struct trapframe *regs; 579 struct sigframe *sfp; 580 int oonstack; 581 582 p = curproc; 583 PROC_LOCK(p); 584 psp = p->p_sigacts; 585#ifdef COMPAT_43 586 if (SIGISMEMBER(psp->ps_osigset, sig)) { 587 PROC_UNLOCK(p); 588 osendsig(catcher, sig, mask, code); 589 return; 590 } 591#endif 592 regs = p->p_frame; 593 oonstack = sigonstack(regs->tf_esp); 594 595 /* Save user context. */ 596 bzero(&sf, sizeof(sf)); 597 sf.sf_uc.uc_sigmask = *mask; 598 sf.sf_uc.uc_stack = p->p_sigstk; 599 sf.sf_uc.uc_stack.ss_flags = (p->p_flag & P_ALTSTACK) 600 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE; 601 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0; 602 sf.sf_uc.uc_mcontext.mc_gs = rgs(); 603 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs)); 604 605 /* Allocate and validate space for the signal handler context. */ 606 if ((p->p_flag & P_ALTSTACK) != 0 && !oonstack && 607 SIGISMEMBER(psp->ps_sigonstack, sig)) { 608 sfp = (struct sigframe *)(p->p_sigstk.ss_sp + 609 p->p_sigstk.ss_size - sizeof(struct sigframe)); 610#if defined(COMPAT_43) || defined(COMPAT_SUNOS) 611 p->p_sigstk.ss_flags |= SS_ONSTACK; 612#endif 613 } else 614 sfp = (struct sigframe *)regs->tf_esp - 1; 615 PROC_UNLOCK(p); 616 617 /* 618 * grow_stack() will return 0 if *sfp does not fit inside the stack 619 * and the stack can not be grown. 620 * useracc() will return FALSE if access is denied. 621 */ 622 if (grow_stack(p, (int)sfp) == 0 || 623 !useracc((caddr_t)sfp, sizeof(*sfp), VM_PROT_WRITE)) { 624 /* 625 * Process has trashed its stack; give it an illegal 626 * instruction to halt it in its tracks. 
627 */ 628#ifdef DEBUG 629 printf("process %d has trashed its stack\n", p->p_pid); 630#endif 631 PROC_LOCK(p); 632 SIGACTION(p, SIGILL) = SIG_DFL; 633 SIGDELSET(p->p_sigignore, SIGILL); 634 SIGDELSET(p->p_sigcatch, SIGILL); 635 SIGDELSET(p->p_sigmask, SIGILL); 636 psignal(p, SIGILL); 637 PROC_UNLOCK(p); 638 return; 639 } 640 641 /* Translate the signal if appropriate. */ 642 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize) 643 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; 644 645 /* Build the argument list for the signal handler. */ 646 sf.sf_signum = sig; 647 sf.sf_ucontext = (register_t)&sfp->sf_uc; 648 PROC_LOCK(p); 649 if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) { 650 /* Signal handler installed with SA_SIGINFO. */ 651 sf.sf_siginfo = (register_t)&sfp->sf_si; 652 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; 653 654 /* Fill siginfo structure. */ 655 sf.sf_si.si_signo = sig; 656 sf.sf_si.si_code = code; 657 sf.sf_si.si_addr = (void *)regs->tf_err; 658 } else { 659 /* Old FreeBSD-style arguments. */ 660 sf.sf_siginfo = code; 661 sf.sf_addr = regs->tf_err; 662 sf.sf_ahu.sf_handler = catcher; 663 } 664 PROC_UNLOCK(p); 665 666 /* 667 * If we're a vm86 process, we want to save the segment registers. 668 * We also change eflags to be our emulated eflags, not the actual 669 * eflags. 670 */ 671 if (regs->tf_eflags & PSL_VM) { 672 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 673 struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 674 675 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs; 676 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs; 677 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es; 678 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds; 679 680 if (vm86->vm86_has_vme == 0) 681 sf.sf_uc.uc_mcontext.mc_eflags = 682 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) | 683 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); 684 685 /* 686 * We should never have PSL_T set when returning from vm86 687 * mode. It may be set here if we deliver a signal before 688 * getting to vm86 mode, so turn it off. 689 * 690 * Clear PSL_NT to inhibit T_TSSFLT faults on return from 691 * syscalls made by the signal handler. This just avoids 692 * wasting time for our lazy fixup of such faults. PSL_NT 693 * does nothing in vm86 mode, but vm86 programs can set it 694 * almost legitimately in probes for old cpu types. 695 */ 696 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP); 697 } 698 699 /* Copy the sigframe out to the user's stack. */ 700 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) { 701 /* 702 * Something is wrong with the stack pointer. 703 * ...Kill the process. 704 */ 705 PROC_LOCK(p); 706 sigexit(p, SIGILL); 707 /* NOTREACHED */ 708 } 709 710 regs->tf_esp = (int)sfp; 711 regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode); 712 regs->tf_cs = _ucodesel; 713 regs->tf_ds = _udatasel; 714 regs->tf_es = _udatasel; 715 regs->tf_fs = _udatasel; 716 regs->tf_ss = _udatasel; 717} 718 719/* 720 * System call to cleanup state after a signal 721 * has been taken. Reset signal mask and 722 * stack state from context left by sendsig (above). 723 * Return to previous pc and psl as specified by 724 * context left by sendsig. Check carefully to 725 * make sure that the user has not modified the 726 * state to gain improper privileges. 
727 */ 728#ifdef COMPAT_43 729int 730osigreturn(p, uap) 731 struct proc *p; 732 struct osigreturn_args /* { 733 struct osigcontext *sigcntxp; 734 } */ *uap; 735{ 736 struct trapframe *regs; 737 struct osigcontext *scp; 738 int eflags; 739 740 regs = p->p_frame; 741 scp = uap->sigcntxp; 742 if (!useracc((caddr_t)scp, sizeof(*scp), VM_PROT_READ)) 743 return (EFAULT); 744 eflags = scp->sc_ps; 745 if (eflags & PSL_VM) { 746 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 747 struct vm86_kernel *vm86; 748 749 /* 750 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't 751 * set up the vm86 area, and we can't enter vm86 mode. 752 */ 753 if (p->p_addr->u_pcb.pcb_ext == 0) 754 return (EINVAL); 755 vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 756 if (vm86->vm86_inited == 0) 757 return (EINVAL); 758 759 /* Go back to user mode if both flags are set. */ 760 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) 761 trapsignal(p, SIGBUS, 0); 762 763 if (vm86->vm86_has_vme) { 764 eflags = (tf->tf_eflags & ~VME_USERCHANGE) | 765 (eflags & VME_USERCHANGE) | PSL_VM; 766 } else { 767 vm86->vm86_eflags = eflags; /* save VIF, VIP */ 768 eflags = (tf->tf_eflags & ~VM_USERCHANGE) | 769 (eflags & VM_USERCHANGE) | PSL_VM; 770 } 771 tf->tf_vm86_ds = scp->sc_ds; 772 tf->tf_vm86_es = scp->sc_es; 773 tf->tf_vm86_fs = scp->sc_fs; 774 tf->tf_vm86_gs = scp->sc_gs; 775 tf->tf_ds = _udatasel; 776 tf->tf_es = _udatasel; 777 tf->tf_fs = _udatasel; 778 } else { 779 /* 780 * Don't allow users to change privileged or reserved flags. 781 */ 782 /* 783 * XXX do allow users to change the privileged flag PSL_RF. 784 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers 785 * should sometimes set it there too. tf_eflags is kept in 786 * the signal context during signal handling and there is no 787 * other place to remember it, so the PSL_RF bit may be 788 * corrupted by the signal handler without us knowing. 789 * Corruption of the PSL_RF bit at worst causes one more or 790 * one less debugger trap, so allowing it is fairly harmless. 791 */ 792 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) { 793 return (EINVAL); 794 } 795 796 /* 797 * Don't allow users to load a valid privileged %cs. Let the 798 * hardware check for invalid selectors, excess privilege in 799 * other selectors, invalid %eip's and invalid %esp's. 800 */ 801 if (!CS_SECURE(scp->sc_cs)) { 802 trapsignal(p, SIGBUS, T_PROTFLT); 803 return (EINVAL); 804 } 805 regs->tf_ds = scp->sc_ds; 806 regs->tf_es = scp->sc_es; 807 regs->tf_fs = scp->sc_fs; 808 } 809 810 /* Restore remaining registers. 
*/ 811 regs->tf_eax = scp->sc_eax; 812 regs->tf_ebx = scp->sc_ebx; 813 regs->tf_ecx = scp->sc_ecx; 814 regs->tf_edx = scp->sc_edx; 815 regs->tf_esi = scp->sc_esi; 816 regs->tf_edi = scp->sc_edi; 817 regs->tf_cs = scp->sc_cs; 818 regs->tf_ss = scp->sc_ss; 819 regs->tf_isp = scp->sc_isp; 820 821 PROC_LOCK(p); 822#if defined(COMPAT_43) || defined(COMPAT_SUNOS) 823 if (scp->sc_onstack & 1) 824 p->p_sigstk.ss_flags |= SS_ONSTACK; 825 else 826 p->p_sigstk.ss_flags &= ~SS_ONSTACK; 827#endif 828 829 SIGSETOLD(p->p_sigmask, scp->sc_mask); 830 SIG_CANTMASK(p->p_sigmask); 831 PROC_UNLOCK(p); 832 regs->tf_ebp = scp->sc_fp; 833 regs->tf_esp = scp->sc_sp; 834 regs->tf_eip = scp->sc_pc; 835 regs->tf_eflags = eflags; 836 return (EJUSTRETURN); 837} 838#endif 839 840int 841sigreturn(p, uap) 842 struct proc *p; 843 struct sigreturn_args /* { 844 ucontext_t *sigcntxp; 845 } */ *uap; 846{ 847 struct trapframe *regs; 848 ucontext_t *ucp; 849 int cs, eflags; 850 851 ucp = uap->sigcntxp; 852#ifdef COMPAT_43 853 if (!useracc((caddr_t)ucp, sizeof(struct osigcontext), VM_PROT_READ)) 854 return (EFAULT); 855 if (((struct osigcontext *)ucp)->sc_trapno == 0x01d516) 856 return (osigreturn(p, (struct osigreturn_args *)uap)); 857 /* 858 * Since ucp is not an osigcontext but a ucontext_t, we have to 859 * check again if all of it is accessible. A ucontext_t is 860 * much larger, so instead of just checking for the pointer 861 * being valid for the size of an osigcontext, now check for 862 * it being valid for a whole, new-style ucontext_t. 863 */ 864#endif 865 if (!useracc((caddr_t)ucp, sizeof(*ucp), VM_PROT_READ)) 866 return (EFAULT); 867 868 regs = p->p_frame; 869 eflags = ucp->uc_mcontext.mc_eflags; 870 if (eflags & PSL_VM) { 871 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 872 struct vm86_kernel *vm86; 873 874 /* 875 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't 876 * set up the vm86 area, and we can't enter vm86 mode. 877 */ 878 if (p->p_addr->u_pcb.pcb_ext == 0) 879 return (EINVAL); 880 vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 881 if (vm86->vm86_inited == 0) 882 return (EINVAL); 883 884 /* Go back to user mode if both flags are set. */ 885 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) 886 trapsignal(p, SIGBUS, 0); 887 888 if (vm86->vm86_has_vme) { 889 eflags = (tf->tf_eflags & ~VME_USERCHANGE) | 890 (eflags & VME_USERCHANGE) | PSL_VM; 891 } else { 892 vm86->vm86_eflags = eflags; /* save VIF, VIP */ 893 eflags = (tf->tf_eflags & ~VM_USERCHANGE) | 894 (eflags & VM_USERCHANGE) | PSL_VM; 895 } 896 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe)); 897 tf->tf_eflags = eflags; 898 tf->tf_vm86_ds = tf->tf_ds; 899 tf->tf_vm86_es = tf->tf_es; 900 tf->tf_vm86_fs = tf->tf_fs; 901 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs; 902 tf->tf_ds = _udatasel; 903 tf->tf_es = _udatasel; 904 tf->tf_fs = _udatasel; 905 } else { 906 /* 907 * Don't allow users to change privileged or reserved flags. 908 */ 909 /* 910 * XXX do allow users to change the privileged flag PSL_RF. 911 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers 912 * should sometimes set it there too. tf_eflags is kept in 913 * the signal context during signal handling and there is no 914 * other place to remember it, so the PSL_RF bit may be 915 * corrupted by the signal handler without us knowing. 916 * Corruption of the PSL_RF bit at worst causes one more or 917 * one less debugger trap, so allowing it is fairly harmless. 
918 */ 919 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) { 920 printf("sigreturn: eflags = 0x%x\n", eflags); 921 return (EINVAL); 922 } 923 924 /* 925 * Don't allow users to load a valid privileged %cs. Let the 926 * hardware check for invalid selectors, excess privilege in 927 * other selectors, invalid %eip's and invalid %esp's. 928 */ 929 cs = ucp->uc_mcontext.mc_cs; 930 if (!CS_SECURE(cs)) { 931 printf("sigreturn: cs = 0x%x\n", cs); 932 trapsignal(p, SIGBUS, T_PROTFLT); 933 return (EINVAL); 934 } 935 936 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs)); 937 } 938 939 PROC_LOCK(p); 940#if defined(COMPAT_43) || defined(COMPAT_SUNOS) 941 if (ucp->uc_mcontext.mc_onstack & 1) 942 p->p_sigstk.ss_flags |= SS_ONSTACK; 943 else 944 p->p_sigstk.ss_flags &= ~SS_ONSTACK; 945#endif 946 947 p->p_sigmask = ucp->uc_sigmask; 948 SIG_CANTMASK(p->p_sigmask); 949 PROC_UNLOCK(p); 950 return (EJUSTRETURN); 951} 952 953/* 954 * Machine dependent boot() routine 955 * 956 * I haven't seen anything to put here yet 957 * Possibly some stuff might be grafted back here from boot() 958 */ 959void 960cpu_boot(int howto) 961{ 962} 963 964/* 965 * Shutdown the CPU as much as possible 966 */ 967void 968cpu_halt(void) 969{ 970 for (;;) 971 __asm__ ("hlt"); 972} 973 974/* 975 * Hook to idle the CPU when possible. This currently only works in 976 * the !SMP case, as there is no clean way to ensure that a CPU will be 977 * woken when there is work available for it. 978 */ 979static int cpu_idle_hlt = 1; 980SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW, 981 &cpu_idle_hlt, 0, "Idle loop HLT enable"); 982 983/* 984 * Note that we have to be careful here to avoid a race between checking 985 * procrunnable() and actually halting. If we don't do this, we may waste 986 * the time between calling hlt and the next interrupt even though there 987 * is a runnable process. 988 */ 989void 990cpu_idle(void) 991{ 992#ifndef SMP 993 if (cpu_idle_hlt) { 994 disable_intr(); 995 if (procrunnable()) 996 enable_intr(); 997 else { 998 enable_intr(); 999 __asm __volatile("hlt"); 1000 } 1001 } 1002#endif 1003} 1004 1005/* 1006 * Clear registers on exec 1007 */ 1008void 1009setregs(p, entry, stack, ps_strings) 1010 struct proc *p; 1011 u_long entry; 1012 u_long stack; 1013 u_long ps_strings; 1014{ 1015 struct trapframe *regs = p->p_frame; 1016 struct pcb *pcb = &p->p_addr->u_pcb; 1017 1018 if (pcb->pcb_ldt) 1019 user_ldt_free(pcb); 1020 1021 bzero((char *)regs, sizeof(struct trapframe)); 1022 regs->tf_eip = entry; 1023 regs->tf_esp = stack; 1024 regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T); 1025 regs->tf_ss = _udatasel; 1026 regs->tf_ds = _udatasel; 1027 regs->tf_es = _udatasel; 1028 regs->tf_fs = _udatasel; 1029 regs->tf_cs = _ucodesel; 1030 1031 /* PS_STRINGS value for BSD/OS binaries. It is 0 for non-BSD/OS. */ 1032 regs->tf_ebx = ps_strings; 1033 1034 /* reset %gs as well */ 1035 if (pcb == PCPU_GET(curpcb)) 1036 load_gs(_udatasel); 1037 else 1038 pcb->pcb_gs = _udatasel; 1039 1040 /* 1041 * Reset the hardware debug registers if they were in use. 1042 * They won't have any meaning for the newly exec'd process. 1043 */ 1044 if (pcb->pcb_flags & PCB_DBREGS) { 1045 pcb->pcb_dr0 = 0; 1046 pcb->pcb_dr1 = 0; 1047 pcb->pcb_dr2 = 0; 1048 pcb->pcb_dr3 = 0; 1049 pcb->pcb_dr6 = 0; 1050 pcb->pcb_dr7 = 0; 1051 if (pcb == PCPU_GET(curpcb)) { 1052 /* 1053 * Clear the debug registers on the running 1054 * CPU, otherwise they will end up affecting 1055 * the next process we switch to. 
1056 */ 1057 reset_dbregs(); 1058 } 1059 pcb->pcb_flags &= ~PCB_DBREGS; 1060 } 1061 1062 /* 1063 * Initialize the math emulator (if any) for the current process. 1064 * Actually, just clear the bit that says that the emulator has 1065 * been initialized. Initialization is delayed until the process 1066 * traps to the emulator (if it is done at all) mainly because 1067 * emulators don't provide an entry point for initialization. 1068 */ 1069 p->p_addr->u_pcb.pcb_flags &= ~FP_SOFTFP; 1070 1071 /* 1072 * Arrange to trap the next npx or `fwait' instruction (see npx.c 1073 * for why fwait must be trapped at least if there is an npx or an 1074 * emulator). This is mainly to handle the case where npx0 is not 1075 * configured, since the npx routines normally set up the trap 1076 * otherwise. It should be done only at boot time, but doing it 1077 * here allows modifying `npx_exists' for testing the emulator on 1078 * systems with an npx. 1079 */ 1080 load_cr0(rcr0() | CR0_MP | CR0_TS); 1081 1082#ifdef DEV_NPX 1083 /* Initialize the npx (if any) for the current process. */ 1084 npxinit(__INITIAL_NPXCW__); 1085#endif 1086 1087 /* 1088 * XXX - Linux emulator 1089 * Make sure sure edx is 0x0 on entry. Linux binaries depend 1090 * on it. 1091 */ 1092 p->p_retval[1] = 0; 1093} 1094 1095void 1096cpu_setregs(void) 1097{ 1098 unsigned int cr0; 1099 1100 cr0 = rcr0(); 1101 cr0 |= CR0_NE; /* Done by npxinit() */ 1102 cr0 |= CR0_MP | CR0_TS; /* Done at every execve() too. */ 1103#ifndef I386_CPU 1104 cr0 |= CR0_WP | CR0_AM; 1105#endif 1106 load_cr0(cr0); 1107 load_gs(_udatasel); 1108} 1109 1110static int 1111sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS) 1112{ 1113 int error; 1114 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, 1115 req); 1116 if (!error && req->newptr) 1117 resettodr(); 1118 return (error); 1119} 1120 1121SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW, 1122 &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", ""); 1123 1124SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set, 1125 CTLFLAG_RW, &disable_rtc_set, 0, ""); 1126 1127SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo, 1128 CTLFLAG_RD, &bootinfo, bootinfo, ""); 1129 1130SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock, 1131 CTLFLAG_RW, &wall_cmos_clock, 0, ""); 1132 1133/* 1134 * Initialize 386 and configure to run kernel 1135 */ 1136 1137/* 1138 * Initialize segments & interrupt table 1139 */ 1140 1141int _default_ldt; 1142union descriptor gdt[NGDT * MAXCPU]; /* global descriptor table */ 1143static struct gate_descriptor idt0[NIDT]; 1144struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */ 1145union descriptor ldt[NLDT]; /* local descriptor table */ 1146#ifdef SMP 1147/* table descriptors - used to load tables by microp */ 1148struct region_descriptor r_gdt, r_idt; 1149#endif 1150 1151int private_tss; /* flag indicating private tss */ 1152 1153#if defined(I586_CPU) && !defined(NO_F00F_HACK) 1154extern int has_f00f_bug; 1155#endif 1156 1157static struct i386tss dblfault_tss; 1158static char dblfault_stack[PAGE_SIZE]; 1159 1160extern struct user *proc0paddr; 1161 1162 1163/* software prototypes -- in more palatable form */ 1164struct soft_segment_descriptor gdt_segs[] = { 1165/* GNULL_SEL 0 Null Descriptor */ 1166{ 0x0, /* segment base address */ 1167 0x0, /* length */ 1168 0, /* segment type */ 1169 0, /* segment descriptor priority level */ 1170 0, /* segment descriptor present */ 1171 0, 0, 1172 0, /* default 32 vs 16 bit size */ 1173 0 /* limit granularity (byte/page units)*/ }, 
1174/* GCODE_SEL 1 Code Descriptor for kernel */ 1175{ 0x0, /* segment base address */ 1176 0xfffff, /* length - all address space */ 1177 SDT_MEMERA, /* segment type */ 1178 0, /* segment descriptor priority level */ 1179 1, /* segment descriptor present */ 1180 0, 0, 1181 1, /* default 32 vs 16 bit size */ 1182 1 /* limit granularity (byte/page units)*/ }, 1183/* GDATA_SEL 2 Data Descriptor for kernel */ 1184{ 0x0, /* segment base address */ 1185 0xfffff, /* length - all address space */ 1186 SDT_MEMRWA, /* segment type */ 1187 0, /* segment descriptor priority level */ 1188 1, /* segment descriptor present */ 1189 0, 0, 1190 1, /* default 32 vs 16 bit size */ 1191 1 /* limit granularity (byte/page units)*/ }, 1192/* GPRIV_SEL 3 SMP Per-Processor Private Data Descriptor */ 1193{ 0x0, /* segment base address */ 1194 0xfffff, /* length - all address space */ 1195 SDT_MEMRWA, /* segment type */ 1196 0, /* segment descriptor priority level */ 1197 1, /* segment descriptor present */ 1198 0, 0, 1199 1, /* default 32 vs 16 bit size */ 1200 1 /* limit granularity (byte/page units)*/ }, 1201/* GPROC0_SEL 4 Proc 0 Tss Descriptor */ 1202{ 1203 0x0, /* segment base address */ 1204 sizeof(struct i386tss)-1,/* length - all address space */ 1205 SDT_SYS386TSS, /* segment type */ 1206 0, /* segment descriptor priority level */ 1207 1, /* segment descriptor present */ 1208 0, 0, 1209 0, /* unused - default 32 vs 16 bit size */ 1210 0 /* limit granularity (byte/page units)*/ }, 1211/* GLDT_SEL 5 LDT Descriptor */ 1212{ (int) ldt, /* segment base address */ 1213 sizeof(ldt)-1, /* length - all address space */ 1214 SDT_SYSLDT, /* segment type */ 1215 SEL_UPL, /* segment descriptor priority level */ 1216 1, /* segment descriptor present */ 1217 0, 0, 1218 0, /* unused - default 32 vs 16 bit size */ 1219 0 /* limit granularity (byte/page units)*/ }, 1220/* GUSERLDT_SEL 6 User LDT Descriptor per process */ 1221{ (int) ldt, /* segment base address */ 1222 (512 * sizeof(union descriptor)-1), /* length */ 1223 SDT_SYSLDT, /* segment type */ 1224 0, /* segment descriptor priority level */ 1225 1, /* segment descriptor present */ 1226 0, 0, 1227 0, /* unused - default 32 vs 16 bit size */ 1228 0 /* limit granularity (byte/page units)*/ }, 1229/* GTGATE_SEL 7 Null Descriptor - Placeholder */ 1230{ 0x0, /* segment base address */ 1231 0x0, /* length - all address space */ 1232 0, /* segment type */ 1233 0, /* segment descriptor priority level */ 1234 0, /* segment descriptor present */ 1235 0, 0, 1236 0, /* default 32 vs 16 bit size */ 1237 0 /* limit granularity (byte/page units)*/ }, 1238/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */ 1239{ 0x400, /* segment base address */ 1240 0xfffff, /* length */ 1241 SDT_MEMRWA, /* segment type */ 1242 0, /* segment descriptor priority level */ 1243 1, /* segment descriptor present */ 1244 0, 0, 1245 1, /* default 32 vs 16 bit size */ 1246 1 /* limit granularity (byte/page units)*/ }, 1247/* GPANIC_SEL 9 Panic Tss Descriptor */ 1248{ (int) &dblfault_tss, /* segment base address */ 1249 sizeof(struct i386tss)-1,/* length - all address space */ 1250 SDT_SYS386TSS, /* segment type */ 1251 0, /* segment descriptor priority level */ 1252 1, /* segment descriptor present */ 1253 0, 0, 1254 0, /* unused - default 32 vs 16 bit size */ 1255 0 /* limit granularity (byte/page units)*/ }, 1256/* GBIOSCODE32_SEL 10 BIOS 32-bit interface (32bit Code) */ 1257{ 0, /* segment base address (overwritten) */ 1258 0xfffff, /* length */ 1259 SDT_MEMERA, /* segment 
type */ 1260 0, /* segment descriptor priority level */ 1261 1, /* segment descriptor present */ 1262 0, 0, 1263 0, /* default 32 vs 16 bit size */ 1264 1 /* limit granularity (byte/page units)*/ }, 1265/* GBIOSCODE16_SEL 11 BIOS 32-bit interface (16bit Code) */ 1266{ 0, /* segment base address (overwritten) */ 1267 0xfffff, /* length */ 1268 SDT_MEMERA, /* segment type */ 1269 0, /* segment descriptor priority level */ 1270 1, /* segment descriptor present */ 1271 0, 0, 1272 0, /* default 32 vs 16 bit size */ 1273 1 /* limit granularity (byte/page units)*/ }, 1274/* GBIOSDATA_SEL 12 BIOS 32-bit interface (Data) */ 1275{ 0, /* segment base address (overwritten) */ 1276 0xfffff, /* length */ 1277 SDT_MEMRWA, /* segment type */ 1278 0, /* segment descriptor priority level */ 1279 1, /* segment descriptor present */ 1280 0, 0, 1281 1, /* default 32 vs 16 bit size */ 1282 1 /* limit granularity (byte/page units)*/ }, 1283/* GBIOSUTIL_SEL 13 BIOS 16-bit interface (Utility) */ 1284{ 0, /* segment base address (overwritten) */ 1285 0xfffff, /* length */ 1286 SDT_MEMRWA, /* segment type */ 1287 0, /* segment descriptor priority level */ 1288 1, /* segment descriptor present */ 1289 0, 0, 1290 0, /* default 32 vs 16 bit size */ 1291 1 /* limit granularity (byte/page units)*/ }, 1292/* GBIOSARGS_SEL 14 BIOS 16-bit interface (Arguments) */ 1293{ 0, /* segment base address (overwritten) */ 1294 0xfffff, /* length */ 1295 SDT_MEMRWA, /* segment type */ 1296 0, /* segment descriptor priority level */ 1297 1, /* segment descriptor present */ 1298 0, 0, 1299 0, /* default 32 vs 16 bit size */ 1300 1 /* limit granularity (byte/page units)*/ }, 1301}; 1302 1303static struct soft_segment_descriptor ldt_segs[] = { 1304 /* Null Descriptor - overwritten by call gate */ 1305{ 0x0, /* segment base address */ 1306 0x0, /* length - all address space */ 1307 0, /* segment type */ 1308 0, /* segment descriptor priority level */ 1309 0, /* segment descriptor present */ 1310 0, 0, 1311 0, /* default 32 vs 16 bit size */ 1312 0 /* limit granularity (byte/page units)*/ }, 1313 /* Null Descriptor - overwritten by call gate */ 1314{ 0x0, /* segment base address */ 1315 0x0, /* length - all address space */ 1316 0, /* segment type */ 1317 0, /* segment descriptor priority level */ 1318 0, /* segment descriptor present */ 1319 0, 0, 1320 0, /* default 32 vs 16 bit size */ 1321 0 /* limit granularity (byte/page units)*/ }, 1322 /* Null Descriptor - overwritten by call gate */ 1323{ 0x0, /* segment base address */ 1324 0x0, /* length - all address space */ 1325 0, /* segment type */ 1326 0, /* segment descriptor priority level */ 1327 0, /* segment descriptor present */ 1328 0, 0, 1329 0, /* default 32 vs 16 bit size */ 1330 0 /* limit granularity (byte/page units)*/ }, 1331 /* Code Descriptor for user */ 1332{ 0x0, /* segment base address */ 1333 0xfffff, /* length - all address space */ 1334 SDT_MEMERA, /* segment type */ 1335 SEL_UPL, /* segment descriptor priority level */ 1336 1, /* segment descriptor present */ 1337 0, 0, 1338 1, /* default 32 vs 16 bit size */ 1339 1 /* limit granularity (byte/page units)*/ }, 1340 /* Null Descriptor - overwritten by call gate */ 1341{ 0x0, /* segment base address */ 1342 0x0, /* length - all address space */ 1343 0, /* segment type */ 1344 0, /* segment descriptor priority level */ 1345 0, /* segment descriptor present */ 1346 0, 0, 1347 0, /* default 32 vs 16 bit size */ 1348 0 /* limit granularity (byte/page units)*/ }, 1349 /* Data Descriptor for user */ 1350{ 0x0, /* segment base 
address */ 1351 0xfffff, /* length - all address space */ 1352 SDT_MEMRWA, /* segment type */ 1353 SEL_UPL, /* segment descriptor priority level */ 1354 1, /* segment descriptor present */ 1355 0, 0, 1356 1, /* default 32 vs 16 bit size */ 1357 1 /* limit granularity (byte/page units)*/ }, 1358}; 1359 1360void 1361setidt(idx, func, typ, dpl, selec) 1362 int idx; 1363 inthand_t *func; 1364 int typ; 1365 int dpl; 1366 int selec; 1367{ 1368 struct gate_descriptor *ip; 1369 1370 ip = idt + idx; 1371 ip->gd_looffset = (int)func; 1372 ip->gd_selector = selec; 1373 ip->gd_stkcpy = 0; 1374 ip->gd_xx = 0; 1375 ip->gd_type = typ; 1376 ip->gd_dpl = dpl; 1377 ip->gd_p = 1; 1378 ip->gd_hioffset = ((int)func)>>16 ; 1379} 1380 1381#define IDTVEC(name) __CONCAT(X,name) 1382 1383extern inthand_t 1384 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl), 1385 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm), 1386 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot), 1387 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align), 1388 IDTVEC(xmm), IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall); 1389 1390void 1391sdtossd(sd, ssd) 1392 struct segment_descriptor *sd; 1393 struct soft_segment_descriptor *ssd; 1394{ 1395 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase; 1396 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit; 1397 ssd->ssd_type = sd->sd_type; 1398 ssd->ssd_dpl = sd->sd_dpl; 1399 ssd->ssd_p = sd->sd_p; 1400 ssd->ssd_def32 = sd->sd_def32; 1401 ssd->ssd_gran = sd->sd_gran; 1402} 1403 1404#define PHYSMAP_SIZE (2 * 8) 1405 1406/* 1407 * Populate the (physmap) array with base/bound pairs describing the 1408 * available physical memory in the system, then test this memory and 1409 * build the phys_avail array describing the actually-available memory. 1410 * 1411 * If we cannot accurately determine the physical memory map, then use 1412 * value from the 0xE801 call, and failing that, the RTC. 1413 * 1414 * Total memory size may be set by the kernel environment variable 1415 * hw.physmem or the compile-time define MAXMEM. 1416 */ 1417static void 1418getmemsize(int first) 1419{ 1420 int i, physmap_idx, pa_indx; 1421 u_int basemem, extmem; 1422 struct vm86frame vmf; 1423 struct vm86context vmc; 1424 vm_offset_t pa, physmap[PHYSMAP_SIZE]; 1425 pt_entry_t pte; 1426 const char *cp; 1427 struct bios_smap *smap; 1428 1429 bzero(&vmf, sizeof(struct vm86frame)); 1430 bzero(physmap, sizeof(physmap)); 1431 1432 /* 1433 * Perform "base memory" related probes & setup 1434 */ 1435 vm86_intcall(0x12, &vmf); 1436 basemem = vmf.vmf_ax; 1437 if (basemem > 640) { 1438 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n", 1439 basemem); 1440 basemem = 640; 1441 } 1442 1443 /* 1444 * XXX if biosbasemem is now < 640, there is a `hole' 1445 * between the end of base memory and the start of 1446 * ISA memory. The hole may be empty or it may 1447 * contain BIOS code or data. Map it read/write so 1448 * that the BIOS can write to it. (Memory from 0 to 1449 * the physical end of the kernel is mapped read-only 1450 * to begin with and then parts of it are remapped. 1451 * The parts that aren't remapped form holes that 1452 * remain read-only and are unused by the kernel. 1453 * The base memory area is below the physical end of 1454 * the kernel and right now forms a read-only hole. 1455 * The part of it from PAGE_SIZE to 1456 * (trunc_page(biosbasemem * 1024) - 1) will be 1457 * remapped and used by the kernel later.) 
1458 * 1459 * This code is similar to the code used in 1460 * pmap_mapdev, but since no memory needs to be 1461 * allocated we simply change the mapping. 1462 */ 1463 for (pa = trunc_page(basemem * 1024); 1464 pa < ISA_HOLE_START; pa += PAGE_SIZE) { 1465 pte = (pt_entry_t)vtopte(pa + KERNBASE); 1466 *pte = pa | PG_RW | PG_V; 1467 } 1468 1469 /* 1470 * if basemem != 640, map pages r/w into vm86 page table so 1471 * that the bios can scribble on it. 1472 */ 1473 pte = (pt_entry_t)vm86paddr; 1474 for (i = basemem / 4; i < 160; i++) 1475 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U; 1476 1477 /* 1478 * map page 1 R/W into the kernel page table so we can use it 1479 * as a buffer. The kernel will unmap this page later. 1480 */ 1481 pte = (pt_entry_t)vtopte(KERNBASE + (1 << PAGE_SHIFT)); 1482 *pte = (1 << PAGE_SHIFT) | PG_RW | PG_V; 1483 1484 /* 1485 * get memory map with INT 15:E820 1486 */ 1487 vmc.npages = 0; 1488 smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT)); 1489 vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di); 1490 1491 physmap_idx = 0; 1492 vmf.vmf_ebx = 0; 1493 do { 1494 vmf.vmf_eax = 0xE820; 1495 vmf.vmf_edx = SMAP_SIG; 1496 vmf.vmf_ecx = sizeof(struct bios_smap); 1497 i = vm86_datacall(0x15, &vmf, &vmc); 1498 if (i || vmf.vmf_eax != SMAP_SIG) 1499 break; 1500 if (boothowto & RB_VERBOSE) 1501 printf("SMAP type=%02x base=%08x %08x len=%08x %08x\n", 1502 smap->type, 1503 *(u_int32_t *)((char *)&smap->base + 4), 1504 (u_int32_t)smap->base, 1505 *(u_int32_t *)((char *)&smap->length + 4), 1506 (u_int32_t)smap->length); 1507 1508 if (smap->type != 0x01) 1509 goto next_run; 1510 1511 if (smap->length == 0) 1512 goto next_run; 1513 1514 if (smap->base >= 0xffffffff) { 1515 printf("%uK of memory above 4GB ignored\n", 1516 (u_int)(smap->length / 1024)); 1517 goto next_run; 1518 } 1519 1520 for (i = 0; i <= physmap_idx; i += 2) { 1521 if (smap->base < physmap[i + 1]) { 1522 if (boothowto & RB_VERBOSE) 1523 printf( 1524 "Overlapping or non-montonic memory region, ignoring second region\n"); 1525 goto next_run; 1526 } 1527 } 1528 1529 if (smap->base == physmap[physmap_idx + 1]) { 1530 physmap[physmap_idx + 1] += smap->length; 1531 goto next_run; 1532 } 1533 1534 physmap_idx += 2; 1535 if (physmap_idx == PHYSMAP_SIZE) { 1536 printf( 1537 "Too many segments in the physical address map, giving up\n"); 1538 break; 1539 } 1540 physmap[physmap_idx] = smap->base; 1541 physmap[physmap_idx + 1] = smap->base + smap->length; 1542next_run: 1543 } while (vmf.vmf_ebx != 0); 1544 1545 if (physmap[1] != 0) 1546 goto physmap_done; 1547 1548 /* 1549 * If we failed above, try memory map with INT 15:E801 1550 */ 1551 vmf.vmf_ax = 0xE801; 1552 if (vm86_intcall(0x15, &vmf) == 0) { 1553 extmem = vmf.vmf_cx + vmf.vmf_dx * 64; 1554 } else { 1555#if 0 1556 vmf.vmf_ah = 0x88; 1557 vm86_intcall(0x15, &vmf); 1558 extmem = vmf.vmf_ax; 1559#else 1560 /* 1561 * Prefer the RTC value for extended memory. 1562 */ 1563 extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8); 1564#endif 1565 } 1566 1567 /* 1568 * Special hack for chipsets that still remap the 384k hole when 1569 * there's 16MB of memory - this really confuses people that 1570 * are trying to use bus mastering ISA controllers with the 1571 * "16MB limit"; they only have 16MB, but the remapping puts 1572 * them beyond the limit. 1573 * 1574 * If extended memory is between 15-16MB (16-17MB phys address range), 1575 * chop it to 15MB. 
1576 */ 1577 if ((extmem > 15 * 1024) && (extmem < 16 * 1024)) 1578 extmem = 15 * 1024; 1579 1580 physmap[0] = 0; 1581 physmap[1] = basemem * 1024; 1582 physmap_idx = 2; 1583 physmap[physmap_idx] = 0x100000; 1584 physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024; 1585 1586physmap_done: 1587 /* 1588 * Now, physmap contains a map of physical memory. 1589 */ 1590 1591#ifdef SMP 1592 /* make hole for AP bootstrap code */ 1593 physmap[1] = mp_bootaddress(physmap[1] / 1024); 1594 1595 /* look for the MP hardware - needed for apic addresses */ 1596 i386_mp_probe(); 1597#endif 1598 1599 /* 1600 * Maxmem isn't the "maximum memory", it's one larger than the 1601 * highest page of the physical address space. It should be 1602 * called something like "Maxphyspage". We may adjust this 1603 * based on ``hw.physmem'' and the results of the memory test. 1604 */ 1605 Maxmem = atop(physmap[physmap_idx + 1]); 1606 1607#ifdef MAXMEM 1608 Maxmem = MAXMEM / 4; 1609#endif 1610 1611 /* 1612 * hw.physmem is a size in bytes; we also allow k, m, and g suffixes 1613 * for the appropriate modifiers. This overrides MAXMEM. 1614 */ 1615 if ((cp = getenv("hw.physmem")) != NULL) { 1616 u_int64_t AllowMem, sanity; 1617 char *ep; 1618 1619 sanity = AllowMem = strtouq(cp, &ep, 0); 1620 if ((ep != cp) && (*ep != 0)) { 1621 switch(*ep) { 1622 case 'g': 1623 case 'G': 1624 AllowMem <<= 10; 1625 case 'm': 1626 case 'M': 1627 AllowMem <<= 10; 1628 case 'k': 1629 case 'K': 1630 AllowMem <<= 10; 1631 break; 1632 default: 1633 AllowMem = sanity = 0; 1634 } 1635 if (AllowMem < sanity) 1636 AllowMem = 0; 1637 } 1638 if (AllowMem == 0) 1639 printf("Ignoring invalid memory size of '%s'\n", cp); 1640 else 1641 Maxmem = atop(AllowMem); 1642 } 1643 1644 if (atop(physmap[physmap_idx + 1]) != Maxmem && 1645 (boothowto & RB_VERBOSE)) 1646 printf("Physical memory use set to %uK\n", Maxmem * 4); 1647 1648 /* 1649 * If Maxmem has been increased beyond what the system has detected, 1650 * extend the last memory segment to the new limit. 1651 */ 1652 if (atop(physmap[physmap_idx + 1]) < Maxmem) 1653 physmap[physmap_idx + 1] = ptoa(Maxmem); 1654 1655 /* call pmap initialization to make new kernel address space */ 1656 pmap_bootstrap(first, 0); 1657 1658 /* 1659 * Size up each available chunk of physical memory. 1660 */ 1661 physmap[0] = PAGE_SIZE; /* mask off page 0 */ 1662 pa_indx = 0; 1663 phys_avail[pa_indx++] = physmap[0]; 1664 phys_avail[pa_indx] = physmap[0]; 1665#if 0 1666 pte = (pt_entry_t)vtopte(KERNBASE); 1667#else 1668 pte = (pt_entry_t)CMAP1; 1669#endif 1670 1671 /* 1672 * physmap is in bytes, so when converting to page boundaries, 1673 * round up the start address and round down the end address. 1674 */ 1675 for (i = 0; i <= physmap_idx; i += 2) { 1676 vm_offset_t end; 1677 1678 end = ptoa(Maxmem); 1679 if (physmap[i + 1] < end) 1680 end = trunc_page(physmap[i + 1]); 1681 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) { 1682 int tmp, page_bad; 1683#if 0 1684 int *ptr = 0; 1685#else 1686 int *ptr = (int *)CADDR1; 1687#endif 1688 1689 /* 1690 * block out kernel memory as not available. 
1691 */ 1692 if (pa >= 0x100000 && pa < first) 1693 continue; 1694 1695 page_bad = FALSE; 1696 1697 /* 1698 * map page into kernel: valid, read/write,non-cacheable 1699 */ 1700 *pte = pa | PG_V | PG_RW | PG_N; 1701 invltlb(); 1702 1703 tmp = *(int *)ptr; 1704 /* 1705 * Test for alternating 1's and 0's 1706 */ 1707 *(volatile int *)ptr = 0xaaaaaaaa; 1708 if (*(volatile int *)ptr != 0xaaaaaaaa) { 1709 page_bad = TRUE; 1710 } 1711 /* 1712 * Test for alternating 0's and 1's 1713 */ 1714 *(volatile int *)ptr = 0x55555555; 1715 if (*(volatile int *)ptr != 0x55555555) { 1716 page_bad = TRUE; 1717 } 1718 /* 1719 * Test for all 1's 1720 */ 1721 *(volatile int *)ptr = 0xffffffff; 1722 if (*(volatile int *)ptr != 0xffffffff) { 1723 page_bad = TRUE; 1724 } 1725 /* 1726 * Test for all 0's 1727 */ 1728 *(volatile int *)ptr = 0x0; 1729 if (*(volatile int *)ptr != 0x0) { 1730 page_bad = TRUE; 1731 } 1732 /* 1733 * Restore original value. 1734 */ 1735 *(int *)ptr = tmp; 1736 1737 /* 1738 * Adjust array of valid/good pages. 1739 */ 1740 if (page_bad == TRUE) { 1741 continue; 1742 } 1743 /* 1744 * If this good page is a continuation of the 1745 * previous set of good pages, then just increase 1746 * the end pointer. Otherwise start a new chunk. 1747 * Note that "end" points one higher than end, 1748 * making the range >= start and < end. 1749 * If we're also doing a speculative memory 1750 * test and we at or past the end, bump up Maxmem 1751 * so that we keep going. The first bad page 1752 * will terminate the loop. 1753 */ 1754 if (phys_avail[pa_indx] == pa) { 1755 phys_avail[pa_indx] += PAGE_SIZE; 1756 } else { 1757 pa_indx++; 1758 if (pa_indx == PHYS_AVAIL_ARRAY_END) { 1759 printf( 1760 "Too many holes in the physical address space, giving up\n"); 1761 pa_indx--; 1762 break; 1763 } 1764 phys_avail[pa_indx++] = pa; /* start */ 1765 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */ 1766 } 1767 physmem++; 1768 } 1769 } 1770 *pte = 0; 1771 invltlb(); 1772 1773 /* 1774 * XXX 1775 * The last chunk must contain at least one page plus the message 1776 * buffer to avoid complicating other code (message buffer address 1777 * calculation, etc.). 1778 */ 1779 while (phys_avail[pa_indx - 1] + PAGE_SIZE + 1780 round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) { 1781 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]); 1782 phys_avail[pa_indx--] = 0; 1783 phys_avail[pa_indx--] = 0; 1784 } 1785 1786 Maxmem = atop(phys_avail[pa_indx]); 1787 1788 /* Trim off space for the message buffer. 
*/ 1789 phys_avail[pa_indx] -= round_page(MSGBUF_SIZE); 1790 1791 avail_end = phys_avail[pa_indx]; 1792} 1793 1794void 1795init386(first) 1796 int first; 1797{ 1798 struct gate_descriptor *gdp; 1799 int gsel_tss, metadata_missing, off, x; 1800#ifndef SMP 1801 /* table descriptors - used to load tables by microp */ 1802 struct region_descriptor r_gdt, r_idt; 1803#endif 1804 1805 proc0.p_addr = proc0paddr; 1806 1807 atdevbase = ISA_HOLE_START + KERNBASE; 1808 1809 metadata_missing = 0; 1810 if (bootinfo.bi_modulep) { 1811 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE; 1812 preload_bootstrap_relocate(KERNBASE); 1813 } else { 1814 metadata_missing = 1; 1815 } 1816 if (envmode == 1) 1817 kern_envp = static_env; 1818 else if (bootinfo.bi_envp) 1819 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE; 1820 1821 /* Init basic tunables, hz etc */ 1822 init_param(); 1823 1824 /* 1825 * make gdt memory segments, the code segment goes up to end of the 1826 * page with etext in it, the data segment goes to the end of 1827 * the address space 1828 */ 1829 /* 1830 * XXX text protection is temporarily (?) disabled. The limit was 1831 * i386_btop(round_page(etext)) - 1. 1832 */ 1833 gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1); 1834 gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1); 1835#ifdef SMP 1836 gdt_segs[GPRIV_SEL].ssd_limit = 1837 atop(sizeof(struct privatespace) - 1); 1838 gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[0]; 1839 gdt_segs[GPROC0_SEL].ssd_base = 1840 (int) &SMP_prvspace[0].globaldata.gd_common_tss; 1841 SMP_prvspace[0].globaldata.gd_prvspace = &SMP_prvspace[0].globaldata; 1842#else 1843 gdt_segs[GPRIV_SEL].ssd_limit = 1844 atop(sizeof(struct globaldata) - 1); 1845 gdt_segs[GPRIV_SEL].ssd_base = (int) &__globaldata; 1846 gdt_segs[GPROC0_SEL].ssd_base = 1847 (int) &__globaldata.gd_common_tss; 1848 __globaldata.gd_prvspace = &__globaldata; 1849#endif 1850 1851 for (x = 0; x < NGDT; x++) { 1852#ifdef BDE_DEBUGGER 1853 /* avoid overwriting db entries with APM ones */ 1854 if (x >= GAPMCODE32_SEL && x <= GAPMDATA_SEL) 1855 continue; 1856#endif 1857 ssdtosd(&gdt_segs[x], &gdt[x].sd); 1858 } 1859 1860 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 1861 r_gdt.rd_base = (int) gdt; 1862 lgdt(&r_gdt); 1863 1864 /* setup curproc so that mutexes work */ 1865 PCPU_SET(curproc, &proc0); 1866 PCPU_SET(spinlocks, NULL); 1867 1868 LIST_INIT(&proc0.p_contested); 1869 1870 /* 1871 * Initialize mutexes. 1872 */ 1873 mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE); 1874 mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_RECURSE); 1875 mtx_init(&proc0.p_mtx, "process lock", MTX_DEF); 1876 mtx_init(&clock_lock, "clk", MTX_SPIN | MTX_RECURSE); 1877#ifdef SMP 1878 mtx_init(&imen_mtx, "imen", MTX_SPIN); 1879#endif 1880 mtx_lock(&Giant); 1881 1882 /* make ldt memory segments */ 1883 /* 1884 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max. And it 1885 * should be spelled ...MAX_USER... 
	ldt_segs[LUCODE_SEL].ssd_limit = atop(VM_MAXUSER_ADDRESS - 1);
	ldt_segs[LUDATA_SEL].ssd_limit = atop(VM_MAXUSER_ADDRESS - 1);
	for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
		ssdtosd(&ldt_segs[x], &ldt[x].sd);

	_default_ldt = GSEL(GLDT_SEL, SEL_KPL);
	lldt(_default_ldt);
	PCPU_SET(currentldt, _default_ldt);

	/* exceptions */
	for (x = 0; x < NIDT; x++)
		setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
		    GSEL(GCODE_SEL, SEL_KPL));
	setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(1, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(3, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(8, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
	setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(14, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(15, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(17, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(18, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(19, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(0x80, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));

	r_idt.rd_limit = sizeof(idt0) - 1;
	r_idt.rd_base = (int) idt;
	lidt(&r_idt);

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

	if (metadata_missing)
		printf("WARNING: loader(8) metadata is missing!\n");

#ifdef DEV_ISA
	isa_defaultirq();
#endif

#ifdef DDB
	kdb_init();
	if (boothowto & RB_KDB)
		Debugger("Boot flags requested debugger");
#endif

	finishidentcpu();	/* Final stage of CPU initialization */
	setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	initializecpu();	/* Initialize CPU registers */
	/* make an initial tss so the cpu can get an interrupt stack on syscall! */
	PCPU_SET(common_tss.tss_esp0,
	    (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16);
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	private_tss = 0;
	PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
	ltr(gsel_tss);

	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
	    dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
	    dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_cr3 = (int)IdlePTD;
	dblfault_tss.tss_eip = (int)dblfault_handler;
	dblfault_tss.tss_eflags = PSL_KERNEL;
	dblfault_tss.tss_ds = dblfault_tss.tss_es =
	    dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
	dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);

	vm86_initialize();
	getmemsize(first);

	/* now running on new page tables, configured, and u/iom is accessible */

	/* Map the message buffer. */
	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);

	msgbufinit(msgbufp, MSGBUF_SIZE);

	/* make a call gate to reenter kernel with */
	gdp = &ldt[LSYS5CALLS_SEL].gd;

	x = (int) &IDTVEC(lcall_syscall);
	gdp->gd_looffset = x;
	gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
	gdp->gd_stkcpy = 1;
	gdp->gd_type = SDT_SYS386CGT;
	gdp->gd_dpl = SEL_UPL;
	gdp->gd_p = 1;
	gdp->gd_hioffset = x >> 16;

	/* XXX does this work? */
	ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
	ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];

	/* transfer to user mode */

	_ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
	_udatasel = LSEL(LUDATA_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	proc0.p_addr->u_pcb.pcb_flags = 0;
	proc0.p_addr->u_pcb.pcb_cr3 = (int)IdlePTD;
	proc0.p_addr->u_pcb.pcb_ext = 0;
	proc0.p_frame = &proc0_tf;
}

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
static void f00f_hack(void *unused);
SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);

static void
f00f_hack(void *unused)
{
	struct gate_descriptor *new_idt;
#ifndef SMP
	struct region_descriptor r_idt;
#endif
	vm_offset_t tmp;

	if (!has_f00f_bug)
		return;

	GIANT_REQUIRED;

	printf("Intel Pentium detected, installing workaround for F00F bug\n");

	r_idt.rd_limit = sizeof(idt0) - 1;

	tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
	if (tmp == 0)
		panic("kmem_alloc returned 0");
	if (((unsigned int)tmp & (PAGE_SIZE-1)) != 0)
		panic("kmem_alloc returned non-page-aligned memory");
	/* Put the first seven entries in the lower page */
	new_idt = (struct gate_descriptor*)(tmp + PAGE_SIZE - (7*8));
	bcopy(idt, new_idt, sizeof(idt0));
	r_idt.rd_base = (int)new_idt;
	lidt(&r_idt);
	idt = new_idt;
	if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
	    VM_PROT_READ, FALSE) != KERN_SUCCESS)
		panic("vm_map_protect failed");
	return;
}
#endif /* defined(I586_CPU) && !NO_F00F_HACK */

int
ptrace_set_pc(p, addr)
	struct proc *p;
	unsigned long addr;
{
	p->p_frame->tf_eip = addr;
	return (0);
}

int
ptrace_single_step(p)
	struct proc *p;
{
	p->p_frame->tf_eflags |= PSL_T;
	return (0);
}

int
fill_regs(p, regs)
	struct proc *p;
	struct reg *regs;
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = p->p_frame;
	regs->r_fs = tp->tf_fs;
	regs->r_es = tp->tf_es;
	regs->r_ds = tp->tf_ds;
	regs->r_edi = tp->tf_edi;
	regs->r_esi = tp->tf_esi;
	regs->r_ebp = tp->tf_ebp;
	regs->r_ebx = tp->tf_ebx;
	regs->r_edx = tp->tf_edx;
	regs->r_ecx = tp->tf_ecx;
	regs->r_eax = tp->tf_eax;
	regs->r_eip = tp->tf_eip;
	regs->r_cs = tp->tf_cs;
	regs->r_eflags = tp->tf_eflags;
	regs->r_esp = tp->tf_esp;
	regs->r_ss = tp->tf_ss;
	pcb = &p->p_addr->u_pcb;
	regs->r_gs = pcb->pcb_gs;
	return (0);
}

int
set_regs(p, regs)
	struct proc *p;
	struct reg *regs;
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = p->p_frame;
	if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
	    !CS_SECURE(regs->r_cs))
		return (EINVAL);
	tp->tf_fs = regs->r_fs;
	tp->tf_es = regs->r_es;
	tp->tf_ds = regs->r_ds;
	tp->tf_edi = regs->r_edi;
	tp->tf_esi = regs->r_esi;
	tp->tf_ebp = regs->r_ebp;
	tp->tf_ebx = regs->r_ebx;
	tp->tf_edx = regs->r_edx;
	tp->tf_ecx = regs->r_ecx;
	tp->tf_eax = regs->r_eax;
	tp->tf_eip = regs->r_eip;
	tp->tf_cs = regs->r_cs;
	tp->tf_eflags = regs->r_eflags;
	tp->tf_esp = regs->r_esp;
	tp->tf_ss = regs->r_ss;
	pcb = &p->p_addr->u_pcb;
	pcb->pcb_gs = regs->r_gs;
	return (0);
}
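/*
 * Illustrative sketch only: the idea behind the EFL_SECURE()/CS_SECURE()
 * checks used by set_regs() above is that a debugger may change only the
 * user-changeable eflags bits and may install only a ring-3 code selector.
 * The constants below are simplified stand-ins, not the kernel's
 * PSL_USERCHANGE or selector macros.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

#define	USERCHANGE_MASK	0x00000cd5	/* stand-in for PSL_USERCHANGE */
#define	RPL_MASK	0x00000003	/* low two bits of a selector */
#define	USER_RPL	3		/* ring 3 */

/* A new eflags value is acceptable if only user-changeable bits differ. */
static bool
eflags_secure(uint32_t new_efl, uint32_t old_efl)
{
	return (((new_efl ^ old_efl) & ~USERCHANGE_MASK) == 0);
}

/* A new %cs value is acceptable if its privilege level is ring 3. */
static bool
cs_secure(uint32_t cs)
{
	return ((cs & RPL_MASK) == USER_RPL);
}
#endif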
#ifdef CPU_ENABLE_SSE
static void
fill_fpregs_xmm(sv_xmm, sv_87)
	struct savexmm *sv_xmm;
	struct save87 *sv_87;
{
	register struct env87 *penv_87 = &sv_87->sv_env;
	register struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* FPU control/status */
	penv_87->en_cw = penv_xmm->en_cw;
	penv_87->en_sw = penv_xmm->en_sw;
	penv_87->en_tw = penv_xmm->en_tw;
	penv_87->en_fip = penv_xmm->en_fip;
	penv_87->en_fcs = penv_xmm->en_fcs;
	penv_87->en_opcode = penv_xmm->en_opcode;
	penv_87->en_foo = penv_xmm->en_foo;
	penv_87->en_fos = penv_xmm->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;

	sv_87->sv_ex_sw = sv_xmm->sv_ex_sw;
}

static void
set_fpregs_xmm(sv_87, sv_xmm)
	struct save87 *sv_87;
	struct savexmm *sv_xmm;
{
	register struct env87 *penv_87 = &sv_87->sv_env;
	register struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* FPU control/status */
	penv_xmm->en_cw = penv_87->en_cw;
	penv_xmm->en_sw = penv_87->en_sw;
	penv_xmm->en_tw = penv_87->en_tw;
	penv_xmm->en_fip = penv_87->en_fip;
	penv_xmm->en_fcs = penv_87->en_fcs;
	penv_xmm->en_opcode = penv_87->en_opcode;
	penv_xmm->en_foo = penv_87->en_foo;
	penv_xmm->en_fos = penv_87->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];

	sv_xmm->sv_ex_sw = sv_87->sv_ex_sw;
}
#endif /* CPU_ENABLE_SSE */

int
fill_fpregs(p, fpregs)
	struct proc *p;
	struct fpreg *fpregs;
{
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr) {
		fill_fpregs_xmm(&p->p_addr->u_pcb.pcb_save.sv_xmm,
		    (struct save87 *)fpregs);
		return (0);
	}
#endif /* CPU_ENABLE_SSE */
	bcopy(&p->p_addr->u_pcb.pcb_save.sv_87, fpregs, sizeof *fpregs);
	return (0);
}

int
set_fpregs(p, fpregs)
	struct proc *p;
	struct fpreg *fpregs;
{
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr) {
		set_fpregs_xmm((struct save87 *)fpregs,
		    &p->p_addr->u_pcb.pcb_save.sv_xmm);
		return (0);
	}
#endif /* CPU_ENABLE_SSE */
	bcopy(fpregs, &p->p_addr->u_pcb.pcb_save.sv_87, sizeof *fpregs);
	return (0);
}

int
fill_dbregs(p, dbregs)
	struct proc *p;
	struct dbreg *dbregs;
{
	struct pcb *pcb;

	if (p == NULL) {
		/* No process given: report the live debug registers. */
		dbregs->dr0 = rdr0();
		dbregs->dr1 = rdr1();
		dbregs->dr2 = rdr2();
		dbregs->dr3 = rdr3();
		dbregs->dr4 = rdr4();
		dbregs->dr5 = rdr5();
		dbregs->dr6 = rdr6();
		dbregs->dr7 = rdr7();
	} else {
		/* Otherwise report the values saved in the process's pcb. */
		pcb = &p->p_addr->u_pcb;
		dbregs->dr0 = pcb->pcb_dr0;
		dbregs->dr1 = pcb->pcb_dr1;
		dbregs->dr2 = pcb->pcb_dr2;
		dbregs->dr3 = pcb->pcb_dr3;
		dbregs->dr4 = 0;
		dbregs->dr5 = 0;
		dbregs->dr6 = pcb->pcb_dr6;
		dbregs->dr7 = pcb->pcb_dr7;
	}
	return (0);
}
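/*
 * Illustrative sketch only: a minimal userland caller, assuming FreeBSD's
 * machine-dependent PT_GETDBREGS ptrace(2) request, showing how a debugger
 * typically reaches fill_dbregs()/set_dbregs() above.  Error handling is
 * minimal and the dr0/dr6/dr7 field names follow the struct dbreg layout
 * used in this file.
 */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <machine/reg.h>
#include <stdio.h>

/* Print a traced child's hardware debug registers. */
static void
show_dbregs(pid_t pid)
{
	struct dbreg db;

	if (ptrace(PT_GETDBREGS, pid, (caddr_t)&db, 0) == -1) {
		perror("PT_GETDBREGS");
		return;
	}
	printf("dr0=%#x dr6=%#x dr7=%#x\n", db.dr0, db.dr6, db.dr7);
}
#endif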
int
set_dbregs(p, dbregs)
	struct proc *p;
	struct dbreg *dbregs;
{
	struct pcb *pcb;
	int i;
	u_int32_t mask1, mask2;

	if (p == NULL) {
		load_dr0(dbregs->dr0);
		load_dr1(dbregs->dr1);
		load_dr2(dbregs->dr2);
		load_dr3(dbregs->dr3);
		load_dr4(dbregs->dr4);
		load_dr5(dbregs->dr5);
		load_dr6(dbregs->dr6);
		load_dr7(dbregs->dr7);
	} else {
		/*
		 * Don't let an illegal value for dr7 get set.  Specifically,
		 * check for undefined settings.  Setting these bit patterns
		 * results in undefined behaviour and can lead to an
		 * unexpected TRCTRAP.
		 */
		for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 8;
		     i++, mask1 <<= 2, mask2 <<= 2)
			if ((dbregs->dr7 & mask1) == mask2)
				return (EINVAL);

		pcb = &p->p_addr->u_pcb;

		/*
		 * Don't let a process set a breakpoint that is not within the
		 * process's address space.  If a process could do this, it
		 * could halt the system by setting a breakpoint in the kernel
		 * (if ddb was enabled).  Thus, we need to check to make sure
		 * that no breakpoints are being enabled for addresses outside
		 * the process's address space, unless, perhaps, we were
		 * called by uid 0.
		 *
		 * XXX - what about when the watched area of the user's
		 * address space is written into from within the kernel
		 * ... wouldn't that still cause a breakpoint to be generated
		 * from within kernel mode?
		 */

		if (suser(p) != 0) {
			if (dbregs->dr7 & 0x3) {
				/* dr0 is enabled */
				if (dbregs->dr0 >= VM_MAXUSER_ADDRESS)
					return (EINVAL);
			}

			if (dbregs->dr7 & (0x3<<2)) {
				/* dr1 is enabled */
				if (dbregs->dr1 >= VM_MAXUSER_ADDRESS)
					return (EINVAL);
			}

			if (dbregs->dr7 & (0x3<<4)) {
				/* dr2 is enabled */
				if (dbregs->dr2 >= VM_MAXUSER_ADDRESS)
					return (EINVAL);
			}

			if (dbregs->dr7 & (0x3<<6)) {
				/* dr3 is enabled */
				if (dbregs->dr3 >= VM_MAXUSER_ADDRESS)
					return (EINVAL);
			}
		}

		pcb->pcb_dr0 = dbregs->dr0;
		pcb->pcb_dr1 = dbregs->dr1;
		pcb->pcb_dr2 = dbregs->dr2;
		pcb->pcb_dr3 = dbregs->dr3;
		pcb->pcb_dr6 = dbregs->dr6;
		pcb->pcb_dr7 = dbregs->dr7;

		pcb->pcb_flags |= PCB_DBREGS;
	}

	return (0);
}
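/*
 * Illustrative sketch only: the %dr7 layout assumed by the checks in
 * set_dbregs() above.  Bits 0-7 hold the local/global enable pairs for
 * dr0-dr3, and bits 16-31 hold a 2-bit R/W field and a 2-bit LEN field for
 * each of the four registers; the loop above rejects any of those 2-bit
 * fields whose value is binary 10.  The helper names are hypothetical.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

/* Reject the undefined 10b pattern in any 2-bit field of dr7 bits 16-31. */
static bool
dr7_fields_valid(uint32_t dr7)
{
	uint32_t mask1 = 0x3 << 16;	/* full 2-bit field */
	uint32_t mask2 = 0x2 << 16;	/* the undefined pattern 10b */
	int i;

	for (i = 0; i < 8; i++, mask1 <<= 2, mask2 <<= 2)
		if ((dr7 & mask1) == mask2)
			return (false);
	return (true);
}

/* True if hardware breakpoint register n (0-3) is enabled in dr7. */
static bool
dr7_enabled(uint32_t dr7, int n)
{
	return ((dr7 & (0x3u << (n * 2))) != 0);
}
#endif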
2420 */ 2421 return 0; 2422} 2423 2424 2425#ifndef DDB 2426void 2427Debugger(const char *msg) 2428{ 2429 printf("Debugger(\"%s\") called.\n", msg); 2430} 2431#endif /* no DDB */ 2432 2433#include <sys/disklabel.h> 2434 2435/* 2436 * Determine the size of the transfer, and make sure it is 2437 * within the boundaries of the partition. Adjust transfer 2438 * if needed, and signal errors or early completion. 2439 */ 2440int 2441bounds_check_with_label(struct bio *bp, struct disklabel *lp, int wlabel) 2442{ 2443 struct partition *p = lp->d_partitions + dkpart(bp->bio_dev); 2444 int labelsect = lp->d_partitions[0].p_offset; 2445 int maxsz = p->p_size, 2446 sz = (bp->bio_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT; 2447 2448 /* overwriting disk label ? */ 2449 /* XXX should also protect bootstrap in first 8K */ 2450 if (bp->bio_blkno + p->p_offset <= LABELSECTOR + labelsect && 2451#if LABELSECTOR != 0 2452 bp->bio_blkno + p->p_offset + sz > LABELSECTOR + labelsect && 2453#endif 2454 (bp->bio_cmd == BIO_WRITE) && wlabel == 0) { 2455 bp->bio_error = EROFS; 2456 goto bad; 2457 } 2458 2459#if defined(DOSBBSECTOR) && defined(notyet) 2460 /* overwriting master boot record? */ 2461 if (bp->bio_blkno + p->p_offset <= DOSBBSECTOR && 2462 (bp->bio_cmd == BIO_WRITE) && wlabel == 0) { 2463 bp->bio_error = EROFS; 2464 goto bad; 2465 } 2466#endif 2467 2468 /* beyond partition? */ 2469 if (bp->bio_blkno < 0 || bp->bio_blkno + sz > maxsz) { 2470 /* if exactly at end of disk, return an EOF */ 2471 if (bp->bio_blkno == maxsz) { 2472 bp->bio_resid = bp->bio_bcount; 2473 return(0); 2474 } 2475 /* or truncate if part of it fits */ 2476 sz = maxsz - bp->bio_blkno; 2477 if (sz <= 0) { 2478 bp->bio_error = EINVAL; 2479 goto bad; 2480 } 2481 bp->bio_bcount = sz << DEV_BSHIFT; 2482 } 2483 2484 bp->bio_pblkno = bp->bio_blkno + p->p_offset; 2485 return(1); 2486 2487bad: 2488 bp->bio_flags |= BIO_ERROR; 2489 return(-1); 2490} 2491 2492#ifdef DDB 2493 2494/* 2495 * Provide inb() and outb() as functions. They are normally only 2496 * available as macros calling inlined functions, thus cannot be 2497 * called inside DDB. 2498 * 2499 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined. 2500 */ 2501 2502#undef inb 2503#undef outb 2504 2505/* silence compiler warnings */ 2506u_char inb(u_int); 2507void outb(u_int, u_char); 2508 2509u_char 2510inb(u_int port) 2511{ 2512 u_char data; 2513 /* 2514 * We use %%dx and not %1 here because i/o is done at %dx and not at 2515 * %edx, while gcc generates inferior code (movw instead of movl) 2516 * if we tell it to load (u_short) port. 2517 */ 2518 __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port)); 2519 return (data); 2520} 2521 2522void 2523outb(u_int port, u_char data) 2524{ 2525 u_char al; 2526 /* 2527 * Use an unnecessary assignment to help gcc's register allocator. 2528 * This make a large difference for gcc-1.40 and a tiny difference 2529 * for gcc-2.6.0. For gcc-1.40, al had to be ``asm("ax")'' for 2530 * best results. gcc-2.6.0 can't handle this. 2531 */ 2532 al = data; 2533 __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port)); 2534} 2535 2536#endif /* DDB */ 2537