machdep.c revision 78427
/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 * $FreeBSD: head/sys/amd64/amd64/machdep.c 78427 2001-06-18 19:19:38Z jhb $
 */

#include "opt_atalk.h"
#include "opt_compat.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_ipx.h"
#include "opt_isa.h"
#include "opt_maxmem.h"
#include "opt_msgbuf.h"
#include "opt_npx.h"
#include "opt_perfmon.h"
/* #include "opt_userconfig.h" */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/smp.h>
#include <sys/callout.h>
#include <sys/msgbuf.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/user.h>
#include <sys/exec.h>
#include <sys/cons.h>

#include <ddb/ddb.h>

#include <net/netisr.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/reg.h>
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/bootinfo.h>
#include <machine/md_var.h>
#include <machine/pc/bios.h>
#include <machine/pcb_ext.h>		/* pcb.h included via sys/user.h */
#include <machine/globals.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif

#ifdef OLD_BUS_ARCH
#include <i386/isa/isa_device.h>
#endif
#include <i386/isa/icu.h>
#include <i386/isa/intr_machdep.h>
#include <isa/rtc.h>
#include <machine/vm86.h>
#include <sys/ptrace.h>
#include <machine/sigframe.h>

extern void init386 __P((int first));
extern void dblfault_handler __P((void));

extern void printcpuinfo(void);		/* XXX header file */
extern void earlysetcpuclass(void);	/* same header file */
extern void finishidentcpu(void);
extern void panicifcpuunsupported(void);
extern void initializecpu(void);

#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)

static void cpu_startup __P((void *));
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)

int	_udatasel, _ucodesel;
u_int	atdevbase;

#if defined(SWTCH_OPTIM_STATS)
extern int swtch_optim_stats;
SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats,
	CTLFLAG_RD, &swtch_optim_stats, 0, "");
SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count,
	CTLFLAG_RD, &tlb_flush_count, 0, "");
#endif

#ifdef PC98
static int	ispc98 = 1;
#else
static int	ispc98 = 0;
#endif
SYSCTL_INT(_machdep, OID_AUTO, ispc98, CTLFLAG_RD, &ispc98, 0, "");

int physmem = 0;
int cold = 1;

static void osendsig __P((sig_t catcher, int sig, sigset_t *mask, u_long code));

static int
sysctl_hw_physmem(SYSCTL_HANDLER_ARGS)
{
	int error = sysctl_handle_int(oidp, 0, ctob(physmem), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_physmem, "I", "");

static int
sysctl_hw_usermem(SYSCTL_HANDLER_ARGS)
{
	int error = sysctl_handle_int(oidp, 0,
		ctob(physmem - cnt.v_wire_count), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_usermem, "I", "");

static int
sysctl_hw_availpages(SYSCTL_HANDLER_ARGS)
{
	int error = sysctl_handle_int(oidp, 0,
		i386_btop(avail_end - avail_start), req);
	return (error);
}

SYSCTL_PROC(_hw, OID_AUTO, availpages, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_availpages, "I", "");

static int
sysctl_machdep_msgbuf(SYSCTL_HANDLER_ARGS)
{
	int error;

	/* Unwind the buffer, so that it's linear (possibly starting with
	 * some initial nulls).
	 */
	error = sysctl_handle_opaque(oidp, msgbufp->msg_ptr + msgbufp->msg_bufr,
	    msgbufp->msg_size - msgbufp->msg_bufr, req);
	if (error)
		return (error);
	if (msgbufp->msg_bufr > 0) {
		error = sysctl_handle_opaque(oidp, msgbufp->msg_ptr,
		    msgbufp->msg_bufr, req);
	}
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, msgbuf, CTLTYPE_STRING|CTLFLAG_RD,
	0, 0, sysctl_machdep_msgbuf, "A", "Contents of kernel message buffer");

static int msgbuf_clear;

static int
sysctl_machdep_msgbuf_clear(SYSCTL_HANDLER_ARGS)
{
	int error;
	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
		req);
	if (!error && req->newptr) {
		/* Clear the buffer and reset write pointer */
		bzero(msgbufp->msg_ptr, msgbufp->msg_size);
		msgbufp->msg_bufr = msgbufp->msg_bufx = 0;
		msgbuf_clear = 0;
	}
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, msgbuf_clear, CTLTYPE_INT|CTLFLAG_RW,
	&msgbuf_clear, 0, sysctl_machdep_msgbuf_clear, "I",
	"Clear kernel message buffer");

int Maxmem = 0;
long dumplo;

vm_offset_t phys_avail[10];

/* must be 2 less so 0 0 can signal end of chunks */
#define	PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)

static vm_offset_t buffer_sva, buffer_eva;
vm_offset_t clean_sva, clean_eva;
static vm_offset_t pager_sva, pager_eva;
static struct trapframe proc0_tf;
#ifndef SMP
static struct globaldata __globaldata;
#endif

struct mtx sched_lock;
struct mtx Giant;

static void
cpu_startup(dummy)
	void *dummy;
{
	register unsigned i;
	register caddr_t v;
	vm_offset_t maxaddr;
	vm_size_t size = 0;
	int firstaddr;
	vm_offset_t minaddr;
	int physmem_est;

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	mtx_lock(&vm_mtx);
	earlysetcpuclass();
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory = %u (%uK bytes)\n", ptoa(Maxmem), ptoa(Maxmem) / 1024);
	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			unsigned int size1 = phys_avail[indx + 1] - phys_avail[indx];

			printf("0x%08x - 0x%08x, %u bytes (%u pages)\n",
			    phys_avail[indx], phys_avail[indx + 1] - 1, size1,
			    size1 / PAGE_SIZE);
		}
	}

	/*
	 * Calculate callout wheel size
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	/*
	 * Allocate space for system data structures.
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */

	/*
	 * Make two passes.  The first pass calculates how much memory is
	 * needed and allocates it.  The second pass assigns virtual
	 * addresses to the various data structures.
	 */
	firstaddr = 0;
again:
	v = (caddr_t)firstaddr;

#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))

	valloc(callout, struct callout, ncallout);
	valloc(callwheel, struct callout_tailq, callwheelsize);

	/*
	 * Discount the physical memory larger than the size of kernel_map
	 * to avoid eating up all of KVA space.
	 */
	if (kernel_map->first_free == NULL) {
		printf("Warning: no free entries in kernel_map.\n");
		physmem_est = physmem;
	} else
		physmem_est = min(physmem, kernel_map->max_offset - kernel_map->min_offset);

	/*
	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
	 * For the first 64MB of ram nominally allocate sufficient buffers to
	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
	 * buffers to cover 1/20 of our ram over 64MB.
	 *
	 * factor represents the 1/4 x ram conversion.
	 */
	if (nbuf == 0) {
		int factor = 4 * BKVASIZE / PAGE_SIZE;

		nbuf = 50;
		if (physmem_est > 1024)
			nbuf += min((physmem_est - 1024) / factor, 16384 / factor);
		if (physmem_est > 16384)
			nbuf += (physmem_est - 16384) * 2 / (factor * 5);
	}

	/*
	 * Do not allow the buffer_map to be more than 1/2 the size of the
	 * kernel_map.
	 */
	if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) /
	    (BKVASIZE * 2)) {
		nbuf = (kernel_map->max_offset - kernel_map->min_offset) /
		    (BKVASIZE * 2);
		printf("Warning: nbufs capped at %d\n", nbuf);
	}

	nswbuf = max(min(nbuf/4, 256), 16);

	valloc(swbuf, struct buf, nswbuf);
	valloc(buf, struct buf, nbuf);
	v = bufhashinit(v);

	/*
	 * End of first pass, size has been calculated so allocate memory
	 */
	if (firstaddr == 0) {
		size = (vm_size_t)(v - firstaddr);
		firstaddr = (int)kmem_alloc(kernel_map, round_page(size));
		if (firstaddr == 0)
			panic("startup: no room for tables");
		goto again;
	}

	/*
	 * End of second pass, addresses have been assigned
	 */
	if ((vm_size_t)(v - firstaddr) != size)
		panic("startup: table size inconsistency");

	clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
			(nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
	buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
				(nbuf*BKVASIZE));
	buffer_map->system_map = 1;
	pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
				(nswbuf*MAXPHYS) + pager_map_size);
	pager_map->system_map = 1;
	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
				(16*(ARG_MAX+(PAGE_SIZE*3))));

	mtx_unlock(&vm_mtx);
	/*
	 * XXX: Mbuf system machine-specific initializations should
	 *	 go here, if anywhere.
	 */

	/*
	 * Initialize callouts
	 */
	SLIST_INIT(&callfree);
	for (i = 0; i < ncallout; i++) {
		callout_init(&callout[i], 0);
		callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
	}

	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&callwheel[i]);
	}

	mtx_init(&callout_lock, "callout", MTX_SPIN | MTX_RECURSE);

#if defined(USERCONFIG)
	userconfig();
	cninit();		/* the preferred console may have changed */
#endif

	printf("avail memory = %u (%uK bytes)\n", ptoa(cnt.v_free_count),
	    ptoa(cnt.v_free_count) / 1024);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();

	globaldata_register(GLOBALDATA);
#ifndef SMP
	/* For SMP, we delay the cpu_setregs() until after SMP startup. */
	cpu_setregs();
#endif
}

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
static void
osendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct osigframe sf;
	struct osigframe *fp;
	struct proc *p;
	struct sigacts *psp;
	struct trapframe *regs;
	int oonstack;

	p = curproc;
	PROC_LOCK(p);
	psp = p->p_sigacts;
	regs = p->p_md.md_regs;
	oonstack = sigonstack(regs->tf_esp);

	/* Allocate and validate space for the signal handler context. */
	if ((p->p_flag & P_ALTSTACK) && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct osigframe *)(p->p_sigstk.ss_sp +
		    p->p_sigstk.ss_size - sizeof(struct osigframe));
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct osigframe *)regs->tf_esp - 1;
	PROC_UNLOCK(p);

	/*
	 * grow_stack() will return 0 if *fp does not fit inside the stack
	 * and the stack can not be grown.
	 * useracc() will return FALSE if access is denied.
	 */
	if (grow_stack(p, (int)fp) == 0 ||
	    !useracc((caddr_t)fp, sizeof(*fp), VM_PROT_WRITE)) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		PROC_LOCK(p);
		SIGACTION(p, SIGILL) = SIG_DFL;
		SIGDELSET(p->p_sigignore, SIGILL);
		SIGDELSET(p->p_sigcatch, SIGILL);
		SIGDELSET(p->p_sigmask, SIGILL);
		psignal(p, SIGILL);
		PROC_UNLOCK(p);
		return;
	}

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
	PROC_LOCK(p);
	if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_arg2 = (register_t)&fp->sf_siginfo;
		sf.sf_siginfo.si_signo = sig;
		sf.sf_siginfo.si_code = code;
		sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_arg2 = code;
		sf.sf_addr = regs->tf_err;
		sf.sf_ahu.sf_handler = catcher;
	}
	PROC_UNLOCK(p);

	/* Save most if not all of trap frame. */
	sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
	sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
	sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
	sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
	sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
	sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
	sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
	sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
	sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
	sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
	sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
	sf.sf_siginfo.si_sc.sc_gs = rgs();
	sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;

	/* Build the signal context to be used by osigreturn(). */
	sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
	SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
	sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
	sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
	sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
	sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
	sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
	sf.sf_siginfo.si_sc.sc_err = regs->tf_err;

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		/* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86;

		sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
		sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
		sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
		sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_siginfo.si_sc.sc_ps =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/* See sendsig() for comments. */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP);
	}

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&sf, fp, sizeof(*fp)) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		PROC_LOCK(p);
		sigexit(p, SIGILL);
		/* NOTREACHED */
	}

	regs->tf_esp = (int)fp;
	regs->tf_eip = PS_STRINGS - szosigcode;
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	load_gs(_udatasel);
	regs->tf_ss = _udatasel;
}

void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct sigframe sf;
	struct proc *p;
	struct sigacts *psp;
	struct trapframe *regs;
	struct sigframe *sfp;
	int oonstack;

	p = curproc;
	PROC_LOCK(p);
	psp = p->p_sigacts;
	if (SIGISMEMBER(psp->ps_osigset, sig)) {
		PROC_UNLOCK(p);
		osendsig(catcher, sig, mask, code);
		return;
	}
	regs = p->p_md.md_regs;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = p->p_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (p->p_flag & P_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));

	/* Allocate and validate space for the signal handler context. */
	if ((p->p_flag & P_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe *)(p->p_sigstk.ss_sp +
		    p->p_sigstk.ss_size - sizeof(struct sigframe));
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct sigframe *)regs->tf_esp - 1;
	PROC_UNLOCK(p);

	/*
	 * grow_stack() will return 0 if *sfp does not fit inside the stack
	 * and the stack can not be grown.
	 * useracc() will return FALSE if access is denied.
	 */
	if (grow_stack(p, (int)sfp) == 0 ||
	    !useracc((caddr_t)sfp, sizeof(*sfp), VM_PROT_WRITE)) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
#ifdef DEBUG
		printf("process %d has trashed its stack\n", p->p_pid);
#endif
		PROC_LOCK(p);
		SIGACTION(p, SIGILL) = SIG_DFL;
		SIGDELSET(p->p_sigignore, SIGILL);
		SIGDELSET(p->p_sigcatch, SIGILL);
		SIGDELSET(p->p_sigmask, SIGILL);
		psignal(p, SIGILL);
		PROC_UNLOCK(p);
		return;
	}

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	PROC_LOCK(p);
	if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill siginfo structure. */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = code;
		sf.sf_si.si_addr = (void *)regs->tf_err;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = code;
		sf.sf_addr = regs->tf_err;
		sf.sf_ahu.sf_handler = catcher;
	}
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * We should never have PSL_T set when returning from vm86
		 * mode.  It may be set here if we deliver a signal before
		 * getting to vm86 mode, so turn it off.
		 *
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP);
	}

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		PROC_LOCK(p);
		sigexit(p, SIGILL);
		/* NOTREACHED */
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig. Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 */
int
osigreturn(p, uap)
	struct proc *p;
	struct osigreturn_args /* {
		struct osigcontext *sigcntxp;
	} */ *uap;
{
	struct trapframe *regs;
	struct osigcontext *scp;
	int eflags;

	regs = p->p_md.md_regs;
	scp = uap->sigcntxp;
	if (!useracc((caddr_t)scp, sizeof(*scp), VM_PROT_READ))
		return (EFAULT);
	eflags = scp->sc_ps;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (p->p_addr->u_pcb.pcb_ext == 0)
			return (EINVAL);
		vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(p, SIGBUS, 0);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) | (eflags & VM_USERCHANGE) | PSL_VM;
		}
		tf->tf_vm86_ds = scp->sc_ds;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		if (!CS_SECURE(scp->sc_cs)) {
			trapsignal(p, SIGBUS, T_PROTFLT);
			return (EINVAL);
		}
		regs->tf_ds = scp->sc_ds;
		regs->tf_es = scp->sc_es;
		regs->tf_fs = scp->sc_fs;
	}

	/* Restore remaining registers. */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;

	PROC_LOCK(p);
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
	if (scp->sc_onstack & 1)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	SIGSETOLD(p->p_sigmask, scp->sc_mask);
	SIG_CANTMASK(p->p_sigmask);
	PROC_UNLOCK(p);
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;
	return (EJUSTRETURN);
}

int
sigreturn(p, uap)
	struct proc *p;
	struct sigreturn_args /* {
		ucontext_t *sigcntxp;
	} */ *uap;
{
	struct trapframe *regs;
	ucontext_t *ucp;
	int cs, eflags;

	ucp = uap->sigcntxp;
	if (!useracc((caddr_t)ucp, sizeof(struct osigcontext), VM_PROT_READ))
		return (EFAULT);
	if (((struct osigcontext *)ucp)->sc_trapno == 0x01d516)
		return (osigreturn(p, (struct osigreturn_args *)uap));

	/*
	 * Since ucp is not an osigcontext but a ucontext_t, we have to
	 * check again if all of it is accessible.  A ucontext_t is
	 * much larger, so instead of just checking for the pointer
	 * being valid for the size of an osigcontext, now check for
	 * it being valid for a whole, new-style ucontext_t.
	 */
	if (!useracc((caddr_t)ucp, sizeof(*ucp), VM_PROT_READ))
		return (EFAULT);

	regs = p->p_md.md_regs;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (p->p_addr->u_pcb.pcb_ext == 0)
			return (EINVAL);
		vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(p, SIGBUS, 0);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) | (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			printf("sigreturn: eflags = 0x%x\n", eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			printf("sigreturn: cs = 0x%x\n", cs);
			trapsignal(p, SIGBUS, T_PROTFLT);
			return (EINVAL);
		}

		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

	PROC_LOCK(p);
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
	if (ucp->uc_mcontext.mc_onstack & 1)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	p->p_sigmask = ucp->uc_sigmask;
	SIG_CANTMASK(p->p_sigmask);
	PROC_UNLOCK(p);
	return (EJUSTRETURN);
}

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		__asm__ ("hlt");
}

/*
 * Hook to idle the CPU when possible.  This currently only works in
 * the !SMP case, as there is no clean way to ensure that a CPU will be
 * woken when there is work available for it.
 */
static int	cpu_idle_hlt = 1;
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
    &cpu_idle_hlt, 0, "Idle loop HLT enable");

/*
 * Note that we have to be careful here to avoid a race between checking
 * procrunnable() and actually halting.  If we don't do this, we may waste
 * the time between calling hlt and the next interrupt even though there
 * is a runnable process.
 */
void
cpu_idle(void)
{
#ifndef SMP
	if (cpu_idle_hlt) {
		disable_intr();
		if (procrunnable())
			enable_intr();
		else {
			enable_intr();
			__asm __volatile("hlt");
		}
	}
#endif
}

/*
 * Clear registers on exec
 */
void
setregs(p, entry, stack, ps_strings)
	struct proc *p;
	u_long entry;
	u_long stack;
	u_long ps_strings;
{
	struct trapframe *regs = p->p_md.md_regs;
	struct pcb *pcb = &p->p_addr->u_pcb;

	if (pcb->pcb_ldt)
		user_ldt_free(pcb);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = entry;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = ps_strings;

	/* reset %gs as well */
	if (pcb == PCPU_GET(curpcb))
		load_gs(_udatasel);
	else
		pcb->pcb_gs = _udatasel;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == PCPU_GET(curpcb)) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	p->p_addr->u_pcb.pcb_flags &= ~FP_SOFTFP;

	/*
	 * Arrange to trap the next npx or `fwait' instruction (see npx.c
	 * for why fwait must be trapped at least if there is an npx or an
	 * emulator).  This is mainly to handle the case where npx0 is not
	 * configured, since the npx routines normally set up the trap
	 * otherwise.  It should be done only at boot time, but doing it
	 * here allows modifying `npx_exists' for testing the emulator on
	 * systems with an npx.
	 */
	load_cr0(rcr0() | CR0_MP | CR0_TS);

#ifdef DEV_NPX
	/* Initialize the npx (if any) for the current process. */
	npxinit(__INITIAL_NPXCW__);
#endif

	/*
	 * XXX - Linux emulator
	 * Make sure edx is 0x0 on entry. Linux binaries depend
	 * on it.
	 */
	p->p_retval[1] = 0;
}

void
cpu_setregs(void)
{
	unsigned int cr0;

	cr0 = rcr0();
	cr0 |= CR0_NE;			/* Done by npxinit() */
	cr0 |= CR0_MP | CR0_TS;		/* Done at every execve() too. */
#ifndef I386_CPU
	cr0 |= CR0_WP | CR0_AM;
#endif
	load_cr0(cr0);
	load_gs(_udatasel);
}

static int
sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS)
{
	int error;
	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
		req);
	if (!error && req->newptr)
		resettodr();
	return (error);
}

SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
	&adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");

SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set,
	CTLFLAG_RW, &disable_rtc_set, 0, "");

SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo,
	CTLFLAG_RD, &bootinfo, bootinfo, "");

SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock,
	CTLFLAG_RW, &wall_cmos_clock, 0, "");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int _default_ldt;
union descriptor gdt[NGDT * MAXCPU];	/* global descriptor table */
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */
#ifdef SMP
/* table descriptors - used to load tables by microp */
struct region_descriptor r_gdt, r_idt;
#endif

int private_tss;			/* flag indicating private tss */

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];

extern  struct user *proc0paddr;


/* software prototypes -- in more palatable form */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	0x0,			/* segment base address  */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
1197/* GCODE_SEL 1 Code Descriptor for kernel */ 1198{ 0x0, /* segment base address */ 1199 0xfffff, /* length - all address space */ 1200 SDT_MEMERA, /* segment type */ 1201 0, /* segment descriptor priority level */ 1202 1, /* segment descriptor present */ 1203 0, 0, 1204 1, /* default 32 vs 16 bit size */ 1205 1 /* limit granularity (byte/page units)*/ }, 1206/* GDATA_SEL 2 Data Descriptor for kernel */ 1207{ 0x0, /* segment base address */ 1208 0xfffff, /* length - all address space */ 1209 SDT_MEMRWA, /* segment type */ 1210 0, /* segment descriptor priority level */ 1211 1, /* segment descriptor present */ 1212 0, 0, 1213 1, /* default 32 vs 16 bit size */ 1214 1 /* limit granularity (byte/page units)*/ }, 1215/* GPRIV_SEL 3 SMP Per-Processor Private Data Descriptor */ 1216{ 0x0, /* segment base address */ 1217 0xfffff, /* length - all address space */ 1218 SDT_MEMRWA, /* segment type */ 1219 0, /* segment descriptor priority level */ 1220 1, /* segment descriptor present */ 1221 0, 0, 1222 1, /* default 32 vs 16 bit size */ 1223 1 /* limit granularity (byte/page units)*/ }, 1224/* GPROC0_SEL 4 Proc 0 Tss Descriptor */ 1225{ 1226 0x0, /* segment base address */ 1227 sizeof(struct i386tss)-1,/* length - all address space */ 1228 SDT_SYS386TSS, /* segment type */ 1229 0, /* segment descriptor priority level */ 1230 1, /* segment descriptor present */ 1231 0, 0, 1232 0, /* unused - default 32 vs 16 bit size */ 1233 0 /* limit granularity (byte/page units)*/ }, 1234/* GLDT_SEL 5 LDT Descriptor */ 1235{ (int) ldt, /* segment base address */ 1236 sizeof(ldt)-1, /* length - all address space */ 1237 SDT_SYSLDT, /* segment type */ 1238 SEL_UPL, /* segment descriptor priority level */ 1239 1, /* segment descriptor present */ 1240 0, 0, 1241 0, /* unused - default 32 vs 16 bit size */ 1242 0 /* limit granularity (byte/page units)*/ }, 1243/* GUSERLDT_SEL 6 User LDT Descriptor per process */ 1244{ (int) ldt, /* segment base address */ 1245 (512 * sizeof(union descriptor)-1), /* length */ 1246 SDT_SYSLDT, /* segment type */ 1247 0, /* segment descriptor priority level */ 1248 1, /* segment descriptor present */ 1249 0, 0, 1250 0, /* unused - default 32 vs 16 bit size */ 1251 0 /* limit granularity (byte/page units)*/ }, 1252/* GTGATE_SEL 7 Null Descriptor - Placeholder */ 1253{ 0x0, /* segment base address */ 1254 0x0, /* length - all address space */ 1255 0, /* segment type */ 1256 0, /* segment descriptor priority level */ 1257 0, /* segment descriptor present */ 1258 0, 0, 1259 0, /* default 32 vs 16 bit size */ 1260 0 /* limit granularity (byte/page units)*/ }, 1261/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */ 1262{ 0x400, /* segment base address */ 1263 0xfffff, /* length */ 1264 SDT_MEMRWA, /* segment type */ 1265 0, /* segment descriptor priority level */ 1266 1, /* segment descriptor present */ 1267 0, 0, 1268 1, /* default 32 vs 16 bit size */ 1269 1 /* limit granularity (byte/page units)*/ }, 1270/* GPANIC_SEL 9 Panic Tss Descriptor */ 1271{ (int) &dblfault_tss, /* segment base address */ 1272 sizeof(struct i386tss)-1,/* length - all address space */ 1273 SDT_SYS386TSS, /* segment type */ 1274 0, /* segment descriptor priority level */ 1275 1, /* segment descriptor present */ 1276 0, 0, 1277 0, /* unused - default 32 vs 16 bit size */ 1278 0 /* limit granularity (byte/page units)*/ }, 1279/* GBIOSCODE32_SEL 10 BIOS 32-bit interface (32bit Code) */ 1280{ 0, /* segment base address (overwritten) */ 1281 0xfffff, /* length */ 1282 SDT_MEMERA, /* segment 
type */ 1283 0, /* segment descriptor priority level */ 1284 1, /* segment descriptor present */ 1285 0, 0, 1286 0, /* default 32 vs 16 bit size */ 1287 1 /* limit granularity (byte/page units)*/ }, 1288/* GBIOSCODE16_SEL 11 BIOS 32-bit interface (16bit Code) */ 1289{ 0, /* segment base address (overwritten) */ 1290 0xfffff, /* length */ 1291 SDT_MEMERA, /* segment type */ 1292 0, /* segment descriptor priority level */ 1293 1, /* segment descriptor present */ 1294 0, 0, 1295 0, /* default 32 vs 16 bit size */ 1296 1 /* limit granularity (byte/page units)*/ }, 1297/* GBIOSDATA_SEL 12 BIOS 32-bit interface (Data) */ 1298{ 0, /* segment base address (overwritten) */ 1299 0xfffff, /* length */ 1300 SDT_MEMRWA, /* segment type */ 1301 0, /* segment descriptor priority level */ 1302 1, /* segment descriptor present */ 1303 0, 0, 1304 1, /* default 32 vs 16 bit size */ 1305 1 /* limit granularity (byte/page units)*/ }, 1306/* GBIOSUTIL_SEL 13 BIOS 16-bit interface (Utility) */ 1307{ 0, /* segment base address (overwritten) */ 1308 0xfffff, /* length */ 1309 SDT_MEMRWA, /* segment type */ 1310 0, /* segment descriptor priority level */ 1311 1, /* segment descriptor present */ 1312 0, 0, 1313 0, /* default 32 vs 16 bit size */ 1314 1 /* limit granularity (byte/page units)*/ }, 1315/* GBIOSARGS_SEL 14 BIOS 16-bit interface (Arguments) */ 1316{ 0, /* segment base address (overwritten) */ 1317 0xfffff, /* length */ 1318 SDT_MEMRWA, /* segment type */ 1319 0, /* segment descriptor priority level */ 1320 1, /* segment descriptor present */ 1321 0, 0, 1322 0, /* default 32 vs 16 bit size */ 1323 1 /* limit granularity (byte/page units)*/ }, 1324}; 1325 1326static struct soft_segment_descriptor ldt_segs[] = { 1327 /* Null Descriptor - overwritten by call gate */ 1328{ 0x0, /* segment base address */ 1329 0x0, /* length - all address space */ 1330 0, /* segment type */ 1331 0, /* segment descriptor priority level */ 1332 0, /* segment descriptor present */ 1333 0, 0, 1334 0, /* default 32 vs 16 bit size */ 1335 0 /* limit granularity (byte/page units)*/ }, 1336 /* Null Descriptor - overwritten by call gate */ 1337{ 0x0, /* segment base address */ 1338 0x0, /* length - all address space */ 1339 0, /* segment type */ 1340 0, /* segment descriptor priority level */ 1341 0, /* segment descriptor present */ 1342 0, 0, 1343 0, /* default 32 vs 16 bit size */ 1344 0 /* limit granularity (byte/page units)*/ }, 1345 /* Null Descriptor - overwritten by call gate */ 1346{ 0x0, /* segment base address */ 1347 0x0, /* length - all address space */ 1348 0, /* segment type */ 1349 0, /* segment descriptor priority level */ 1350 0, /* segment descriptor present */ 1351 0, 0, 1352 0, /* default 32 vs 16 bit size */ 1353 0 /* limit granularity (byte/page units)*/ }, 1354 /* Code Descriptor for user */ 1355{ 0x0, /* segment base address */ 1356 0xfffff, /* length - all address space */ 1357 SDT_MEMERA, /* segment type */ 1358 SEL_UPL, /* segment descriptor priority level */ 1359 1, /* segment descriptor present */ 1360 0, 0, 1361 1, /* default 32 vs 16 bit size */ 1362 1 /* limit granularity (byte/page units)*/ }, 1363 /* Null Descriptor - overwritten by call gate */ 1364{ 0x0, /* segment base address */ 1365 0x0, /* length - all address space */ 1366 0, /* segment type */ 1367 0, /* segment descriptor priority level */ 1368 0, /* segment descriptor present */ 1369 0, 0, 1370 0, /* default 32 vs 16 bit size */ 1371 0 /* limit granularity (byte/page units)*/ }, 1372 /* Data Descriptor for user */ 1373{ 0x0, /* segment base 
address */ 1374 0xfffff, /* length - all address space */ 1375 SDT_MEMRWA, /* segment type */ 1376 SEL_UPL, /* segment descriptor priority level */ 1377 1, /* segment descriptor present */ 1378 0, 0, 1379 1, /* default 32 vs 16 bit size */ 1380 1 /* limit granularity (byte/page units)*/ }, 1381}; 1382 1383void 1384setidt(idx, func, typ, dpl, selec) 1385 int idx; 1386 inthand_t *func; 1387 int typ; 1388 int dpl; 1389 int selec; 1390{ 1391 struct gate_descriptor *ip; 1392 1393 ip = idt + idx; 1394 ip->gd_looffset = (int)func; 1395 ip->gd_selector = selec; 1396 ip->gd_stkcpy = 0; 1397 ip->gd_xx = 0; 1398 ip->gd_type = typ; 1399 ip->gd_dpl = dpl; 1400 ip->gd_p = 1; 1401 ip->gd_hioffset = ((int)func)>>16 ; 1402} 1403 1404#define IDTVEC(name) __CONCAT(X,name) 1405 1406extern inthand_t 1407 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl), 1408 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm), 1409 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot), 1410 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align), 1411 IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall); 1412 1413void 1414sdtossd(sd, ssd) 1415 struct segment_descriptor *sd; 1416 struct soft_segment_descriptor *ssd; 1417{ 1418 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase; 1419 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit; 1420 ssd->ssd_type = sd->sd_type; 1421 ssd->ssd_dpl = sd->sd_dpl; 1422 ssd->ssd_p = sd->sd_p; 1423 ssd->ssd_def32 = sd->sd_def32; 1424 ssd->ssd_gran = sd->sd_gran; 1425} 1426 1427#define PHYSMAP_SIZE (2 * 8) 1428 1429/* 1430 * Populate the (physmap) array with base/bound pairs describing the 1431 * available physical memory in the system, then test this memory and 1432 * build the phys_avail array describing the actually-available memory. 1433 * 1434 * If we cannot accurately determine the physical memory map, then use 1435 * value from the 0xE801 call, and failing that, the RTC. 1436 * 1437 * Total memory size may be set by the kernel environment variable 1438 * hw.physmem or the compile-time define MAXMEM. 1439 */ 1440static void 1441getmemsize(int first) 1442{ 1443 int i, physmap_idx, pa_indx; 1444 u_int basemem, extmem; 1445 struct vm86frame vmf; 1446 struct vm86context vmc; 1447 vm_offset_t pa, physmap[PHYSMAP_SIZE]; 1448 pt_entry_t pte; 1449 const char *cp; 1450 struct bios_smap *smap; 1451 1452 bzero(&vmf, sizeof(struct vm86frame)); 1453 bzero(physmap, sizeof(physmap)); 1454 1455 /* 1456 * Perform "base memory" related probes & setup 1457 */ 1458 vm86_intcall(0x12, &vmf); 1459 basemem = vmf.vmf_ax; 1460 if (basemem > 640) { 1461 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n", 1462 basemem); 1463 basemem = 640; 1464 } 1465 1466 /* 1467 * XXX if biosbasemem is now < 640, there is a `hole' 1468 * between the end of base memory and the start of 1469 * ISA memory. The hole may be empty or it may 1470 * contain BIOS code or data. Map it read/write so 1471 * that the BIOS can write to it. (Memory from 0 to 1472 * the physical end of the kernel is mapped read-only 1473 * to begin with and then parts of it are remapped. 1474 * The parts that aren't remapped form holes that 1475 * remain read-only and are unused by the kernel. 1476 * The base memory area is below the physical end of 1477 * the kernel and right now forms a read-only hole. 1478 * The part of it from PAGE_SIZE to 1479 * (trunc_page(biosbasemem * 1024) - 1) will be 1480 * remapped and used by the kernel later.) 
1481 * 1482 * This code is similar to the code used in 1483 * pmap_mapdev, but since no memory needs to be 1484 * allocated we simply change the mapping. 1485 */ 1486 for (pa = trunc_page(basemem * 1024); 1487 pa < ISA_HOLE_START; pa += PAGE_SIZE) { 1488 pte = (pt_entry_t)vtopte(pa + KERNBASE); 1489 *pte = pa | PG_RW | PG_V; 1490 } 1491 1492 /* 1493 * if basemem != 640, map pages r/w into vm86 page table so 1494 * that the bios can scribble on it. 1495 */ 1496 pte = (pt_entry_t)vm86paddr; 1497 for (i = basemem / 4; i < 160; i++) 1498 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U; 1499 1500 /* 1501 * map page 1 R/W into the kernel page table so we can use it 1502 * as a buffer. The kernel will unmap this page later. 1503 */ 1504 pte = (pt_entry_t)vtopte(KERNBASE + (1 << PAGE_SHIFT)); 1505 *pte = (1 << PAGE_SHIFT) | PG_RW | PG_V; 1506 1507 /* 1508 * get memory map with INT 15:E820 1509 */ 1510 vmc.npages = 0; 1511 smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT)); 1512 vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di); 1513 1514 physmap_idx = 0; 1515 vmf.vmf_ebx = 0; 1516 do { 1517 vmf.vmf_eax = 0xE820; 1518 vmf.vmf_edx = SMAP_SIG; 1519 vmf.vmf_ecx = sizeof(struct bios_smap); 1520 i = vm86_datacall(0x15, &vmf, &vmc); 1521 if (i || vmf.vmf_eax != SMAP_SIG) 1522 break; 1523 if (boothowto & RB_VERBOSE) 1524 printf("SMAP type=%02x base=%08x %08x len=%08x %08x\n", 1525 smap->type, 1526 *(u_int32_t *)((char *)&smap->base + 4), 1527 (u_int32_t)smap->base, 1528 *(u_int32_t *)((char *)&smap->length + 4), 1529 (u_int32_t)smap->length); 1530 1531 if (smap->type != 0x01) 1532 goto next_run; 1533 1534 if (smap->length == 0) 1535 goto next_run; 1536 1537 if (smap->base >= 0xffffffff) { 1538 printf("%uK of memory above 4GB ignored\n", 1539 (u_int)(smap->length / 1024)); 1540 goto next_run; 1541 } 1542 1543 for (i = 0; i <= physmap_idx; i += 2) { 1544 if (smap->base < physmap[i + 1]) { 1545 if (boothowto & RB_VERBOSE) 1546 printf( 1547 "Overlapping or non-montonic memory region, ignoring second region\n"); 1548 goto next_run; 1549 } 1550 } 1551 1552 if (smap->base == physmap[physmap_idx + 1]) { 1553 physmap[physmap_idx + 1] += smap->length; 1554 goto next_run; 1555 } 1556 1557 physmap_idx += 2; 1558 if (physmap_idx == PHYSMAP_SIZE) { 1559 printf( 1560 "Too many segments in the physical address map, giving up\n"); 1561 break; 1562 } 1563 physmap[physmap_idx] = smap->base; 1564 physmap[physmap_idx + 1] = smap->base + smap->length; 1565next_run: 1566 } while (vmf.vmf_ebx != 0); 1567 1568 if (physmap[1] != 0) 1569 goto physmap_done; 1570 1571 /* 1572 * If we failed above, try memory map with INT 15:E801 1573 */ 1574 vmf.vmf_ax = 0xE801; 1575 if (vm86_intcall(0x15, &vmf) == 0) { 1576 extmem = vmf.vmf_cx + vmf.vmf_dx * 64; 1577 } else { 1578#if 0 1579 vmf.vmf_ah = 0x88; 1580 vm86_intcall(0x15, &vmf); 1581 extmem = vmf.vmf_ax; 1582#else 1583 /* 1584 * Prefer the RTC value for extended memory. 1585 */ 1586 extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8); 1587#endif 1588 } 1589 1590 /* 1591 * Special hack for chipsets that still remap the 384k hole when 1592 * there's 16MB of memory - this really confuses people that 1593 * are trying to use bus mastering ISA controllers with the 1594 * "16MB limit"; they only have 16MB, but the remapping puts 1595 * them beyond the limit. 1596 * 1597 * If extended memory is between 15-16MB (16-17MB phys address range), 1598 * chop it to 15MB. 
1599 */ 1600 if ((extmem > 15 * 1024) && (extmem < 16 * 1024)) 1601 extmem = 15 * 1024; 1602 1603 physmap[0] = 0; 1604 physmap[1] = basemem * 1024; 1605 physmap_idx = 2; 1606 physmap[physmap_idx] = 0x100000; 1607 physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024; 1608 1609physmap_done: 1610 /* 1611 * Now, physmap contains a map of physical memory. 1612 */ 1613 1614#ifdef SMP 1615 /* make hole for AP bootstrap code */ 1616 physmap[1] = mp_bootaddress(physmap[1] / 1024); 1617 1618 /* look for the MP hardware - needed for apic addresses */ 1619 i386_mp_probe(); 1620#endif 1621 1622 /* 1623 * Maxmem isn't the "maximum memory", it's one larger than the 1624 * highest page of the physical address space. It should be 1625 * called something like "Maxphyspage". We may adjust this 1626 * based on ``hw.physmem'' and the results of the memory test. 1627 */ 1628 Maxmem = atop(physmap[physmap_idx + 1]); 1629 1630#ifdef MAXMEM 1631 Maxmem = MAXMEM / 4; 1632#endif 1633 1634 /* 1635 * hw.maxmem is a size in bytes; we also allow k, m, and g suffixes 1636 * for the appropriate modifiers. This overrides MAXMEM. 1637 */ 1638 if ((cp = getenv("hw.physmem")) != NULL) { 1639 u_int64_t AllowMem, sanity; 1640 char *ep; 1641 1642 sanity = AllowMem = strtouq(cp, &ep, 0); 1643 if ((ep != cp) && (*ep != 0)) { 1644 switch(*ep) { 1645 case 'g': 1646 case 'G': 1647 AllowMem <<= 10; 1648 case 'm': 1649 case 'M': 1650 AllowMem <<= 10; 1651 case 'k': 1652 case 'K': 1653 AllowMem <<= 10; 1654 break; 1655 default: 1656 AllowMem = sanity = 0; 1657 } 1658 if (AllowMem < sanity) 1659 AllowMem = 0; 1660 } 1661 if (AllowMem == 0) 1662 printf("Ignoring invalid memory size of '%s'\n", cp); 1663 else 1664 Maxmem = atop(AllowMem); 1665 } 1666 1667 if (atop(physmap[physmap_idx + 1]) != Maxmem && 1668 (boothowto & RB_VERBOSE)) 1669 printf("Physical memory use set to %uK\n", Maxmem * 4); 1670 1671 /* 1672 * If Maxmem has been increased beyond what the system has detected, 1673 * extend the last memory segment to the new limit. 1674 */ 1675 if (atop(physmap[physmap_idx + 1]) < Maxmem) 1676 physmap[physmap_idx + 1] = ptoa(Maxmem); 1677 1678 /* call pmap initialization to make new kernel address space */ 1679 pmap_bootstrap(first, 0); 1680 1681 /* 1682 * Size up each available chunk of physical memory. 1683 */ 1684 physmap[0] = PAGE_SIZE; /* mask off page 0 */ 1685 pa_indx = 0; 1686 phys_avail[pa_indx++] = physmap[0]; 1687 phys_avail[pa_indx] = physmap[0]; 1688#if 0 1689 pte = (pt_entry_t)vtopte(KERNBASE); 1690#else 1691 pte = (pt_entry_t)CMAP1; 1692#endif 1693 1694 /* 1695 * physmap is in bytes, so when converting to page boundaries, 1696 * round up the start address and round down the end address. 1697 */ 1698 for (i = 0; i <= physmap_idx; i += 2) { 1699 vm_offset_t end; 1700 1701 end = ptoa(Maxmem); 1702 if (physmap[i + 1] < end) 1703 end = trunc_page(physmap[i + 1]); 1704 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) { 1705 int tmp, page_bad; 1706#if 0 1707 int *ptr = 0; 1708#else 1709 int *ptr = (int *)CADDR1; 1710#endif 1711 1712 /* 1713 * block out kernel memory as not available. 
1714 */ 1715 if (pa >= 0x100000 && pa < first) 1716 continue; 1717 1718 page_bad = FALSE; 1719 1720 /* 1721 * map page into kernel: valid, read/write,non-cacheable 1722 */ 1723 *pte = pa | PG_V | PG_RW | PG_N; 1724 invltlb(); 1725 1726 tmp = *(int *)ptr; 1727 /* 1728 * Test for alternating 1's and 0's 1729 */ 1730 *(volatile int *)ptr = 0xaaaaaaaa; 1731 if (*(volatile int *)ptr != 0xaaaaaaaa) { 1732 page_bad = TRUE; 1733 } 1734 /* 1735 * Test for alternating 0's and 1's 1736 */ 1737 *(volatile int *)ptr = 0x55555555; 1738 if (*(volatile int *)ptr != 0x55555555) { 1739 page_bad = TRUE; 1740 } 1741 /* 1742 * Test for all 1's 1743 */ 1744 *(volatile int *)ptr = 0xffffffff; 1745 if (*(volatile int *)ptr != 0xffffffff) { 1746 page_bad = TRUE; 1747 } 1748 /* 1749 * Test for all 0's 1750 */ 1751 *(volatile int *)ptr = 0x0; 1752 if (*(volatile int *)ptr != 0x0) { 1753 page_bad = TRUE; 1754 } 1755 /* 1756 * Restore original value. 1757 */ 1758 *(int *)ptr = tmp; 1759 1760 /* 1761 * Adjust array of valid/good pages. 1762 */ 1763 if (page_bad == TRUE) { 1764 continue; 1765 } 1766 /* 1767 * If this good page is a continuation of the 1768 * previous set of good pages, then just increase 1769 * the end pointer. Otherwise start a new chunk. 1770 * Note that "end" points one higher than end, 1771 * making the range >= start and < end. 1772 * If we're also doing a speculative memory 1773 * test and we at or past the end, bump up Maxmem 1774 * so that we keep going. The first bad page 1775 * will terminate the loop. 1776 */ 1777 if (phys_avail[pa_indx] == pa) { 1778 phys_avail[pa_indx] += PAGE_SIZE; 1779 } else { 1780 pa_indx++; 1781 if (pa_indx == PHYS_AVAIL_ARRAY_END) { 1782 printf("Too many holes in the physical address space, giving up\n"); 1783 pa_indx--; 1784 break; 1785 } 1786 phys_avail[pa_indx++] = pa; /* start */ 1787 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */ 1788 } 1789 physmem++; 1790 } 1791 } 1792 *pte = 0; 1793 invltlb(); 1794 1795 /* 1796 * XXX 1797 * The last chunk must contain at least one page plus the message 1798 * buffer to avoid complicating other code (message buffer address 1799 * calculation, etc.). 1800 */ 1801 while (phys_avail[pa_indx - 1] + PAGE_SIZE + 1802 round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) { 1803 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]); 1804 phys_avail[pa_indx--] = 0; 1805 phys_avail[pa_indx--] = 0; 1806 } 1807 1808 Maxmem = atop(phys_avail[pa_indx]); 1809 1810 /* Trim off space for the message buffer. */ 1811 phys_avail[pa_indx] -= round_page(MSGBUF_SIZE); 1812 1813 avail_end = phys_avail[pa_indx]; 1814} 1815 1816void 1817init386(first) 1818 int first; 1819{ 1820 int x; 1821 struct gate_descriptor *gdp; 1822 int gsel_tss; 1823#ifndef SMP 1824 /* table descriptors - used to load tables by microp */ 1825 struct region_descriptor r_gdt, r_idt; 1826#endif 1827 int off; 1828 1829 proc0.p_addr = proc0paddr; 1830 1831 atdevbase = ISA_HOLE_START + KERNBASE; 1832 1833 if (bootinfo.bi_modulep) { 1834 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE; 1835 preload_bootstrap_relocate(KERNBASE); 1836 } else { 1837 printf("WARNING: loader(8) metadata is missing!\n"); 1838 } 1839 if (bootinfo.bi_envp) 1840 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE; 1841 1842 /* 1843 * make gdt memory segments, the code segment goes up to end of the 1844 * page with etext in it, the data segment goes to the end of 1845 * the address space 1846 */ 1847 /* 1848 * XXX text protection is temporarily (?) disabled. 
The limit was 1849 * i386_btop(round_page(etext)) - 1. 1850 */ 1851 gdt_segs[GCODE_SEL].ssd_limit = i386_btop(0) - 1; 1852 gdt_segs[GDATA_SEL].ssd_limit = i386_btop(0) - 1; 1853#ifdef SMP 1854 gdt_segs[GPRIV_SEL].ssd_limit = 1855 i386_btop(sizeof(struct privatespace)) - 1; 1856 gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[0]; 1857 gdt_segs[GPROC0_SEL].ssd_base = 1858 (int) &SMP_prvspace[0].globaldata.gd_common_tss; 1859 SMP_prvspace[0].globaldata.gd_prvspace = &SMP_prvspace[0].globaldata; 1860#else 1861 gdt_segs[GPRIV_SEL].ssd_limit = 1862 i386_btop(sizeof(struct globaldata)) - 1; 1863 gdt_segs[GPRIV_SEL].ssd_base = (int) &__globaldata; 1864 gdt_segs[GPROC0_SEL].ssd_base = 1865 (int) &__globaldata.gd_common_tss; 1866 __globaldata.gd_prvspace = &__globaldata; 1867#endif 1868 1869 for (x = 0; x < NGDT; x++) { 1870#ifdef BDE_DEBUGGER 1871 /* avoid overwriting db entries with APM ones */ 1872 if (x >= GAPMCODE32_SEL && x <= GAPMDATA_SEL) 1873 continue; 1874#endif 1875 ssdtosd(&gdt_segs[x], &gdt[x].sd); 1876 } 1877 1878 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 1879 r_gdt.rd_base = (int) gdt; 1880 lgdt(&r_gdt); 1881 1882 /* setup curproc so that mutexes work */ 1883 PCPU_SET(curproc, &proc0); 1884 PCPU_SET(spinlocks, NULL); 1885 1886 LIST_INIT(&proc0.p_contested); 1887 1888 /* 1889 * Initialize mutexes. 1890 */ 1891 mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE); 1892 mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_RECURSE); 1893 mtx_init(&proc0.p_mtx, "process lock", MTX_DEF); 1894 mtx_init(&clock_lock, "clk", MTX_SPIN | MTX_RECURSE); 1895#ifdef SMP 1896 mtx_init(&imen_mtx, "imen", MTX_SPIN); 1897#endif 1898 mtx_lock(&Giant); 1899 1900 /* make ldt memory segments */ 1901 /* 1902 * The data segment limit must not cover the user area because we 1903 * don't want the user area to be writable in copyout() etc. (page 1904 * level protection is lost in kernel mode on 386's). Also, we 1905 * don't want the user area to be writable directly (page level 1906 * protection of the user area is not available on 486's with 1907 * CR0_WP set, because there is no user-read/kernel-write mode). 1908 * 1909 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max. And it 1910 * should be spelled ...MAX_USER... 1911 */ 1912#define VM_END_USER_RW_ADDRESS VM_MAXUSER_ADDRESS 1913 /* 1914 * The code segment limit has to cover the user area until we move 1915 * the signal trampoline out of the user area. This is safe because 1916 * the code segment cannot be written to directly. 
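 * VM_END_USER_R_ADDRESS below is therefore VM_END_USER_RW_ADDRESS plus
 * UPAGES pages, so the user code segment (LUCODE_SEL) still reaches the
 * trampoline in the user area while the data segment (LUDATA_SEL) stops
 * short of it.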
1917 */ 1918#define VM_END_USER_R_ADDRESS (VM_END_USER_RW_ADDRESS + UPAGES * PAGE_SIZE) 1919 ldt_segs[LUCODE_SEL].ssd_limit = i386_btop(VM_END_USER_R_ADDRESS) - 1; 1920 ldt_segs[LUDATA_SEL].ssd_limit = i386_btop(VM_END_USER_RW_ADDRESS) - 1; 1921 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++) 1922 ssdtosd(&ldt_segs[x], &ldt[x].sd); 1923 1924 _default_ldt = GSEL(GLDT_SEL, SEL_KPL); 1925 lldt(_default_ldt); 1926 PCPU_SET(currentldt, _default_ldt); 1927 1928 /* exceptions */ 1929 for (x = 0; x < NIDT; x++) 1930 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1931 setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1932 setidt(1, &IDTVEC(dbg), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1933 setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1934 setidt(3, &IDTVEC(bpt), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL)); 1935 setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL)); 1936 setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1937 setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1938 setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1939 setidt(8, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL)); 1940 setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1941 setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1942 setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1943 setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1944 setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1945 setidt(14, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1946 setidt(15, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1947 setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1948 setidt(17, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1949 setidt(18, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1950 setidt(0x80, &IDTVEC(int0x80_syscall), 1951 SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL)); 1952 1953 r_idt.rd_limit = sizeof(idt0) - 1; 1954 r_idt.rd_base = (int) idt; 1955 lidt(&r_idt); 1956 1957 /* 1958 * Initialize the console before we print anything out. 1959 */ 1960 cninit(); 1961 1962#ifdef DEV_ISA 1963 isa_defaultirq(); 1964#endif 1965 1966#ifdef DDB 1967 kdb_init(); 1968 if (boothowto & RB_KDB) 1969 Debugger("Boot flags requested debugger"); 1970#endif 1971 1972 finishidentcpu(); /* Final stage of CPU initialization */ 1973 setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1974 setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1975 initializecpu(); /* Initialize CPU registers */ 1976 1977 /* make an initial tss so cpu can get interrupt stack on syscall! 
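 * tss_esp0 is set just below the top of proc0's kernel stack and tss_ss0
 * to the kernel data selector, so ring 3 to ring 0 transitions find a
 * valid stack; ltr() then loads the task register with GPROC0_SEL.
 * Setting tss_ioopt to sizeof(struct i386tss) << 16 places the I/O
 * bitmap base past the TSS limit, presumably so that no port access is
 * granted to user mode by default.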
*/ 1978 PCPU_SET(common_tss.tss_esp0, 1979 (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16); 1980 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL)); 1981 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); 1982 private_tss = 0; 1983 PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd); 1984 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt)); 1985 PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16); 1986 ltr(gsel_tss); 1987 1988 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 = 1989 dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)]; 1990 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 = 1991 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL); 1992 dblfault_tss.tss_cr3 = (int)IdlePTD; 1993 dblfault_tss.tss_eip = (int) dblfault_handler; 1994 dblfault_tss.tss_eflags = PSL_KERNEL; 1995 dblfault_tss.tss_ds = dblfault_tss.tss_es = 1996 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL); 1997 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL); 1998 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL); 1999 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL); 2000 2001 vm86_initialize(); 2002 getmemsize(first); 2003 2004 /* now running on new page tables, configured, and u/iom is accessible */ 2005 2006 /* Map the message buffer. */ 2007 for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE) 2008 pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off); 2009 2010 msgbufinit(msgbufp, MSGBUF_SIZE); 2011 2012 /* make a call gate to reenter kernel with */ 2013 gdp = &ldt[LSYS5CALLS_SEL].gd; 2014 2015 x = (int) &IDTVEC(lcall_syscall); 2016 gdp->gd_looffset = x; 2017 gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL); 2018 gdp->gd_stkcpy = 1; 2019 gdp->gd_type = SDT_SYS386CGT; 2020 gdp->gd_dpl = SEL_UPL; 2021 gdp->gd_p = 1; 2022 gdp->gd_hioffset = x >> 16; 2023 2024 /* XXX does this work?
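 * The BSD/OS (LBSDICALLS_SEL) and, apparently, Solaris 2.6
 * (LSOL26CALLS_SEL) slots are simply aliased to the SYSV call gate set
 * up above, so binaries using those lcall conventions reach the same
 * syscall entry point.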
*/ 2025 ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL]; 2026 ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL]; 2027 2028 /* transfer to user mode */ 2029 2030 _ucodesel = LSEL(LUCODE_SEL, SEL_UPL); 2031 _udatasel = LSEL(LUDATA_SEL, SEL_UPL); 2032 2033 /* setup proc 0's pcb */ 2034 proc0.p_addr->u_pcb.pcb_flags = 0; 2035 proc0.p_addr->u_pcb.pcb_cr3 = (int)IdlePTD; 2036 proc0.p_addr->u_pcb.pcb_ext = 0; 2037 proc0.p_md.md_regs = &proc0_tf; 2038} 2039 2040#if defined(I586_CPU) && !defined(NO_F00F_HACK) 2041static void f00f_hack(void *unused); 2042SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL); 2043 2044static void 2045f00f_hack(void *unused) { 2046 struct gate_descriptor *new_idt; 2047#ifndef SMP 2048 struct region_descriptor r_idt; 2049#endif 2050 vm_offset_t tmp; 2051 2052 if (!has_f00f_bug) 2053 return; 2054 2055 printf("Intel Pentium detected, installing workaround for F00F bug\n"); 2056 2057 r_idt.rd_limit = sizeof(idt0) - 1; 2058 2059 tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2); 2060 if (tmp == 0) 2061 panic("kmem_alloc returned 0"); 2062 if (((unsigned int)tmp & (PAGE_SIZE-1)) != 0) 2063 panic("kmem_alloc returned non-page-aligned memory"); 2064 /* Put the first seven entries in the lower page */ 2065 new_idt = (struct gate_descriptor*)(tmp + PAGE_SIZE - (7*8)); 2066 bcopy(idt, new_idt, sizeof(idt0)); 2067 r_idt.rd_base = (int)new_idt; 2068 lidt(&r_idt); 2069 idt = new_idt; 2070 mtx_lock(&vm_mtx); 2071 if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE, 2072 VM_PROT_READ, FALSE) != KERN_SUCCESS) 2073 panic("vm_map_protect failed"); 2074 mtx_unlock(&vm_mtx); 2075 return; 2076} 2077#endif /* defined(I586_CPU) && !NO_F00F_HACK */ 2078 2079int 2080ptrace_set_pc(p, addr) 2081 struct proc *p; 2082 unsigned long addr; 2083{ 2084 p->p_md.md_regs->tf_eip = addr; 2085 return (0); 2086} 2087 2088int 2089ptrace_single_step(p) 2090 struct proc *p; 2091{ 2092 p->p_md.md_regs->tf_eflags |= PSL_T; 2093 return (0); 2094} 2095 2096int ptrace_read_u_check(p, addr, len) 2097 struct proc *p; 2098 vm_offset_t addr; 2099 size_t len; 2100{ 2101 vm_offset_t gap; 2102 2103 if ((vm_offset_t) (addr + len) < addr) 2104 return EPERM; 2105 if ((vm_offset_t) (addr + len) <= sizeof(struct user)) 2106 return 0; 2107 2108 gap = (char *) p->p_md.md_regs - (char *) p->p_addr; 2109 2110 if ((vm_offset_t) addr < gap) 2111 return EPERM; 2112 if ((vm_offset_t) (addr + len) <= 2113 (vm_offset_t) (gap + sizeof(struct trapframe))) 2114 return 0; 2115 return EPERM; 2116} 2117 2118int ptrace_write_u(p, off, data) 2119 struct proc *p; 2120 vm_offset_t off; 2121 long data; 2122{ 2123 struct trapframe frame_copy; 2124 vm_offset_t min; 2125 struct trapframe *tp; 2126 2127 /* 2128 * Privileged kernel state is scattered all over the user area. 2129 * Only allow write access to parts of regs and to fpregs. 
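 * Writes that fall inside the trapframe window are staged in a local
 * copy first so that EFL_SECURE()/CS_SECURE() can reject attempts to
 * sneak in privileged eflags bits or a kernel %cs; writes inside
 * pcb_savefpu are applied directly; anything else returns EFAULT.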
2130 */ 2131 min = (char *)p->p_md.md_regs - (char *)p->p_addr; 2132 if (off >= min && off <= min + sizeof(struct trapframe) - sizeof(int)) { 2133 tp = p->p_md.md_regs; 2134 frame_copy = *tp; 2135 *(int *)((char *)&frame_copy + (off - min)) = data; 2136 if (!EFL_SECURE(frame_copy.tf_eflags, tp->tf_eflags) || 2137 !CS_SECURE(frame_copy.tf_cs)) 2138 return (EINVAL); 2139 *(int*)((char *)p->p_addr + off) = data; 2140 return (0); 2141 } 2142 min = offsetof(struct user, u_pcb) + offsetof(struct pcb, pcb_savefpu); 2143 if (off >= min && off <= min + sizeof(struct save87) - sizeof(int)) { 2144 *(int*)((char *)p->p_addr + off) = data; 2145 return (0); 2146 } 2147 return (EFAULT); 2148} 2149 2150int 2151fill_regs(p, regs) 2152 struct proc *p; 2153 struct reg *regs; 2154{ 2155 struct pcb *pcb; 2156 struct trapframe *tp; 2157 2158 tp = p->p_md.md_regs; 2159 regs->r_fs = tp->tf_fs; 2160 regs->r_es = tp->tf_es; 2161 regs->r_ds = tp->tf_ds; 2162 regs->r_edi = tp->tf_edi; 2163 regs->r_esi = tp->tf_esi; 2164 regs->r_ebp = tp->tf_ebp; 2165 regs->r_ebx = tp->tf_ebx; 2166 regs->r_edx = tp->tf_edx; 2167 regs->r_ecx = tp->tf_ecx; 2168 regs->r_eax = tp->tf_eax; 2169 regs->r_eip = tp->tf_eip; 2170 regs->r_cs = tp->tf_cs; 2171 regs->r_eflags = tp->tf_eflags; 2172 regs->r_esp = tp->tf_esp; 2173 regs->r_ss = tp->tf_ss; 2174 pcb = &p->p_addr->u_pcb; 2175 regs->r_gs = pcb->pcb_gs; 2176 return (0); 2177} 2178 2179int 2180set_regs(p, regs) 2181 struct proc *p; 2182 struct reg *regs; 2183{ 2184 struct pcb *pcb; 2185 struct trapframe *tp; 2186 2187 tp = p->p_md.md_regs; 2188 if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) || 2189 !CS_SECURE(regs->r_cs)) 2190 return (EINVAL); 2191 tp->tf_fs = regs->r_fs; 2192 tp->tf_es = regs->r_es; 2193 tp->tf_ds = regs->r_ds; 2194 tp->tf_edi = regs->r_edi; 2195 tp->tf_esi = regs->r_esi; 2196 tp->tf_ebp = regs->r_ebp; 2197 tp->tf_ebx = regs->r_ebx; 2198 tp->tf_edx = regs->r_edx; 2199 tp->tf_ecx = regs->r_ecx; 2200 tp->tf_eax = regs->r_eax; 2201 tp->tf_eip = regs->r_eip; 2202 tp->tf_cs = regs->r_cs; 2203 tp->tf_eflags = regs->r_eflags; 2204 tp->tf_esp = regs->r_esp; 2205 tp->tf_ss = regs->r_ss; 2206 pcb = &p->p_addr->u_pcb; 2207 pcb->pcb_gs = regs->r_gs; 2208 return (0); 2209} 2210 2211int 2212fill_fpregs(p, fpregs) 2213 struct proc *p; 2214 struct fpreg *fpregs; 2215{ 2216 bcopy(&p->p_addr->u_pcb.pcb_savefpu, fpregs, sizeof *fpregs); 2217 return (0); 2218} 2219 2220int 2221set_fpregs(p, fpregs) 2222 struct proc *p; 2223 struct fpreg *fpregs; 2224{ 2225 bcopy(fpregs, &p->p_addr->u_pcb.pcb_savefpu, sizeof *fpregs); 2226 return (0); 2227} 2228 2229int 2230fill_dbregs(p, dbregs) 2231 struct proc *p; 2232 struct dbreg *dbregs; 2233{ 2234 struct pcb *pcb; 2235 2236 pcb = &p->p_addr->u_pcb; 2237 dbregs->dr0 = pcb->pcb_dr0; 2238 dbregs->dr1 = pcb->pcb_dr1; 2239 dbregs->dr2 = pcb->pcb_dr2; 2240 dbregs->dr3 = pcb->pcb_dr3; 2241 dbregs->dr4 = 0; 2242 dbregs->dr5 = 0; 2243 dbregs->dr6 = pcb->pcb_dr6; 2244 dbregs->dr7 = pcb->pcb_dr7; 2245 return (0); 2246} 2247 2248int 2249set_dbregs(p, dbregs) 2250 struct proc *p; 2251 struct dbreg *dbregs; 2252{ 2253 struct pcb *pcb; 2254 int i; 2255 u_int32_t mask1, mask2; 2256 2257 /* 2258 * Don't let an illegal value for dr7 get set. Specifically, 2259 * check for undefined settings. Setting these bit patterns 2260 * result in undefined behaviour and can lead to an unexpected 2261 * TRCTRAP. 
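 * The loop below walks the eight 2-bit R/W and LEN fields in dr7 bits
 * 16-31 and rejects the undefined 10b encoding in any of them; a request
 * that sets any of bits 10-15 (0x0000fc00) is refused as well.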
2262 */ 2263 for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 8; 2264 i++, mask1 <<= 2, mask2 <<= 2) 2265 if ((dbregs->dr7 & mask1) == mask2) 2266 return (EINVAL); 2267 2268 if (dbregs->dr7 & 0x0000fc00) 2269 return (EINVAL); 2270 2271 2272 2273 pcb = &p->p_addr->u_pcb; 2274 2275 /* 2276 * Don't let a process set a breakpoint that is not within the 2277 * process's address space. If a process could do this, it 2278 * could halt the system by setting a breakpoint in the kernel 2279 * (if ddb was enabled). Thus, we need to check to make sure 2280 * that no breakpoints are being enabled for addresses outside 2281 * process's address space, unless, perhaps, we were called by 2282 * uid 0. 2283 * 2284 * XXX - what about when the watched area of the user's 2285 * address space is written into from within the kernel 2286 * ... wouldn't that still cause a breakpoint to be generated 2287 * from within kernel mode? 2288 */ 2289 2290 if (suser(p) != 0) { 2291 if (dbregs->dr7 & 0x3) { 2292 /* dr0 is enabled */ 2293 if (dbregs->dr0 >= VM_MAXUSER_ADDRESS) 2294 return (EINVAL); 2295 } 2296 2297 if (dbregs->dr7 & (0x3<<2)) { 2298 /* dr1 is enabled */ 2299 if (dbregs->dr1 >= VM_MAXUSER_ADDRESS) 2300 return (EINVAL); 2301 } 2302 2303 if (dbregs->dr7 & (0x3<<4)) { 2304 /* dr2 is enabled */ 2305 if (dbregs->dr2 >= VM_MAXUSER_ADDRESS) 2306 return (EINVAL); 2307 } 2308 2309 if (dbregs->dr7 & (0x3<<6)) { 2310 /* dr3 is enabled */ 2311 if (dbregs->dr3 >= VM_MAXUSER_ADDRESS) 2312 return (EINVAL); 2313 } 2314 } 2315 2316 pcb->pcb_dr0 = dbregs->dr0; 2317 pcb->pcb_dr1 = dbregs->dr1; 2318 pcb->pcb_dr2 = dbregs->dr2; 2319 pcb->pcb_dr3 = dbregs->dr3; 2320 pcb->pcb_dr6 = dbregs->dr6; 2321 pcb->pcb_dr7 = dbregs->dr7; 2322 2323 pcb->pcb_flags |= PCB_DBREGS; 2324 2325 return (0); 2326} 2327 2328/* 2329 * Return > 0 if a hardware breakpoint has been hit, and the 2330 * breakpoint was in user space. Return 0, otherwise. 2331 */ 2332int 2333user_dbreg_trap(void) 2334{ 2335 u_int32_t dr7, dr6; /* debug registers dr6 and dr7 */ 2336 u_int32_t bp; /* breakpoint bits extracted from dr6 */ 2337 int nbp; /* number of breakpoints that triggered */ 2338 caddr_t addr[4]; /* breakpoint addresses */ 2339 int i; 2340 2341 dr7 = rdr7(); 2342 if ((dr7 & 0x000000ff) == 0) { 2343 /* 2344 * all GE and LE bits in the dr7 register are zero, 2345 * thus the trap couldn't have been caused by the 2346 * hardware debug registers 2347 */ 2348 return 0; 2349 } 2350 2351 nbp = 0; 2352 dr6 = rdr6(); 2353 bp = dr6 & 0x0000000f; 2354 2355 if (!bp) { 2356 /* 2357 * None of the breakpoint bits are set meaning this 2358 * trap was not caused by any of the debug registers 2359 */ 2360 return 0; 2361 } 2362 2363 /* 2364 * at least one of the breakpoints were hit, check to see 2365 * which ones and if any of them are user space addresses 2366 */ 2367 2368 if (bp & 0x01) { 2369 addr[nbp++] = (caddr_t)rdr0(); 2370 } 2371 if (bp & 0x02) { 2372 addr[nbp++] = (caddr_t)rdr1(); 2373 } 2374 if (bp & 0x04) { 2375 addr[nbp++] = (caddr_t)rdr2(); 2376 } 2377 if (bp & 0x08) { 2378 addr[nbp++] = (caddr_t)rdr3(); 2379 } 2380 2381 for (i=0; i<nbp; i++) { 2382 if (addr[i] < 2383 (caddr_t)VM_MAXUSER_ADDRESS) { 2384 /* 2385 * addr[i] is in user space 2386 */ 2387 return nbp; 2388 } 2389 } 2390 2391 /* 2392 * None of the breakpoints are in user space. 
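 * Returning 0 here (as in the early-out cases above) reports that no
 * user-space hardware breakpoint fired.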
2393 */ 2394 return 0; 2395} 2396 2397 2398#ifndef DDB 2399void 2400Debugger(const char *msg) 2401{ 2402 printf("Debugger(\"%s\") called.\n", msg); 2403} 2404#endif /* no DDB */ 2405 2406#include <sys/disklabel.h> 2407 2408/* 2409 * Determine the size of the transfer, and make sure it is 2410 * within the boundaries of the partition. Adjust transfer 2411 * if needed, and signal errors or early completion. 2412 */ 2413int 2414bounds_check_with_label(struct bio *bp, struct disklabel *lp, int wlabel) 2415{ 2416 struct partition *p = lp->d_partitions + dkpart(bp->bio_dev); 2417 int labelsect = lp->d_partitions[0].p_offset; 2418 int maxsz = p->p_size, 2419 sz = (bp->bio_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT; 2420 2421 /* overwriting disk label ? */ 2422 /* XXX should also protect bootstrap in first 8K */ 2423 if (bp->bio_blkno + p->p_offset <= LABELSECTOR + labelsect && 2424#if LABELSECTOR != 0 2425 bp->bio_blkno + p->p_offset + sz > LABELSECTOR + labelsect && 2426#endif 2427 (bp->bio_cmd == BIO_WRITE) && wlabel == 0) { 2428 bp->bio_error = EROFS; 2429 goto bad; 2430 } 2431 2432#if defined(DOSBBSECTOR) && defined(notyet) 2433 /* overwriting master boot record? */ 2434 if (bp->bio_blkno + p->p_offset <= DOSBBSECTOR && 2435 (bp->bio_cmd == BIO_WRITE) && wlabel == 0) { 2436 bp->bio_error = EROFS; 2437 goto bad; 2438 } 2439#endif 2440 2441 /* beyond partition? */ 2442 if (bp->bio_blkno < 0 || bp->bio_blkno + sz > maxsz) { 2443 /* if exactly at end of disk, return an EOF */ 2444 if (bp->bio_blkno == maxsz) { 2445 bp->bio_resid = bp->bio_bcount; 2446 return(0); 2447 } 2448 /* or truncate if part of it fits */ 2449 sz = maxsz - bp->bio_blkno; 2450 if (sz <= 0) { 2451 bp->bio_error = EINVAL; 2452 goto bad; 2453 } 2454 bp->bio_bcount = sz << DEV_BSHIFT; 2455 } 2456 2457 bp->bio_pblkno = bp->bio_blkno + p->p_offset; 2458 return(1); 2459 2460bad: 2461 bp->bio_flags |= BIO_ERROR; 2462 return(-1); 2463} 2464 2465#ifdef DDB 2466 2467/* 2468 * Provide inb() and outb() as functions. They are normally only 2469 * available as macros calling inlined functions, thus cannot be 2470 * called inside DDB. 2471 * 2472 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined. 2473 */ 2474 2475#undef inb 2476#undef outb 2477 2478/* silence compiler warnings */ 2479u_char inb(u_int); 2480void outb(u_int, u_char); 2481 2482u_char 2483inb(u_int port) 2484{ 2485 u_char data; 2486 /* 2487 * We use %%dx and not %1 here because i/o is done at %dx and not at 2488 * %edx, while gcc generates inferior code (movw instead of movl) 2489 * if we tell it to load (u_short) port. 2490 */ 2491 __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port)); 2492 return (data); 2493} 2494 2495void 2496outb(u_int port, u_char data) 2497{ 2498 u_char al; 2499 /* 2500 * Use an unnecessary assignment to help gcc's register allocator. 2501 * This make a large difference for gcc-1.40 and a tiny difference 2502 * for gcc-2.6.0. For gcc-1.40, al had to be ``asm("ax")'' for 2503 * best results. gcc-2.6.0 can't handle this. 2504 */ 2505 al = data; 2506 __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port)); 2507} 2508 2509#endif /* DDB */ 2510