machdep.c revision 76827
/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 * $FreeBSD: head/sys/amd64/amd64/machdep.c 76827 2001-05-19 01:28:09Z alfred $
 */

#include "opt_atalk.h"
#include "opt_compat.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_ipx.h"
#include "opt_isa.h"
#include "opt_maxmem.h"
#include "opt_msgbuf.h"
#include "opt_npx.h"
#include "opt_perfmon.h"
#include "opt_userconfig.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/smp.h>
#include <sys/callout.h>
#include <sys/msgbuf.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/user.h>
#include <sys/exec.h>
#include <sys/cons.h>

#include <ddb/ddb.h>

#include <net/netisr.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/reg.h>
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/bootinfo.h>
#include <machine/md_var.h>
#include <machine/pc/bios.h>
#include <machine/pcb_ext.h>		/* pcb.h included via sys/user.h */
#include <machine/globals.h>
#include <machine/intrcnt.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif

#ifdef OLD_BUS_ARCH
#include <i386/isa/isa_device.h>
#endif
#include <i386/isa/icu.h>
#include <i386/isa/intr_machdep.h>
#include <isa/rtc.h>
#include <machine/vm86.h>
#include <sys/ptrace.h>
#include <machine/sigframe.h>

extern void init386 __P((int first));
extern void dblfault_handler __P((void));

extern void printcpuinfo(void);	/* XXX header file */
extern void earlysetcpuclass(void);	/* same header file */
extern void finishidentcpu(void);
extern void panicifcpuunsupported(void);
extern void initializecpu(void);

#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)

static void cpu_startup __P((void *));
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)

int	_udatasel, _ucodesel;
u_int	atdevbase;

#if defined(SWTCH_OPTIM_STATS)
extern int swtch_optim_stats;
SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats,
	CTLFLAG_RD, &swtch_optim_stats, 0, "");
SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count,
	CTLFLAG_RD, &tlb_flush_count, 0, "");
#endif

#ifdef PC98
static int	ispc98 = 1;
#else
static int	ispc98 = 0;
#endif
SYSCTL_INT(_machdep, OID_AUTO, ispc98, CTLFLAG_RD, &ispc98, 0, "");

int physmem = 0;
int cold = 1;

static void osendsig __P((sig_t catcher, int sig, sigset_t *mask, u_long code));

static int
sysctl_hw_physmem(SYSCTL_HANDLER_ARGS)
{
	int error = sysctl_handle_int(oidp, 0, ctob(physmem), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_physmem, "I", "");

static int
sysctl_hw_usermem(SYSCTL_HANDLER_ARGS)
{
	int error = sysctl_handle_int(oidp, 0,
		ctob(physmem - cnt.v_wire_count), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_usermem, "I", "");

static int
sysctl_hw_availpages(SYSCTL_HANDLER_ARGS)
{
	int error = sysctl_handle_int(oidp, 0,
		i386_btop(avail_end - avail_start), req);
	return (error);
}

SYSCTL_PROC(_hw, OID_AUTO, availpages, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_availpages, "I", "");

static int
sysctl_machdep_msgbuf(SYSCTL_HANDLER_ARGS)
{
	int error;

	/* Unwind the buffer, so that it's linear (possibly starting with
	 * some initial nulls).
	 */
	error=sysctl_handle_opaque(oidp,msgbufp->msg_ptr+msgbufp->msg_bufr,
	    msgbufp->msg_size-msgbufp->msg_bufr,req);
	if(error) return(error);
	if(msgbufp->msg_bufr>0) {
		error=sysctl_handle_opaque(oidp,msgbufp->msg_ptr,
		    msgbufp->msg_bufr,req);
	}
	return(error);
}

SYSCTL_PROC(_machdep, OID_AUTO, msgbuf, CTLTYPE_STRING|CTLFLAG_RD,
	0, 0, sysctl_machdep_msgbuf, "A", "Contents of kernel message buffer");

static int msgbuf_clear;

static int
sysctl_machdep_msgbuf_clear(SYSCTL_HANDLER_ARGS)
{
	int error;
	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
		req);
	if (!error && req->newptr) {
		/* Clear the buffer and reset write pointer */
		bzero(msgbufp->msg_ptr,msgbufp->msg_size);
		msgbufp->msg_bufr=msgbufp->msg_bufx=0;
		msgbuf_clear=0;
	}
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, msgbuf_clear, CTLTYPE_INT|CTLFLAG_RW,
	&msgbuf_clear, 0, sysctl_machdep_msgbuf_clear, "I",
	"Clear kernel message buffer");

int Maxmem = 0;
long dumplo;

vm_offset_t phys_avail[10];

/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)

static vm_offset_t buffer_sva, buffer_eva;
vm_offset_t clean_sva, clean_eva;
static vm_offset_t pager_sva, pager_eva;
static struct trapframe proc0_tf;
#ifndef SMP
static struct globaldata __globaldata;
#endif

struct mtx sched_lock;
struct mtx Giant;

static void
cpu_startup(dummy)
	void *dummy;
{
	register unsigned i;
	register caddr_t v;
	vm_offset_t maxaddr;
	vm_size_t size = 0;
	int firstaddr;
	vm_offset_t minaddr;
	int physmem_est;

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	mtx_lock(&vm_mtx);
	earlysetcpuclass();
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory = %u (%uK bytes)\n", ptoa(Maxmem), ptoa(Maxmem) / 1024);
	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			unsigned int size1 = phys_avail[indx + 1] - phys_avail[indx];

			printf("0x%08x - 0x%08x, %u bytes (%u pages)\n",
			    phys_avail[indx], phys_avail[indx + 1] - 1, size1,
			    size1 / PAGE_SIZE);
		}
	}

	/*
	 * Calculate callout wheel size
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	/*
	 * Allocate space for system data structures.
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */

	/*
	 * Make two passes.  The first pass calculates how much memory is
	 * needed and allocates it.  The second pass assigns virtual
	 * addresses to the various data structures.
	 */
	firstaddr = 0;
again:
	v = (caddr_t)firstaddr;

#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))

	valloc(callout, struct callout, ncallout);
	valloc(callwheel, struct callout_tailq, callwheelsize);

	/*
	 * Discount the physical memory larger than the size of kernel_map
	 * to avoid eating up all of KVA space.
	 */
	if (kernel_map->first_free == NULL) {
		printf("Warning: no free entries in kernel_map.\n");
		physmem_est = physmem;
	} else
		physmem_est = min(physmem, kernel_map->max_offset - kernel_map->min_offset);

	/*
	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
	 * For the first 64MB of ram nominally allocate sufficient buffers to
	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
	 * buffers to cover 1/20 of our ram over 64MB.
	 *
	 * factor represents the 1/4 x ram conversion.
	 */
	if (nbuf == 0) {
		int factor = 4 * BKVASIZE / PAGE_SIZE;

		nbuf = 50;
		if (physmem_est > 1024)
			nbuf += min((physmem_est - 1024) / factor, 16384 / factor);
		if (physmem_est > 16384)
			nbuf += (physmem_est - 16384) * 2 / (factor * 5);
	}

	/*
	 * Do not allow the buffer_map to be more than 1/2 the size of the
	 * kernel_map.
	 */
	if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) /
	    (BKVASIZE * 2)) {
		nbuf = (kernel_map->max_offset - kernel_map->min_offset) /
		    (BKVASIZE * 2);
		printf("Warning: nbufs capped at %d\n", nbuf);
	}

	nswbuf = max(min(nbuf/4, 256), 16);

	valloc(swbuf, struct buf, nswbuf);
	valloc(buf, struct buf, nbuf);
	v = bufhashinit(v);

	/*
	 * End of first pass, size has been calculated so allocate memory
	 */
	if (firstaddr == 0) {
		size = (vm_size_t)(v - firstaddr);
		firstaddr = (int)kmem_alloc(kernel_map, round_page(size));
		if (firstaddr == 0)
			panic("startup: no room for tables");
		goto again;
	}

	/*
	 * End of second pass, addresses have been assigned
	 */
	if ((vm_size_t)(v - firstaddr) != size)
		panic("startup: table size inconsistency");

	clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
			(nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
	buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
				(nbuf*BKVASIZE));
	buffer_map->system_map = 1;
	pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
				(nswbuf*MAXPHYS) + pager_map_size);
	pager_map->system_map = 1;
	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
				(16*(ARG_MAX+(PAGE_SIZE*3))));

	mtx_unlock(&vm_mtx);
	/*
	 * XXX: Mbuf system machine-specific initializations should
	 * go here, if anywhere.
	 */

	/*
	 * Initialize callouts
	 */
	SLIST_INIT(&callfree);
	for (i = 0; i < ncallout; i++) {
		callout_init(&callout[i], 0);
		callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
	}

	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&callwheel[i]);
	}

	mtx_init(&callout_lock, "callout", MTX_SPIN | MTX_RECURSE);

#if defined(USERCONFIG)
	userconfig();
	cninit();		/* the preferred console may have changed */
#endif

	printf("avail memory = %u (%uK bytes)\n", ptoa(cnt.v_free_count),
	    ptoa(cnt.v_free_count) / 1024);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();

	globaldata_register(GLOBALDATA);
#ifndef SMP
	/* For SMP, we delay the cpu_setregs() until after SMP startup. */
	cpu_setregs();
#endif
}

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
static void
osendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct osigframe sf;
	struct osigframe *fp;
	struct proc *p;
	struct sigacts *psp;
	struct trapframe *regs;
	int oonstack;

	p = curproc;
	PROC_LOCK(p);
	psp = p->p_sigacts;
	regs = p->p_md.md_regs;
	oonstack = sigonstack(regs->tf_esp);

	/* Allocate and validate space for the signal handler context. */
	if ((p->p_flag & P_ALTSTACK) && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct osigframe *)(p->p_sigstk.ss_sp +
		    p->p_sigstk.ss_size - sizeof(struct osigframe));
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct osigframe *)regs->tf_esp - 1;
	PROC_UNLOCK(p);

	/*
	 * grow_stack() will return 0 if *fp does not fit inside the stack
	 * and the stack can not be grown.
	 * useracc() will return FALSE if access is denied.
	 */
	if (grow_stack(p, (int)fp) == 0 ||
	    !useracc((caddr_t)fp, sizeof(*fp), VM_PROT_WRITE)) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		PROC_LOCK(p);
		SIGACTION(p, SIGILL) = SIG_DFL;
		SIGDELSET(p->p_sigignore, SIGILL);
		SIGDELSET(p->p_sigcatch, SIGILL);
		SIGDELSET(p->p_sigmask, SIGILL);
		psignal(p, SIGILL);
		PROC_UNLOCK(p);
		return;
	}

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
	PROC_LOCK(p);
	if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_arg2 = (register_t)&fp->sf_siginfo;
		sf.sf_siginfo.si_signo = sig;
		sf.sf_siginfo.si_code = code;
		sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_arg2 = code;
		sf.sf_addr = regs->tf_err;
		sf.sf_ahu.sf_handler = catcher;
	}
	PROC_UNLOCK(p);

	/* Save most if not all of trap frame. */
	sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
	sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
	sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
	sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
	sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
	sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
	sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
	sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
	sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
	sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
	sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
	sf.sf_siginfo.si_sc.sc_gs = rgs();
	sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;

	/* Build the signal context to be used by osigreturn(). */
	sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
	SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
	sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
	sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
	sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
	sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
	sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
	sf.sf_siginfo.si_sc.sc_err = regs->tf_err;

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		/* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86;

		sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
		sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
		sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
		sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_siginfo.si_sc.sc_ps =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/* See sendsig() for comments. */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP);
	}

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&sf, fp, sizeof(*fp)) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		PROC_LOCK(p);
		sigexit(p, SIGILL);
		/* NOTREACHED */
	}

	regs->tf_esp = (int)fp;
	regs->tf_eip = PS_STRINGS - szosigcode;
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	load_gs(_udatasel);
	regs->tf_ss = _udatasel;
}

void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct sigframe sf;
	struct proc *p;
	struct sigacts *psp;
	struct trapframe *regs;
	struct sigframe *sfp;
	int oonstack;

	p = curproc;
	PROC_LOCK(p);
	psp = p->p_sigacts;
	if (SIGISMEMBER(psp->ps_osigset, sig)) {
		PROC_UNLOCK(p);
		osendsig(catcher, sig, mask, code);
		return;
	}
	regs = p->p_md.md_regs;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = p->p_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (p->p_flag & P_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));

	/* Allocate and validate space for the signal handler context. */
	if ((p->p_flag & P_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe *)(p->p_sigstk.ss_sp +
		    p->p_sigstk.ss_size - sizeof(struct sigframe));
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct sigframe *)regs->tf_esp - 1;
	PROC_UNLOCK(p);

	/*
	 * grow_stack() will return 0 if *sfp does not fit inside the stack
	 * and the stack can not be grown.
	 * useracc() will return FALSE if access is denied.
	 */
	if (grow_stack(p, (int)sfp) == 0 ||
	    !useracc((caddr_t)sfp, sizeof(*sfp), VM_PROT_WRITE)) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
#ifdef DEBUG
		printf("process %d has trashed its stack\n", p->p_pid);
#endif
		PROC_LOCK(p);
		SIGACTION(p, SIGILL) = SIG_DFL;
		SIGDELSET(p->p_sigignore, SIGILL);
		SIGDELSET(p->p_sigcatch, SIGILL);
		SIGDELSET(p->p_sigmask, SIGILL);
		psignal(p, SIGILL);
		PROC_UNLOCK(p);
		return;
	}

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	PROC_LOCK(p);
	if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill siginfo structure. */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = code;
		sf.sf_si.si_addr = (void *)regs->tf_err;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = code;
		sf.sf_addr = regs->tf_err;
		sf.sf_ahu.sf_handler = catcher;
	}
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * We should never have PSL_T set when returning from vm86
		 * mode.  It may be set here if we deliver a signal before
		 * getting to vm86 mode, so turn it off.
		 *
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP);
	}

	/* Copy the sigframe out to the user's stack. */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		PROC_LOCK(p);
		sigexit(p, SIGILL);
		/* NOTREACHED */
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 */
int
osigreturn(p, uap)
	struct proc *p;
	struct osigreturn_args /* {
		struct osigcontext *sigcntxp;
	} */ *uap;
{
	struct trapframe *regs;
	struct osigcontext *scp;
	int eflags;

	regs = p->p_md.md_regs;
	scp = uap->sigcntxp;
	if (!useracc((caddr_t)scp, sizeof(*scp), VM_PROT_READ))
		return (EFAULT);
	eflags = scp->sc_ps;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (p->p_addr->u_pcb.pcb_ext == 0)
			return (EINVAL);
		vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(p, SIGBUS, 0);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) | (eflags & VM_USERCHANGE) | PSL_VM;
		}
		tf->tf_vm86_ds = scp->sc_ds;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		if (!CS_SECURE(scp->sc_cs)) {
			trapsignal(p, SIGBUS, T_PROTFLT);
			return (EINVAL);
		}
		regs->tf_ds = scp->sc_ds;
		regs->tf_es = scp->sc_es;
		regs->tf_fs = scp->sc_fs;
	}

	/* Restore remaining registers. */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;

	PROC_LOCK(p);
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
	if (scp->sc_onstack & 1)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	SIGSETOLD(p->p_sigmask, scp->sc_mask);
	SIG_CANTMASK(p->p_sigmask);
	PROC_UNLOCK(p);
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;
	return (EJUSTRETURN);
}

int
sigreturn(p, uap)
	struct proc *p;
	struct sigreturn_args /* {
		ucontext_t *sigcntxp;
	} */ *uap;
{
	struct trapframe *regs;
	ucontext_t *ucp;
	int cs, eflags;

	ucp = uap->sigcntxp;
	if (!useracc((caddr_t)ucp, sizeof(struct osigcontext), VM_PROT_READ))
		return (EFAULT);
	if (((struct osigcontext *)ucp)->sc_trapno == 0x01d516)
		return (osigreturn(p, (struct osigreturn_args *)uap));

	/*
	 * Since ucp is not an osigcontext but a ucontext_t, we have to
	 * check again if all of it is accessible.  A ucontext_t is
	 * much larger, so instead of just checking for the pointer
	 * being valid for the size of an osigcontext, now check for
	 * it being valid for a whole, new-style ucontext_t.
	 */
	if (!useracc((caddr_t)ucp, sizeof(*ucp), VM_PROT_READ))
		return (EFAULT);

	regs = p->p_md.md_regs;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (p->p_addr->u_pcb.pcb_ext == 0)
			return (EINVAL);
		vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(p, SIGBUS, 0);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) | (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			printf("sigreturn: eflags = 0x%x\n", eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			printf("sigreturn: cs = 0x%x\n", cs);
			trapsignal(p, SIGBUS, T_PROTFLT);
			return (EINVAL);
		}

		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

	PROC_LOCK(p);
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
	if (ucp->uc_mcontext.mc_onstack & 1)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	p->p_sigmask = ucp->uc_sigmask;
	SIG_CANTMASK(p->p_sigmask);
	PROC_UNLOCK(p);
	return (EJUSTRETURN);
}

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		__asm__ ("hlt");
}

/*
 * Hook to idle the CPU when possible.  This currently only works in
 * the !SMP case, as there is no clean way to ensure that a CPU will be
 * woken when there is work available for it.
 */
static int	cpu_idle_hlt = 1;
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
    &cpu_idle_hlt, 0, "Idle loop HLT enable");

/*
 * Note that we have to be careful here to avoid a race between checking
 * procrunnable() and actually halting.  If we don't do this, we may waste
 * the time between calling hlt and the next interrupt even though there
 * is a runnable process.
 */
void
cpu_idle(void)
{
#ifndef SMP
	if (cpu_idle_hlt) {
		disable_intr();
		if (procrunnable())
			enable_intr();
		else {
			enable_intr();
			__asm __volatile("hlt");
		}
	}
#endif
}

/*
 * Clear registers on exec
 */
void
setregs(p, entry, stack, ps_strings)
	struct proc *p;
	u_long entry;
	u_long stack;
	u_long ps_strings;
{
	struct trapframe *regs = p->p_md.md_regs;
	struct pcb *pcb = &p->p_addr->u_pcb;

	if (pcb->pcb_ldt)
		user_ldt_free(pcb);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = entry;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = ps_strings;

	/* reset %gs as well */
	if (pcb == PCPU_GET(curpcb))
		load_gs(_udatasel);
	else
		pcb->pcb_gs = _udatasel;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == PCPU_GET(curpcb)) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	p->p_addr->u_pcb.pcb_flags &= ~FP_SOFTFP;

	/*
	 * Arrange to trap the next npx or `fwait' instruction (see npx.c
	 * for why fwait must be trapped at least if there is an npx or an
	 * emulator).  This is mainly to handle the case where npx0 is not
	 * configured, since the npx routines normally set up the trap
	 * otherwise.  It should be done only at boot time, but doing it
	 * here allows modifying `npx_exists' for testing the emulator on
	 * systems with an npx.
	 */
	load_cr0(rcr0() | CR0_MP | CR0_TS);

#ifdef DEV_NPX
	/* Initialize the npx (if any) for the current process. */
	npxinit(__INITIAL_NPXCW__);
#endif

	/*
	 * XXX - Linux emulator
	 * Make sure edx is 0x0 on entry.  Linux binaries depend
	 * on it.
	 */
	p->p_retval[1] = 0;
}

void
cpu_setregs(void)
{
	unsigned int cr0;

	cr0 = rcr0();
	cr0 |= CR0_NE;			/* Done by npxinit() */
	cr0 |= CR0_MP | CR0_TS;		/* Done at every execve() too. */
#ifndef I386_CPU
	cr0 |= CR0_WP | CR0_AM;
#endif
	load_cr0(cr0);
	load_gs(_udatasel);
}

static int
sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS)
{
	int error;
	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
		req);
	if (!error && req->newptr)
		resettodr();
	return (error);
}

SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
	&adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");

SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set,
	CTLFLAG_RW, &disable_rtc_set, 0, "");

SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo,
	CTLFLAG_RD, &bootinfo, bootinfo, "");

SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock,
	CTLFLAG_RW, &wall_cmos_clock, 0, "");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int _default_ldt;
union descriptor gdt[NGDT * MAXCPU];	/* global descriptor table */
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */
#ifdef SMP
/* table descriptors - used to load tables by microp */
struct region_descriptor r_gdt, r_idt;
#endif

int private_tss;			/* flag indicating private tss */

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];

extern struct user *proc0paddr;


/* software prototypes -- in more palatable form */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	0x0,			/* segment base address */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GCODE_SEL	1 Code Descriptor for kernel */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GDATA_SEL	2 Data Descriptor for kernel */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GPRIV_SEL	3 SMP Per-Processor Private Data Descriptor */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GPROC0_SEL	4 Proc 0 Tss Descriptor */
{
	0x0,			/* segment base address */
	sizeof(struct i386tss)-1,/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GLDT_SEL	5 LDT Descriptor */
{	(int) ldt,		/* segment base address */
	sizeof(ldt)-1,		/* length - all address space */
	SDT_SYSLDT,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GUSERLDT_SEL	6 User LDT Descriptor per process */
{	(int) ldt,		/* segment base address */
	(512 * sizeof(union descriptor)-1),	/* length */
	SDT_SYSLDT,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GTGATE_SEL	7 Null Descriptor - Placeholder */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
{	0x400,			/* segment base address */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GPANIC_SEL	9 Panic Tss Descriptor */
{	(int) &dblfault_tss,	/* segment base address */
	sizeof(struct i386tss)-1,/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GBIOSCODE32_SEL 10 BIOS 32-bit interface (32bit Code) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSCODE16_SEL 11 BIOS 32-bit interface (16bit Code) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSDATA_SEL 12 BIOS 32-bit interface (Data) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSUTIL_SEL 13 BIOS 16-bit interface (Utility) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSARGS_SEL 14 BIOS 16-bit interface (Arguments) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
};

static struct soft_segment_descriptor ldt_segs[] = {
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Code Descriptor for user */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Data Descriptor for user */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
};

void
setidt(idx, func, typ, dpl, selec)
	int idx;
	inthand_t *func;
	int typ;
	int dpl;
	int selec;
{
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = (int)func;
	ip->gd_selector = selec;
	ip->gd_stkcpy = 0;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((int)func)>>16 ;
}

#define	IDTVEC(name)	__CONCAT(X,name)

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);

void
sdtossd(sd, ssd)
	struct segment_descriptor *sd;
	struct soft_segment_descriptor *ssd;
{
	ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type = sd->sd_type;
	ssd->ssd_dpl = sd->sd_dpl;
	ssd->ssd_p = sd->sd_p;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran = sd->sd_gran;
}

#define PHYSMAP_SIZE	(2 * 8)

/*
 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * If we cannot accurately determine the physical memory map, then use
 * the value from the 0xE801 call, and failing that, the RTC.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 */
static void
getmemsize(int first)
{
	int i, physmap_idx, pa_indx;
	u_int basemem, extmem;
	struct vm86frame vmf;
	struct vm86context vmc;
	vm_offset_t pa, physmap[PHYSMAP_SIZE];
	pt_entry_t pte;
	const char *cp;
	struct bios_smap *smap;

	bzero(&vmf, sizeof(struct vm86frame));
	bzero(physmap, sizeof(physmap));

	/*
	 * Perform "base memory" related probes & setup
	 */
	vm86_intcall(0x12, &vmf);
	basemem = vmf.vmf_ax;
	if (basemem > 640) {
		printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
			basemem);
		basemem = 640;
	}

	/*
	 * XXX if biosbasemem is now < 640, there is a `hole'
	 * between the end of base memory and the start of
	 * ISA memory.  The hole may be empty or it may
	 * contain BIOS code or data.  Map it read/write so
	 * that the BIOS can write to it.  (Memory from 0 to
	 * the physical end of the kernel is mapped read-only
	 * to begin with and then parts of it are remapped.
	 * The parts that aren't remapped form holes that
	 * remain read-only and are unused by the kernel.
	 * The base memory area is below the physical end of
	 * the kernel and right now forms a read-only hole.
	 * The part of it from PAGE_SIZE to
	 * (trunc_page(biosbasemem * 1024) - 1) will be
	 * remapped and used by the kernel later.)
	 *
	 * This code is similar to the code used in
	 * pmap_mapdev, but since no memory needs to be
	 * allocated we simply change the mapping.
	 */
	for (pa = trunc_page(basemem * 1024);
	     pa < ISA_HOLE_START; pa += PAGE_SIZE) {
		pte = (pt_entry_t)vtopte(pa + KERNBASE);
		*pte = pa | PG_RW | PG_V;
	}

	/*
	 * if basemem != 640, map pages r/w into vm86 page table so
	 * that the bios can scribble on it.
	 */
	pte = (pt_entry_t)vm86paddr;
	for (i = basemem / 4; i < 160; i++)
		pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;

	/*
	 * map page 1 R/W into the kernel page table so we can use it
	 * as a buffer.  The kernel will unmap this page later.
	 */
	pte = (pt_entry_t)vtopte(KERNBASE + (1 << PAGE_SHIFT));
	*pte = (1 << PAGE_SHIFT) | PG_RW | PG_V;

	/*
	 * get memory map with INT 15:E820
	 */
	vmc.npages = 0;
	smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
	vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);

	physmap_idx = 0;
	vmf.vmf_ebx = 0;
	do {
		vmf.vmf_eax = 0xE820;
		vmf.vmf_edx = SMAP_SIG;
		vmf.vmf_ecx = sizeof(struct bios_smap);
		i = vm86_datacall(0x15, &vmf, &vmc);
		if (i || vmf.vmf_eax != SMAP_SIG)
			break;
		if (boothowto & RB_VERBOSE)
			printf("SMAP type=%02x base=%08x %08x len=%08x %08x\n",
				smap->type,
				*(u_int32_t *)((char *)&smap->base + 4),
				(u_int32_t)smap->base,
				*(u_int32_t *)((char *)&smap->length + 4),
				(u_int32_t)smap->length);

		if (smap->type != 0x01)
			goto next_run;

		if (smap->length == 0)
			goto next_run;

		if (smap->base >= 0xffffffff) {
			printf("%uK of memory above 4GB ignored\n",
			    (u_int)(smap->length / 1024));
			goto next_run;
		}

		for (i = 0; i <= physmap_idx; i += 2) {
			if (smap->base < physmap[i + 1]) {
				if (boothowto & RB_VERBOSE)
					printf(
	"Overlapping or non-monotonic memory region, ignoring second region\n");
				goto next_run;
			}
		}

		if (smap->base == physmap[physmap_idx + 1]) {
			physmap[physmap_idx + 1] += smap->length;
			goto next_run;
		}

		physmap_idx += 2;
		if (physmap_idx == PHYSMAP_SIZE) {
			printf(
		"Too many segments in the physical address map, giving up\n");
			break;
		}
		physmap[physmap_idx] = smap->base;
		physmap[physmap_idx + 1] = smap->base + smap->length;
next_run:
	} while (vmf.vmf_ebx != 0);

	if (physmap[1] != 0)
		goto physmap_done;

	/*
	 * If we failed above, try memory map with INT 15:E801
	 */
	vmf.vmf_ax = 0xE801;
	if (vm86_intcall(0x15, &vmf) == 0) {
		extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
	} else {
#if 0
		vmf.vmf_ah = 0x88;
		vm86_intcall(0x15, &vmf);
		extmem = vmf.vmf_ax;
#else
		/*
		 * Prefer the RTC value for extended memory.
		 */
		extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
#endif
	}

	/*
	 * Special hack for chipsets that still remap the 384k hole when
	 * there's 16MB of memory - this really confuses people that
	 * are trying to use bus mastering ISA controllers with the
	 * "16MB limit"; they only have 16MB, but the remapping puts
	 * them beyond the limit.
	 *
	 * If extended memory is between 15-16MB (16-17MB phys address range),
	 * chop it to 15MB.
	 */
	if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
		extmem = 15 * 1024;

	physmap[0] = 0;
	physmap[1] = basemem * 1024;
	physmap_idx = 2;
	physmap[physmap_idx] = 0x100000;
	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;

physmap_done:
	/*
	 * Now, physmap contains a map of physical memory.
	 */

#ifdef SMP
	/* make hole for AP bootstrap code */
	physmap[1] = mp_bootaddress(physmap[1] / 1024);

	/* look for the MP hardware - needed for apic addresses */
	i386_mp_probe();
#endif

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".  We may adjust this
	 * based on ``hw.physmem'' and the results of the memory test.
	 */
	Maxmem = atop(physmap[physmap_idx + 1]);

#ifdef MAXMEM
	Maxmem = MAXMEM / 4;
#endif

	/*
	 * hw.physmem is a size in bytes; we also allow k, m, and g suffixes
	 * for the appropriate modifiers.  This overrides MAXMEM.
	 */
	if ((cp = getenv("hw.physmem")) != NULL) {
		u_int64_t AllowMem, sanity;
		char *ep;

		sanity = AllowMem = strtouq(cp, &ep, 0);
		if ((ep != cp) && (*ep != 0)) {
			switch(*ep) {
			case 'g':
			case 'G':
				AllowMem <<= 10;
			case 'm':
			case 'M':
				AllowMem <<= 10;
			case 'k':
			case 'K':
				AllowMem <<= 10;
				break;
			default:
				AllowMem = sanity = 0;
			}
			if (AllowMem < sanity)
				AllowMem = 0;
		}
		if (AllowMem == 0)
			printf("Ignoring invalid memory size of '%s'\n", cp);
		else
			Maxmem = atop(AllowMem);
	}

	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
	    (boothowto & RB_VERBOSE))
		printf("Physical memory use set to %uK\n", Maxmem * 4);

	/*
	 * If Maxmem has been increased beyond what the system has detected,
	 * extend the last memory segment to the new limit.
	 */
	if (atop(physmap[physmap_idx + 1]) < Maxmem)
		physmap[physmap_idx + 1] = ptoa(Maxmem);

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(first, 0);

	/*
	 * Size up each available chunk of physical memory.
	 */
	physmap[0] = PAGE_SIZE;		/* mask off page 0 */
	pa_indx = 0;
	phys_avail[pa_indx++] = physmap[0];
	phys_avail[pa_indx] = physmap[0];
#if 0
	pte = (pt_entry_t)vtopte(KERNBASE);
#else
	pte = (pt_entry_t)CMAP1;
#endif

	/*
	 * physmap is in bytes, so when converting to page boundaries,
	 * round up the start address and round down the end address.
	 */
	for (i = 0; i <= physmap_idx; i += 2) {
		vm_offset_t end;

		end = ptoa(Maxmem);
		if (physmap[i + 1] < end)
			end = trunc_page(physmap[i + 1]);
		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
			int tmp, page_bad;
#if 0
			int *ptr = 0;
#else
			int *ptr = (int *)CADDR1;
#endif

			/*
			 * block out kernel memory as not available.
			 */
			if (pa >= 0x100000 && pa < first)
				continue;

			page_bad = FALSE;

			/*
			 * map page into kernel: valid, read/write,non-cacheable
			 */
			*pte = pa | PG_V | PG_RW | PG_N;
			invltlb();

			tmp = *(int *)ptr;
			/*
			 * Test for alternating 1's and 0's
			 */
			*(volatile int *)ptr = 0xaaaaaaaa;
			if (*(volatile int *)ptr != 0xaaaaaaaa) {
				page_bad = TRUE;
			}
			/*
			 * Test for alternating 0's and 1's
			 */
			*(volatile int *)ptr = 0x55555555;
			if (*(volatile int *)ptr != 0x55555555) {
				page_bad = TRUE;
			}
			/*
			 * Test for all 1's
			 */
			*(volatile int *)ptr = 0xffffffff;
			if (*(volatile int *)ptr != 0xffffffff) {
				page_bad = TRUE;
			}
			/*
			 * Test for all 0's
			 */
			*(volatile int *)ptr = 0x0;
			if (*(volatile int *)ptr != 0x0) {
				page_bad = TRUE;
			}
			/*
			 * Restore original value.
			 */
			*(int *)ptr = tmp;

			/*
			 * Adjust array of valid/good pages.
			 */
			if (page_bad == TRUE) {
				continue;
			}
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer.  Otherwise start a new chunk.
			 * Note that "end" points one higher than end,
			 * making the range >= start and < end.
			 * If we're also doing a speculative memory
			 * test and we are at or past the end, bump up Maxmem
			 * so that we keep going.  The first bad page
			 * will terminate the loop.
			 */
			if (phys_avail[pa_indx] == pa) {
				phys_avail[pa_indx] += PAGE_SIZE;
			} else {
				pa_indx++;
				if (pa_indx == PHYS_AVAIL_ARRAY_END) {
					printf("Too many holes in the physical address space, giving up\n");
					pa_indx--;
					break;
				}
				phys_avail[pa_indx++] = pa;	/* start */
				phys_avail[pa_indx] = pa + PAGE_SIZE;	/* end */
			}
			physmem++;
		}
	}
	*pte = 0;
	invltlb();

	/*
	 * XXX
	 * The last chunk must contain at least one page plus the message
	 * buffer to avoid complicating other code (message buffer address
	 * calculation, etc.).
	 */
	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
	    round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
		phys_avail[pa_indx--] = 0;
		phys_avail[pa_indx--] = 0;
	}

	Maxmem = atop(phys_avail[pa_indx]);

	/* Trim off space for the message buffer. */
	phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);

	avail_end = phys_avail[pa_indx];
}

void
init386(first)
	int first;
{
	int x;
	struct gate_descriptor *gdp;
	int gsel_tss;
#ifndef SMP
	/* table descriptors - used to load tables by microp */
	struct region_descriptor r_gdt, r_idt;
#endif
	int off;

	proc0.p_addr = proc0paddr;

	atdevbase = ISA_HOLE_START + KERNBASE;

	if (bootinfo.bi_modulep) {
		preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
		preload_bootstrap_relocate(KERNBASE);
	} else {
		printf("WARNING: loader(8) metadata is missing!\n");
	}
	if (bootinfo.bi_envp)
		kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;

	/*
	 * make gdt memory segments, the code segment goes up to end of the
	 * page with etext in it, the data segment goes to the end of
	 * the address space
	 */
	/*
	 * XXX text protection is temporarily (?) disabled.  The limit was
The limit was 1850 * i386_btop(round_page(etext)) - 1. 1851 */ 1852 gdt_segs[GCODE_SEL].ssd_limit = i386_btop(0) - 1; 1853 gdt_segs[GDATA_SEL].ssd_limit = i386_btop(0) - 1; 1854#ifdef SMP 1855 gdt_segs[GPRIV_SEL].ssd_limit = 1856 i386_btop(sizeof(struct privatespace)) - 1; 1857 gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[0]; 1858 gdt_segs[GPROC0_SEL].ssd_base = 1859 (int) &SMP_prvspace[0].globaldata.gd_common_tss; 1860 SMP_prvspace[0].globaldata.gd_prvspace = &SMP_prvspace[0].globaldata; 1861#else 1862 gdt_segs[GPRIV_SEL].ssd_limit = 1863 i386_btop(sizeof(struct globaldata)) - 1; 1864 gdt_segs[GPRIV_SEL].ssd_base = (int) &__globaldata; 1865 gdt_segs[GPROC0_SEL].ssd_base = 1866 (int) &__globaldata.gd_common_tss; 1867 __globaldata.gd_prvspace = &__globaldata; 1868#endif 1869 1870 for (x = 0; x < NGDT; x++) { 1871#ifdef BDE_DEBUGGER 1872 /* avoid overwriting db entries with APM ones */ 1873 if (x >= GAPMCODE32_SEL && x <= GAPMDATA_SEL) 1874 continue; 1875#endif 1876 ssdtosd(&gdt_segs[x], &gdt[x].sd); 1877 } 1878 1879 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 1880 r_gdt.rd_base = (int) gdt; 1881 lgdt(&r_gdt); 1882 1883 /* setup curproc so that mutexes work */ 1884 PCPU_SET(curproc, &proc0); 1885 PCPU_SET(spinlocks, NULL); 1886 1887 LIST_INIT(&proc0.p_contested); 1888 1889 mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_RECURSE); 1890#ifdef SMP 1891 /* 1892 * Interrupts can happen very early, so initialize imen_mtx here, rather 1893 * than in init_locks(). 1894 */ 1895 mtx_init(&imen_mtx, "imen", MTX_SPIN); 1896#endif 1897 1898 /* 1899 * Giant is used early for at least debugger traps and unexpected traps. 1900 */ 1901 mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE); 1902 mtx_init(&proc0.p_mtx, "process lock", MTX_DEF); 1903 mtx_lock(&Giant); 1904 1905 /* make ldt memory segments */ 1906 /* 1907 * The data segment limit must not cover the user area because we 1908 * don't want the user area to be writable in copyout() etc. (page 1909 * level protection is lost in kernel mode on 386's). Also, we 1910 * don't want the user area to be writable directly (page level 1911 * protection of the user area is not available on 486's with 1912 * CR0_WP set, because there is no user-read/kernel-write mode). 1913 * 1914 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max. And it 1915 * should be spelled ...MAX_USER... 1916 */ 1917#define VM_END_USER_RW_ADDRESS VM_MAXUSER_ADDRESS 1918 /* 1919 * The code segment limit has to cover the user area until we move 1920 * the signal trampoline out of the user area. This is safe because 1921 * the code segment cannot be written to directly. 
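 *
 * The GSEL()/LSEL() constructions used below pack a descriptor-table index
 * together with a privilege level into an x86 segment selector (index in
 * bits 3..15, table indicator in bit 2, RPL in bits 0..1).  A rough
 * standalone illustration (the EX_ names are made up, not kernel macros):
 */
#if 0	/* illustrative sketch only, not part of this file */
#include <stdio.h>

#define EX_SEL_KPL	0			/* kernel privilege */
#define EX_SEL_UPL	3			/* user privilege */
#define EX_TI_LDT	(1 << 2)		/* selector refers to the LDT */
#define EX_GSEL(idx, rpl)	(((idx) << 3) | (rpl))
#define EX_LSEL(idx, rpl)	(((idx) << 3) | EX_TI_LDT | (rpl))

int
main(void)
{
	/* e.g. descriptor slot 4 named at kernel and at user privilege */
	printf("GDT selector, kernel: %#x\n", EX_GSEL(4, EX_SEL_KPL));
	printf("LDT selector, user:   %#x\n", EX_LSEL(4, EX_SEL_UPL));
	return (0);
}
#endif
/*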
1922 */ 1923#define VM_END_USER_R_ADDRESS (VM_END_USER_RW_ADDRESS + UPAGES * PAGE_SIZE) 1924 ldt_segs[LUCODE_SEL].ssd_limit = i386_btop(VM_END_USER_R_ADDRESS) - 1; 1925 ldt_segs[LUDATA_SEL].ssd_limit = i386_btop(VM_END_USER_RW_ADDRESS) - 1; 1926 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++) 1927 ssdtosd(&ldt_segs[x], &ldt[x].sd); 1928 1929 _default_ldt = GSEL(GLDT_SEL, SEL_KPL); 1930 lldt(_default_ldt); 1931 PCPU_SET(currentldt, _default_ldt); 1932 1933 /* exceptions */ 1934 for (x = 0; x < NIDT; x++) 1935 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1936 setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1937 setidt(1, &IDTVEC(dbg), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1938 setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1939 setidt(3, &IDTVEC(bpt), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL)); 1940 setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL)); 1941 setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1942 setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1943 setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1944 setidt(8, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL)); 1945 setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1946 setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1947 setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1948 setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1949 setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1950 setidt(14, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1951 setidt(15, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1952 setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1953 setidt(17, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1954 setidt(18, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1955 setidt(0x80, &IDTVEC(int0x80_syscall), 1956 SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL)); 1957 1958 r_idt.rd_limit = sizeof(idt0) - 1; 1959 r_idt.rd_base = (int) idt; 1960 lidt(&r_idt); 1961 1962 /* 1963 * We need this mutex before the console probe. 1964 */ 1965 mtx_init(&clock_lock, "clk", MTX_SPIN | MTX_RECURSE); 1966 1967 /* 1968 * Initialize the console before we print anything out. 1969 */ 1970 cninit(); 1971 1972#ifdef DEV_ISA 1973 isa_defaultirq(); 1974#endif 1975 1976#ifdef DDB 1977 kdb_init(); 1978 if (boothowto & RB_KDB) 1979 Debugger("Boot flags requested debugger"); 1980#endif 1981 1982 finishidentcpu(); /* Final stage of CPU initialization */ 1983 setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1984 setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1985 initializecpu(); /* Initialize CPU registers */ 1986 1987 /* make an initial tss so cpu can get interrupt stack on syscall! 
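 *
 * The tss_esp0 field set just below is the kernel stack pointer the CPU
 * loads on a user-to-kernel transition: the top of the U area (its base
 * plus UPAGES pages) with 16 bytes backed off.  A rough standalone
 * illustration of that arithmetic (EX_ names and sample values are made up):
 */
#if 0	/* illustrative sketch only, not part of this file */
#include <stdio.h>

#define EX_PAGE_SIZE	4096UL
#define EX_UPAGES	2UL		/* pretend size of the U area, in pages */

int
main(void)
{
	unsigned long u_area = 0xc6f42000UL;	/* pretend proc0 U-area base */

	/* The stack grows down, so the initial esp0 sits just under the
	 * top of the U area, 16 bytes backed off as in the code below. */
	unsigned long esp0 = u_area + EX_UPAGES * EX_PAGE_SIZE - 16;

	printf("tss_esp0 = %#lx\n", esp0);
	return (0);
}
#endif
/*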
*/ 1988 PCPU_SET(common_tss.tss_esp0, 1989 (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16); 1990 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL)); 1991 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); 1992 private_tss = 0; 1993 PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd); 1994 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt)); 1995 PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16); 1996 ltr(gsel_tss); 1997 1998 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 = 1999 dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)]; 2000 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 = 2001 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL); 2002 dblfault_tss.tss_cr3 = (int)IdlePTD; 2003 dblfault_tss.tss_eip = (int) dblfault_handler; 2004 dblfault_tss.tss_eflags = PSL_KERNEL; 2005 dblfault_tss.tss_ds = dblfault_tss.tss_es = 2006 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL); 2007 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL); 2008 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL); 2009 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL); 2010 2011 vm86_initialize(); 2012 getmemsize(first); 2013 2014 /* now running on new page tables, configured,and u/iom is accessible */ 2015 2016 /* Map the message buffer. */ 2017 for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE) 2018 pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off); 2019 2020 msgbufinit(msgbufp, MSGBUF_SIZE); 2021 2022 /* make a call gate to reenter kernel with */ 2023 gdp = &ldt[LSYS5CALLS_SEL].gd; 2024 2025 x = (int) &IDTVEC(lcall_syscall); 2026 gdp->gd_looffset = x; 2027 gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL); 2028 gdp->gd_stkcpy = 1; 2029 gdp->gd_type = SDT_SYS386CGT; 2030 gdp->gd_dpl = SEL_UPL; 2031 gdp->gd_p = 1; 2032 gdp->gd_hioffset = x >> 16; 2033 2034 /* XXX does this work? 
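 *
 * The gate assembled just above keeps its 32-bit entry point split across
 * gd_looffset and gd_hioffset.  A rough standalone illustration of that
 * split and of reassembling the offset (the sample address is arbitrary):
 */
#if 0	/* illustrative sketch only, not part of this file */
#include <stdio.h>

int
main(void)
{
	unsigned int entry = 0xc01234abu;	/* pretend handler address */
	unsigned short lo = entry & 0xffff;	/* -> gd_looffset */
	unsigned short hi = entry >> 16;	/* -> gd_hioffset */

	printf("lo = %#x, hi = %#x, rebuilt = %#x\n",
	    lo, hi, ((unsigned int)hi << 16) | lo);
	return (0);
}
#endif
/*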
*/ 2035 ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL]; 2036 ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL]; 2037 2038 /* transfer to user mode */ 2039 2040 _ucodesel = LSEL(LUCODE_SEL, SEL_UPL); 2041 _udatasel = LSEL(LUDATA_SEL, SEL_UPL); 2042 2043 /* setup proc 0's pcb */ 2044 proc0.p_addr->u_pcb.pcb_flags = 0; 2045 proc0.p_addr->u_pcb.pcb_cr3 = (int)IdlePTD; 2046 proc0.p_addr->u_pcb.pcb_ext = 0; 2047 proc0.p_md.md_regs = &proc0_tf; 2048} 2049 2050#if defined(I586_CPU) && !defined(NO_F00F_HACK) 2051static void f00f_hack(void *unused); 2052SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL); 2053 2054static void 2055f00f_hack(void *unused) { 2056 struct gate_descriptor *new_idt; 2057#ifndef SMP 2058 struct region_descriptor r_idt; 2059#endif 2060 vm_offset_t tmp; 2061 2062 if (!has_f00f_bug) 2063 return; 2064 2065 printf("Intel Pentium detected, installing workaround for F00F bug\n"); 2066 2067 r_idt.rd_limit = sizeof(idt0) - 1; 2068 2069 tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2); 2070 if (tmp == 0) 2071 panic("kmem_alloc returned 0"); 2072 if (((unsigned int)tmp & (PAGE_SIZE-1)) != 0) 2073 panic("kmem_alloc returned non-page-aligned memory"); 2074 /* Put the first seven entries in the lower page */ 2075 new_idt = (struct gate_descriptor*)(tmp + PAGE_SIZE - (7*8)); 2076 bcopy(idt, new_idt, sizeof(idt0)); 2077 r_idt.rd_base = (int)new_idt; 2078 lidt(&r_idt); 2079 idt = new_idt; 2080 mtx_lock(&vm_mtx); 2081 if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE, 2082 VM_PROT_READ, FALSE) != KERN_SUCCESS) 2083 panic("vm_map_protect failed"); 2084 mtx_unlock(&vm_mtx); 2085 return; 2086} 2087#endif /* defined(I586_CPU) && !NO_F00F_HACK */ 2088 2089int 2090ptrace_set_pc(p, addr) 2091 struct proc *p; 2092 unsigned long addr; 2093{ 2094 p->p_md.md_regs->tf_eip = addr; 2095 return (0); 2096} 2097 2098int 2099ptrace_single_step(p) 2100 struct proc *p; 2101{ 2102 p->p_md.md_regs->tf_eflags |= PSL_T; 2103 return (0); 2104} 2105 2106int ptrace_read_u_check(p, addr, len) 2107 struct proc *p; 2108 vm_offset_t addr; 2109 size_t len; 2110{ 2111 vm_offset_t gap; 2112 2113 if ((vm_offset_t) (addr + len) < addr) 2114 return EPERM; 2115 if ((vm_offset_t) (addr + len) <= sizeof(struct user)) 2116 return 0; 2117 2118 gap = (char *) p->p_md.md_regs - (char *) p->p_addr; 2119 2120 if ((vm_offset_t) addr < gap) 2121 return EPERM; 2122 if ((vm_offset_t) (addr + len) <= 2123 (vm_offset_t) (gap + sizeof(struct trapframe))) 2124 return 0; 2125 return EPERM; 2126} 2127 2128int ptrace_write_u(p, off, data) 2129 struct proc *p; 2130 vm_offset_t off; 2131 long data; 2132{ 2133 struct trapframe frame_copy; 2134 vm_offset_t min; 2135 struct trapframe *tp; 2136 2137 /* 2138 * Privileged kernel state is scattered all over the user area. 2139 * Only allow write access to parts of regs and to fpregs. 
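 *
 * The checks that follow only accept an int-sized write whose offset lies
 * entirely inside one of the two permitted windows.  A rough standalone
 * sketch of that window test (the ex_ name and sample numbers are made up):
 */
#if 0	/* illustrative sketch only, not part of this file */
#include <stdio.h>
#include <stddef.h>

/* True when a sizeof(int)-wide write at 'off' fits inside [base, base+len). */
static int
ex_in_window(size_t off, size_t base, size_t len)
{
	return (off >= base && off <= base + len - sizeof(int));
}

int
main(void)
{
	size_t base = 0x1a0, len = 68;			/* made-up window */

	printf("%d %d %d\n",
	    ex_in_window(base, base, len),			/* first word: accepted */
	    ex_in_window(base + len - sizeof(int), base, len),	/* last word: accepted */
	    ex_in_window(base + len - 2, base, len));		/* straddles the end: rejected */
	return (0);
}
#endif
/*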
2140 */ 2141 min = (char *)p->p_md.md_regs - (char *)p->p_addr; 2142 if (off >= min && off <= min + sizeof(struct trapframe) - sizeof(int)) { 2143 tp = p->p_md.md_regs; 2144 frame_copy = *tp; 2145 *(int *)((char *)&frame_copy + (off - min)) = data; 2146 if (!EFL_SECURE(frame_copy.tf_eflags, tp->tf_eflags) || 2147 !CS_SECURE(frame_copy.tf_cs)) 2148 return (EINVAL); 2149 *(int*)((char *)p->p_addr + off) = data; 2150 return (0); 2151 } 2152 min = offsetof(struct user, u_pcb) + offsetof(struct pcb, pcb_savefpu); 2153 if (off >= min && off <= min + sizeof(struct save87) - sizeof(int)) { 2154 *(int*)((char *)p->p_addr + off) = data; 2155 return (0); 2156 } 2157 return (EFAULT); 2158} 2159 2160int 2161fill_regs(p, regs) 2162 struct proc *p; 2163 struct reg *regs; 2164{ 2165 struct pcb *pcb; 2166 struct trapframe *tp; 2167 2168 tp = p->p_md.md_regs; 2169 regs->r_fs = tp->tf_fs; 2170 regs->r_es = tp->tf_es; 2171 regs->r_ds = tp->tf_ds; 2172 regs->r_edi = tp->tf_edi; 2173 regs->r_esi = tp->tf_esi; 2174 regs->r_ebp = tp->tf_ebp; 2175 regs->r_ebx = tp->tf_ebx; 2176 regs->r_edx = tp->tf_edx; 2177 regs->r_ecx = tp->tf_ecx; 2178 regs->r_eax = tp->tf_eax; 2179 regs->r_eip = tp->tf_eip; 2180 regs->r_cs = tp->tf_cs; 2181 regs->r_eflags = tp->tf_eflags; 2182 regs->r_esp = tp->tf_esp; 2183 regs->r_ss = tp->tf_ss; 2184 pcb = &p->p_addr->u_pcb; 2185 regs->r_gs = pcb->pcb_gs; 2186 return (0); 2187} 2188 2189int 2190set_regs(p, regs) 2191 struct proc *p; 2192 struct reg *regs; 2193{ 2194 struct pcb *pcb; 2195 struct trapframe *tp; 2196 2197 tp = p->p_md.md_regs; 2198 if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) || 2199 !CS_SECURE(regs->r_cs)) 2200 return (EINVAL); 2201 tp->tf_fs = regs->r_fs; 2202 tp->tf_es = regs->r_es; 2203 tp->tf_ds = regs->r_ds; 2204 tp->tf_edi = regs->r_edi; 2205 tp->tf_esi = regs->r_esi; 2206 tp->tf_ebp = regs->r_ebp; 2207 tp->tf_ebx = regs->r_ebx; 2208 tp->tf_edx = regs->r_edx; 2209 tp->tf_ecx = regs->r_ecx; 2210 tp->tf_eax = regs->r_eax; 2211 tp->tf_eip = regs->r_eip; 2212 tp->tf_cs = regs->r_cs; 2213 tp->tf_eflags = regs->r_eflags; 2214 tp->tf_esp = regs->r_esp; 2215 tp->tf_ss = regs->r_ss; 2216 pcb = &p->p_addr->u_pcb; 2217 pcb->pcb_gs = regs->r_gs; 2218 return (0); 2219} 2220 2221int 2222fill_fpregs(p, fpregs) 2223 struct proc *p; 2224 struct fpreg *fpregs; 2225{ 2226 bcopy(&p->p_addr->u_pcb.pcb_savefpu, fpregs, sizeof *fpregs); 2227 return (0); 2228} 2229 2230int 2231set_fpregs(p, fpregs) 2232 struct proc *p; 2233 struct fpreg *fpregs; 2234{ 2235 bcopy(fpregs, &p->p_addr->u_pcb.pcb_savefpu, sizeof *fpregs); 2236 return (0); 2237} 2238 2239int 2240fill_dbregs(p, dbregs) 2241 struct proc *p; 2242 struct dbreg *dbregs; 2243{ 2244 struct pcb *pcb; 2245 2246 pcb = &p->p_addr->u_pcb; 2247 dbregs->dr0 = pcb->pcb_dr0; 2248 dbregs->dr1 = pcb->pcb_dr1; 2249 dbregs->dr2 = pcb->pcb_dr2; 2250 dbregs->dr3 = pcb->pcb_dr3; 2251 dbregs->dr4 = 0; 2252 dbregs->dr5 = 0; 2253 dbregs->dr6 = pcb->pcb_dr6; 2254 dbregs->dr7 = pcb->pcb_dr7; 2255 return (0); 2256} 2257 2258int 2259set_dbregs(p, dbregs) 2260 struct proc *p; 2261 struct dbreg *dbregs; 2262{ 2263 struct pcb *pcb; 2264 int i; 2265 u_int32_t mask1, mask2; 2266 2267 /* 2268 * Don't let an illegal value for dr7 get set. Specifically, 2269 * check for undefined settings. Setting these bit patterns 2270 * result in undefined behaviour and can lead to an unexpected 2271 * TRCTRAP. 
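 *
 * Each of the eight 2-bit R/W and LEN fields in the upper half of dr7 is
 * checked for the undefined 10b pattern by the loop that follows.  A rough
 * standalone sketch of the same scan (the ex_ name is made up):
 */
#if 0	/* illustrative sketch only, not part of this file */
#include <stdio.h>

/* Return 0 if dr7 contains no undefined 2-bit field and none of bits 10..15. */
static int
ex_check_dr7(unsigned int dr7)
{
	unsigned int mask1 = 0x3 << 16, mask2 = 0x2 << 16;
	int i;

	for (i = 0; i < 8; i++, mask1 <<= 2, mask2 <<= 2)
		if ((dr7 & mask1) == mask2)
			return (-1);		/* undefined field value */
	if (dr7 & 0x0000fc00)
		return (-1);			/* bits 10..15 are not accepted */
	return (0);
}

int
main(void)
{
	printf("%d\n", ex_check_dr7(0x00010001));	/* field is 01b: accepted */
	printf("%d\n", ex_check_dr7(0x00020001));	/* field is 10b: rejected */
	return (0);
}
#endif
/*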
2272 */ 2273 for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 8; 2274 i++, mask1 <<= 2, mask2 <<= 2) 2275 if ((dbregs->dr7 & mask1) == mask2) 2276 return (EINVAL); 2277 2278 if (dbregs->dr7 & 0x0000fc00) 2279 return (EINVAL); 2280 2281 2282 2283 pcb = &p->p_addr->u_pcb; 2284 2285 /* 2286 * Don't let a process set a breakpoint that is not within the 2287 * process's address space. If a process could do this, it 2288 * could halt the system by setting a breakpoint in the kernel 2289 * (if ddb was enabled). Thus, we need to check to make sure 2290 * that no breakpoints are being enabled for addresses outside 2291 * process's address space, unless, perhaps, we were called by 2292 * uid 0. 2293 * 2294 * XXX - what about when the watched area of the user's 2295 * address space is written into from within the kernel 2296 * ... wouldn't that still cause a breakpoint to be generated 2297 * from within kernel mode? 2298 */ 2299 2300 if (suser(p) != 0) { 2301 if (dbregs->dr7 & 0x3) { 2302 /* dr0 is enabled */ 2303 if (dbregs->dr0 >= VM_MAXUSER_ADDRESS) 2304 return (EINVAL); 2305 } 2306 2307 if (dbregs->dr7 & (0x3<<2)) { 2308 /* dr1 is enabled */ 2309 if (dbregs->dr1 >= VM_MAXUSER_ADDRESS) 2310 return (EINVAL); 2311 } 2312 2313 if (dbregs->dr7 & (0x3<<4)) { 2314 /* dr2 is enabled */ 2315 if (dbregs->dr2 >= VM_MAXUSER_ADDRESS) 2316 return (EINVAL); 2317 } 2318 2319 if (dbregs->dr7 & (0x3<<6)) { 2320 /* dr3 is enabled */ 2321 if (dbregs->dr3 >= VM_MAXUSER_ADDRESS) 2322 return (EINVAL); 2323 } 2324 } 2325 2326 pcb->pcb_dr0 = dbregs->dr0; 2327 pcb->pcb_dr1 = dbregs->dr1; 2328 pcb->pcb_dr2 = dbregs->dr2; 2329 pcb->pcb_dr3 = dbregs->dr3; 2330 pcb->pcb_dr6 = dbregs->dr6; 2331 pcb->pcb_dr7 = dbregs->dr7; 2332 2333 pcb->pcb_flags |= PCB_DBREGS; 2334 2335 return (0); 2336} 2337 2338/* 2339 * Return > 0 if a hardware breakpoint has been hit, and the 2340 * breakpoint was in user space. Return 0, otherwise. 2341 */ 2342int 2343user_dbreg_trap(void) 2344{ 2345 u_int32_t dr7, dr6; /* debug registers dr6 and dr7 */ 2346 u_int32_t bp; /* breakpoint bits extracted from dr6 */ 2347 int nbp; /* number of breakpoints that triggered */ 2348 caddr_t addr[4]; /* breakpoint addresses */ 2349 int i; 2350 2351 dr7 = rdr7(); 2352 if ((dr7 & 0x000000ff) == 0) { 2353 /* 2354 * all GE and LE bits in the dr7 register are zero, 2355 * thus the trap couldn't have been caused by the 2356 * hardware debug registers 2357 */ 2358 return 0; 2359 } 2360 2361 nbp = 0; 2362 dr6 = rdr6(); 2363 bp = dr6 & 0x0000000f; 2364 2365 if (!bp) { 2366 /* 2367 * None of the breakpoint bits are set meaning this 2368 * trap was not caused by any of the debug registers 2369 */ 2370 return 0; 2371 } 2372 2373 /* 2374 * at least one of the breakpoints were hit, check to see 2375 * which ones and if any of them are user space addresses 2376 */ 2377 2378 if (bp & 0x01) { 2379 addr[nbp++] = (caddr_t)rdr0(); 2380 } 2381 if (bp & 0x02) { 2382 addr[nbp++] = (caddr_t)rdr1(); 2383 } 2384 if (bp & 0x04) { 2385 addr[nbp++] = (caddr_t)rdr2(); 2386 } 2387 if (bp & 0x08) { 2388 addr[nbp++] = (caddr_t)rdr3(); 2389 } 2390 2391 for (i=0; i<nbp; i++) { 2392 if (addr[i] < 2393 (caddr_t)VM_MAXUSER_ADDRESS) { 2394 /* 2395 * addr[i] is in user space 2396 */ 2397 return nbp; 2398 } 2399 } 2400 2401 /* 2402 * None of the breakpoints are in user space. 
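 *
 * A rough standalone sketch of the decision this routine makes: decode the
 * triggered breakpoints from the low four bits of dr6 and report the count
 * only if at least one triggered address is below the user-space ceiling
 * (the ex_/EX_ names and the sample values are made up):
 */
#if 0	/* illustrative sketch only, not part of this file */
#include <stdio.h>

#define EX_MAXUSER	0xbfc00000UL	/* pretend end of user space */

static int
ex_user_dbreg_trap(unsigned int dr6, const unsigned long dr[4])
{
	unsigned long addr[4];
	int i, nbp = 0;

	/* The low four bits of dr6 say which breakpoints fired. */
	for (i = 0; i < 4; i++)
		if (dr6 & (1u << i))
			addr[nbp++] = dr[i];
	/* Report the trigger count if any triggered address is in
	 * (pretend) user space, otherwise report none. */
	for (i = 0; i < nbp; i++)
		if (addr[i] < EX_MAXUSER)
			return (nbp);
	return (0);
}

int
main(void)
{
	unsigned long dr[4] = { 0x0804a000UL, 0xc0120000UL, 0, 0 };

	/* Breakpoints 0 and 1 fired; only the first is a user address. */
	printf("%d\n", ex_user_dbreg_trap(0x3, dr));	/* prints 2 */
	return (0);
}
#endif
/*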
2403 */ 2404 return 0; 2405} 2406 2407 2408#ifndef DDB 2409void 2410Debugger(const char *msg) 2411{ 2412 printf("Debugger(\"%s\") called.\n", msg); 2413} 2414#endif /* no DDB */ 2415 2416#include <sys/disklabel.h> 2417 2418/* 2419 * Determine the size of the transfer, and make sure it is 2420 * within the boundaries of the partition. Adjust transfer 2421 * if needed, and signal errors or early completion. 2422 */ 2423int 2424bounds_check_with_label(struct bio *bp, struct disklabel *lp, int wlabel) 2425{ 2426 struct partition *p = lp->d_partitions + dkpart(bp->bio_dev); 2427 int labelsect = lp->d_partitions[0].p_offset; 2428 int maxsz = p->p_size, 2429 sz = (bp->bio_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT; 2430 2431 /* overwriting disk label ? */ 2432 /* XXX should also protect bootstrap in first 8K */ 2433 if (bp->bio_blkno + p->p_offset <= LABELSECTOR + labelsect && 2434#if LABELSECTOR != 0 2435 bp->bio_blkno + p->p_offset + sz > LABELSECTOR + labelsect && 2436#endif 2437 (bp->bio_cmd == BIO_WRITE) && wlabel == 0) { 2438 bp->bio_error = EROFS; 2439 goto bad; 2440 } 2441 2442#if defined(DOSBBSECTOR) && defined(notyet) 2443 /* overwriting master boot record? */ 2444 if (bp->bio_blkno + p->p_offset <= DOSBBSECTOR && 2445 (bp->bio_cmd == BIO_WRITE) && wlabel == 0) { 2446 bp->bio_error = EROFS; 2447 goto bad; 2448 } 2449#endif 2450 2451 /* beyond partition? */ 2452 if (bp->bio_blkno < 0 || bp->bio_blkno + sz > maxsz) { 2453 /* if exactly at end of disk, return an EOF */ 2454 if (bp->bio_blkno == maxsz) { 2455 bp->bio_resid = bp->bio_bcount; 2456 return(0); 2457 } 2458 /* or truncate if part of it fits */ 2459 sz = maxsz - bp->bio_blkno; 2460 if (sz <= 0) { 2461 bp->bio_error = EINVAL; 2462 goto bad; 2463 } 2464 bp->bio_bcount = sz << DEV_BSHIFT; 2465 } 2466 2467 bp->bio_pblkno = bp->bio_blkno + p->p_offset; 2468 return(1); 2469 2470bad: 2471 bp->bio_flags |= BIO_ERROR; 2472 return(-1); 2473} 2474 2475#ifdef DDB 2476 2477/* 2478 * Provide inb() and outb() as functions. They are normally only 2479 * available as macros calling inlined functions, thus cannot be 2480 * called inside DDB. 2481 * 2482 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined. 2483 */ 2484 2485#undef inb 2486#undef outb 2487 2488/* silence compiler warnings */ 2489u_char inb(u_int); 2490void outb(u_int, u_char); 2491 2492u_char 2493inb(u_int port) 2494{ 2495 u_char data; 2496 /* 2497 * We use %%dx and not %1 here because i/o is done at %dx and not at 2498 * %edx, while gcc generates inferior code (movw instead of movl) 2499 * if we tell it to load (u_short) port. 2500 */ 2501 __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port)); 2502 return (data); 2503} 2504 2505void 2506outb(u_int port, u_char data) 2507{ 2508 u_char al; 2509 /* 2510 * Use an unnecessary assignment to help gcc's register allocator. 2511 * This make a large difference for gcc-1.40 and a tiny difference 2512 * for gcc-2.6.0. For gcc-1.40, al had to be ``asm("ax")'' for 2513 * best results. gcc-2.6.0 can't handle this. 2514 */ 2515 al = data; 2516 __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port)); 2517} 2518 2519#endif /* DDB */ 2520
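/*
 * The transfer clipping done by bounds_check_with_label() above first
 * rounds the byte count up to whole sectors and then truncates the request
 * if it would run past the end of the partition.  A rough standalone
 * illustration of that arithmetic (the EX_ names and sample numbers are
 * made up):
 */
#if 0	/* illustrative sketch only, not part of this file */
#include <stdio.h>

#define EX_DEV_BSHIFT	9			/* 512-byte sectors */
#define EX_DEV_BSIZE	(1L << EX_DEV_BSHIFT)

int
main(void)
{
	long blkno = 1000, maxsz = 1004;	/* partition holds 1004 sectors */
	long bcount = 4096;			/* requested transfer in bytes */
	long sz = (bcount + EX_DEV_BSIZE - 1) >> EX_DEV_BSHIFT;

	if (blkno + sz > maxsz) {
		/* Request runs past the partition: truncate it. */
		sz = maxsz - blkno;
		bcount = sz << EX_DEV_BSHIFT;
	}
	printf("sectors = %ld, bytes = %ld\n", sz, bcount);
	return (0);
}
#endif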