machdep.c revision 76078
/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
36 * 37 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 38 * $FreeBSD: head/sys/amd64/amd64/machdep.c 76078 2001-04-27 19:28:25Z jhb $ 39 */ 40 41#include "opt_atalk.h" 42#include "opt_compat.h" 43#include "opt_cpu.h" 44#include "opt_ddb.h" 45#include "opt_inet.h" 46#include "opt_ipx.h" 47#include "opt_isa.h" 48#include "opt_maxmem.h" 49#include "opt_msgbuf.h" 50#include "opt_npx.h" 51#include "opt_perfmon.h" 52#include "opt_userconfig.h" 53 54#include <sys/param.h> 55#include <sys/systm.h> 56#include <sys/sysproto.h> 57#include <sys/signalvar.h> 58#include <sys/ipl.h> 59#include <sys/kernel.h> 60#include <sys/ktr.h> 61#include <sys/linker.h> 62#include <sys/lock.h> 63#include <sys/malloc.h> 64#include <sys/mutex.h> 65#include <sys/proc.h> 66#include <sys/bio.h> 67#include <sys/buf.h> 68#include <sys/reboot.h> 69#include <sys/smp.h> 70#include <sys/callout.h> 71#include <sys/msgbuf.h> 72#include <sys/sysent.h> 73#include <sys/sysctl.h> 74#include <sys/vmmeter.h> 75#include <sys/bus.h> 76#include <sys/eventhandler.h> 77 78#include <vm/vm.h> 79#include <vm/vm_param.h> 80#include <sys/lock.h> 81#include <vm/vm_kern.h> 82#include <vm/vm_object.h> 83#include <vm/vm_page.h> 84#include <vm/vm_map.h> 85#include <vm/vm_pager.h> 86#include <vm/vm_extern.h> 87 88#include <sys/user.h> 89#include <sys/exec.h> 90#include <sys/cons.h> 91 92#include <ddb/ddb.h> 93 94#include <net/netisr.h> 95 96#include <machine/cpu.h> 97#include <machine/cputypes.h> 98#include <machine/reg.h> 99#include <machine/clock.h> 100#include <machine/specialreg.h> 101#include <machine/bootinfo.h> 102#include <machine/md_var.h> 103#include <machine/pc/bios.h> 104#include <machine/pcb_ext.h> /* pcb.h included via sys/user.h */ 105#include <machine/globaldata.h> 106#include <machine/globals.h> 107#include <machine/intrcnt.h> 108#ifdef PERFMON 109#include <machine/perfmon.h> 110#endif 111 112#ifdef OLD_BUS_ARCH 113#include <i386/isa/isa_device.h> 114#endif 115#include <i386/isa/icu.h> 116#include <i386/isa/intr_machdep.h> 117#include <isa/rtc.h> 118#include <machine/vm86.h> 119#include <sys/ptrace.h> 120#include <machine/sigframe.h> 121 122extern void init386 __P((int first)); 123extern void dblfault_handler __P((void)); 124 125extern void printcpuinfo(void); /* XXX header file */ 126extern void earlysetcpuclass(void); /* same header file */ 127extern void finishidentcpu(void); 128extern void panicifcpuunsupported(void); 129extern void initializecpu(void); 130 131#define CS_SECURE(cs) (ISPL(cs) == SEL_UPL) 132#define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0) 133 134static void cpu_startup __P((void *)); 135SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL) 136 137int _udatasel, _ucodesel; 138u_int atdevbase; 139 140#if defined(SWTCH_OPTIM_STATS) 141extern int swtch_optim_stats; 142SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats, 143 CTLFLAG_RD, &swtch_optim_stats, 0, ""); 144SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count, 145 CTLFLAG_RD, &tlb_flush_count, 0, ""); 146#endif 147 148#ifdef PC98 149static int ispc98 = 1; 150#else 151static int ispc98 = 0; 152#endif 153SYSCTL_INT(_machdep, OID_AUTO, ispc98, CTLFLAG_RD, &ispc98, 0, ""); 154 155int physmem = 0; 156int cold = 1; 157 158static void osendsig __P((sig_t catcher, int sig, sigset_t *mask, u_long code)); 159 160static int 161sysctl_hw_physmem(SYSCTL_HANDLER_ARGS) 162{ 163 int error = sysctl_handle_int(oidp, 0, ctob(physmem), req); 164 return (error); 165} 166 167SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_INT|CTLFLAG_RD, 168 0, 0, sysctl_hw_physmem, "I", 
""); 169 170static int 171sysctl_hw_usermem(SYSCTL_HANDLER_ARGS) 172{ 173 int error = sysctl_handle_int(oidp, 0, 174 ctob(physmem - cnt.v_wire_count), req); 175 return (error); 176} 177 178SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD, 179 0, 0, sysctl_hw_usermem, "I", ""); 180 181static int 182sysctl_hw_availpages(SYSCTL_HANDLER_ARGS) 183{ 184 int error = sysctl_handle_int(oidp, 0, 185 i386_btop(avail_end - avail_start), req); 186 return (error); 187} 188 189SYSCTL_PROC(_hw, OID_AUTO, availpages, CTLTYPE_INT|CTLFLAG_RD, 190 0, 0, sysctl_hw_availpages, "I", ""); 191 192static int 193sysctl_machdep_msgbuf(SYSCTL_HANDLER_ARGS) 194{ 195 int error; 196 197 /* Unwind the buffer, so that it's linear (possibly starting with 198 * some initial nulls). 199 */ 200 error=sysctl_handle_opaque(oidp,msgbufp->msg_ptr+msgbufp->msg_bufr, 201 msgbufp->msg_size-msgbufp->msg_bufr,req); 202 if(error) return(error); 203 if(msgbufp->msg_bufr>0) { 204 error=sysctl_handle_opaque(oidp,msgbufp->msg_ptr, 205 msgbufp->msg_bufr,req); 206 } 207 return(error); 208} 209 210SYSCTL_PROC(_machdep, OID_AUTO, msgbuf, CTLTYPE_STRING|CTLFLAG_RD, 211 0, 0, sysctl_machdep_msgbuf, "A","Contents of kernel message buffer"); 212 213static int msgbuf_clear; 214 215static int 216sysctl_machdep_msgbuf_clear(SYSCTL_HANDLER_ARGS) 217{ 218 int error; 219 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, 220 req); 221 if (!error && req->newptr) { 222 /* Clear the buffer and reset write pointer */ 223 bzero(msgbufp->msg_ptr,msgbufp->msg_size); 224 msgbufp->msg_bufr=msgbufp->msg_bufx=0; 225 msgbuf_clear=0; 226 } 227 return (error); 228} 229 230SYSCTL_PROC(_machdep, OID_AUTO, msgbuf_clear, CTLTYPE_INT|CTLFLAG_RW, 231 &msgbuf_clear, 0, sysctl_machdep_msgbuf_clear, "I", 232 "Clear kernel message buffer"); 233 234int bootverbose = 0, Maxmem = 0; 235long dumplo; 236 237vm_offset_t phys_avail[10]; 238 239/* must be 2 less so 0 0 can signal end of chunks */ 240#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2) 241 242static vm_offset_t buffer_sva, buffer_eva; 243vm_offset_t clean_sva, clean_eva; 244static vm_offset_t pager_sva, pager_eva; 245static struct trapframe proc0_tf; 246#ifndef SMP 247static struct globaldata __globaldata; 248#endif 249 250struct mtx sched_lock; 251struct mtx Giant; 252 253static void 254cpu_startup(dummy) 255 void *dummy; 256{ 257 register unsigned i; 258 register caddr_t v; 259 vm_offset_t maxaddr; 260 vm_size_t size = 0; 261 int firstaddr; 262 vm_offset_t minaddr; 263 int physmem_est; 264 265 if (boothowto & RB_VERBOSE) 266 bootverbose++; 267 268 /* 269 * Good {morning,afternoon,evening,night}. 270 */ 271 printf("%s", version); 272 earlysetcpuclass(); 273 startrtclock(); 274 printcpuinfo(); 275 panicifcpuunsupported(); 276#ifdef PERFMON 277 perfmon_init(); 278#endif 279 printf("real memory = %u (%uK bytes)\n", ptoa(Maxmem), ptoa(Maxmem) / 1024); 280 /* 281 * Display any holes after the first chunk of extended memory. 
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			unsigned int size1 = phys_avail[indx + 1] - phys_avail[indx];

			printf("0x%08x - 0x%08x, %u bytes (%u pages)\n",
			    phys_avail[indx], phys_avail[indx + 1] - 1, size1,
			    size1 / PAGE_SIZE);
		}
	}

	/*
	 * Calculate callout wheel size
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	/*
	 * Allocate space for system data structures.
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */

	/*
	 * Make two passes.  The first pass calculates how much memory is
	 * needed and allocates it.  The second pass assigns virtual
	 * addresses to the various data structures.
	 */
	firstaddr = 0;
again:
	v = (caddr_t)firstaddr;

#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))

	valloc(callout, struct callout, ncallout);
	valloc(callwheel, struct callout_tailq, callwheelsize);

	/*
	 * Discount the physical memory larger than the size of kernel_map
	 * to avoid eating up all of KVA space.
	 */
	if (kernel_map->first_free == NULL) {
		printf("Warning: no free entries in kernel_map.\n");
		physmem_est = physmem;
	} else
		physmem_est = min(physmem, kernel_map->max_offset - kernel_map->min_offset);

	/*
	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
	 * For the first 64MB of ram nominally allocate sufficient buffers to
	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
	 * buffers to cover 1/20 of our ram over 64MB.
	 *
	 * factor represents the 1/4 x ram conversion.
	 */
	if (nbuf == 0) {
		int factor = 4 * BKVASIZE / PAGE_SIZE;

		nbuf = 50;
		if (physmem_est > 1024)
			nbuf += min((physmem_est - 1024) / factor, 16384 / factor);
		if (physmem_est > 16384)
			nbuf += (physmem_est - 16384) * 2 / (factor * 5);
	}

	/*
	 * Do not allow the buffer_map to be more than 1/2 the size of the
	 * kernel_map.
363 */ 364 if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) / 365 (BKVASIZE * 2)) { 366 nbuf = (kernel_map->max_offset - kernel_map->min_offset) / 367 (BKVASIZE * 2); 368 printf("Warning: nbufs capped at %d\n", nbuf); 369 } 370 371 nswbuf = max(min(nbuf/4, 256), 16); 372 373 valloc(swbuf, struct buf, nswbuf); 374 valloc(buf, struct buf, nbuf); 375 v = bufhashinit(v); 376 377 /* 378 * End of first pass, size has been calculated so allocate memory 379 */ 380 if (firstaddr == 0) { 381 size = (vm_size_t)(v - firstaddr); 382 firstaddr = (int)kmem_alloc(kernel_map, round_page(size)); 383 if (firstaddr == 0) 384 panic("startup: no room for tables"); 385 goto again; 386 } 387 388 /* 389 * End of second pass, addresses have been assigned 390 */ 391 if ((vm_size_t)(v - firstaddr) != size) 392 panic("startup: table size inconsistency"); 393 394 clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva, 395 (nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size); 396 buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva, 397 (nbuf*BKVASIZE)); 398 buffer_map->system_map = 1; 399 pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva, 400 (nswbuf*MAXPHYS) + pager_map_size); 401 pager_map->system_map = 1; 402 exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, 403 (16*(ARG_MAX+(PAGE_SIZE*3)))); 404 405 /* 406 * XXX: Mbuf system machine-specific initializations should 407 * go here, if anywhere. 408 */ 409 410 /* 411 * Initialize callouts 412 */ 413 SLIST_INIT(&callfree); 414 for (i = 0; i < ncallout; i++) { 415 callout_init(&callout[i], 0); 416 callout[i].c_flags = CALLOUT_LOCAL_ALLOC; 417 SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle); 418 } 419 420 for (i = 0; i < callwheelsize; i++) { 421 TAILQ_INIT(&callwheel[i]); 422 } 423 424 mtx_init(&callout_lock, "callout", MTX_SPIN | MTX_RECURSE); 425 426#if defined(USERCONFIG) 427 userconfig(); 428 cninit(); /* the preferred console may have changed */ 429#endif 430 431 printf("avail memory = %u (%uK bytes)\n", ptoa(cnt.v_free_count), 432 ptoa(cnt.v_free_count) / 1024); 433 434 /* 435 * Set up buffers, so they can be used to read disk labels. 436 */ 437 bufinit(); 438 vm_pager_bufferinit(); 439 440#ifdef SMP 441 globaldata_register(GLOBALDATA); 442#else 443 /* For SMP, we delay the cpu_setregs() until after SMP startup. */ 444 cpu_setregs(); 445#endif 446} 447 448/* 449 * Send an interrupt to process. 450 * 451 * Stack is set up to allow sigcode stored 452 * at top to call routine, followed by kcall 453 * to sigreturn routine below. After sigreturn 454 * resets the signal mask, the stack, and the 455 * frame pointer, it returns to the user 456 * specified pc, psl. 457 */ 458static void 459osendsig(catcher, sig, mask, code) 460 sig_t catcher; 461 int sig; 462 sigset_t *mask; 463 u_long code; 464{ 465 struct osigframe sf; 466 struct osigframe *fp; 467 struct proc *p; 468 struct sigacts *psp; 469 struct trapframe *regs; 470 int oonstack; 471 472 p = curproc; 473 PROC_LOCK(p); 474 psp = p->p_sigacts; 475 regs = p->p_md.md_regs; 476 oonstack = sigonstack(regs->tf_esp); 477 478 /* Allocate and validate space for the signal handler context. 
*/ 479 if ((p->p_flag & P_ALTSTACK) && !oonstack && 480 SIGISMEMBER(psp->ps_sigonstack, sig)) { 481 fp = (struct osigframe *)(p->p_sigstk.ss_sp + 482 p->p_sigstk.ss_size - sizeof(struct osigframe)); 483#if defined(COMPAT_43) || defined(COMPAT_SUNOS) 484 p->p_sigstk.ss_flags |= SS_ONSTACK; 485#endif 486 } else 487 fp = (struct osigframe *)regs->tf_esp - 1; 488 PROC_UNLOCK(p); 489 490 /* 491 * grow_stack() will return 0 if *fp does not fit inside the stack 492 * and the stack can not be grown. 493 * useracc() will return FALSE if access is denied. 494 */ 495 if (grow_stack(p, (int)fp) == 0 || 496 !useracc((caddr_t)fp, sizeof(*fp), VM_PROT_WRITE)) { 497 /* 498 * Process has trashed its stack; give it an illegal 499 * instruction to halt it in its tracks. 500 */ 501 PROC_LOCK(p); 502 SIGACTION(p, SIGILL) = SIG_DFL; 503 SIGDELSET(p->p_sigignore, SIGILL); 504 SIGDELSET(p->p_sigcatch, SIGILL); 505 SIGDELSET(p->p_sigmask, SIGILL); 506 psignal(p, SIGILL); 507 PROC_UNLOCK(p); 508 return; 509 } 510 511 /* Translate the signal if appropriate. */ 512 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize) 513 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; 514 515 /* Build the argument list for the signal handler. */ 516 sf.sf_signum = sig; 517 sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc; 518 PROC_LOCK(p); 519 if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) { 520 /* Signal handler installed with SA_SIGINFO. */ 521 sf.sf_arg2 = (register_t)&fp->sf_siginfo; 522 sf.sf_siginfo.si_signo = sig; 523 sf.sf_siginfo.si_code = code; 524 sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher; 525 } else { 526 /* Old FreeBSD-style arguments. */ 527 sf.sf_arg2 = code; 528 sf.sf_addr = regs->tf_err; 529 sf.sf_ahu.sf_handler = catcher; 530 } 531 PROC_UNLOCK(p); 532 533 /* Save most if not all of trap frame. */ 534 sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax; 535 sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx; 536 sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx; 537 sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx; 538 sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi; 539 sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi; 540 sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs; 541 sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds; 542 sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss; 543 sf.sf_siginfo.si_sc.sc_es = regs->tf_es; 544 sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs; 545 sf.sf_siginfo.si_sc.sc_gs = rgs(); 546 sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp; 547 548 /* Build the signal context to be used by osigreturn(). */ 549 sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0; 550 SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask); 551 sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp; 552 sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp; 553 sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip; 554 sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags; 555 sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno; 556 sf.sf_siginfo.si_sc.sc_err = regs->tf_err; 557 558 /* 559 * If we're a vm86 process, we want to save the segment registers. 560 * We also change eflags to be our emulated eflags, not the actual 561 * eflags. 562 */ 563 if (regs->tf_eflags & PSL_VM) { 564 /* XXX confusing names: `tf' isn't a trapframe; `regs' is. 
*/ 565 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 566 struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 567 568 sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs; 569 sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs; 570 sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es; 571 sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds; 572 573 if (vm86->vm86_has_vme == 0) 574 sf.sf_siginfo.si_sc.sc_ps = 575 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) | 576 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); 577 578 /* See sendsig() for comments. */ 579 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP); 580 } 581 582 /* Copy the sigframe out to the user's stack. */ 583 if (copyout(&sf, fp, sizeof(*fp)) != 0) { 584 /* 585 * Something is wrong with the stack pointer. 586 * ...Kill the process. 587 */ 588 PROC_LOCK(p); 589 sigexit(p, SIGILL); 590 /* NOTREACHED */ 591 } 592 593 regs->tf_esp = (int)fp; 594 regs->tf_eip = PS_STRINGS - szosigcode; 595 regs->tf_cs = _ucodesel; 596 regs->tf_ds = _udatasel; 597 regs->tf_es = _udatasel; 598 regs->tf_fs = _udatasel; 599 load_gs(_udatasel); 600 regs->tf_ss = _udatasel; 601} 602 603void 604sendsig(catcher, sig, mask, code) 605 sig_t catcher; 606 int sig; 607 sigset_t *mask; 608 u_long code; 609{ 610 struct sigframe sf; 611 struct proc *p; 612 struct sigacts *psp; 613 struct trapframe *regs; 614 struct sigframe *sfp; 615 int oonstack; 616 617 p = curproc; 618 PROC_LOCK(p); 619 psp = p->p_sigacts; 620 if (SIGISMEMBER(psp->ps_osigset, sig)) { 621 PROC_UNLOCK(p); 622 osendsig(catcher, sig, mask, code); 623 return; 624 } 625 regs = p->p_md.md_regs; 626 oonstack = sigonstack(regs->tf_esp); 627 628 /* Save user context. */ 629 bzero(&sf, sizeof(sf)); 630 sf.sf_uc.uc_sigmask = *mask; 631 sf.sf_uc.uc_stack = p->p_sigstk; 632 sf.sf_uc.uc_stack.ss_flags = (p->p_flag & P_ALTSTACK) 633 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE; 634 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0; 635 sf.sf_uc.uc_mcontext.mc_gs = rgs(); 636 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs)); 637 638 /* Allocate and validate space for the signal handler context. */ 639 if ((p->p_flag & P_ALTSTACK) != 0 && !oonstack && 640 SIGISMEMBER(psp->ps_sigonstack, sig)) { 641 sfp = (struct sigframe *)(p->p_sigstk.ss_sp + 642 p->p_sigstk.ss_size - sizeof(struct sigframe)); 643#if defined(COMPAT_43) || defined(COMPAT_SUNOS) 644 p->p_sigstk.ss_flags |= SS_ONSTACK; 645#endif 646 } else 647 sfp = (struct sigframe *)regs->tf_esp - 1; 648 PROC_UNLOCK(p); 649 650 /* 651 * grow_stack() will return 0 if *sfp does not fit inside the stack 652 * and the stack can not be grown. 653 * useracc() will return FALSE if access is denied. 654 */ 655 if (grow_stack(p, (int)sfp) == 0 || 656 !useracc((caddr_t)sfp, sizeof(*sfp), VM_PROT_WRITE)) { 657 /* 658 * Process has trashed its stack; give it an illegal 659 * instruction to halt it in its tracks. 660 */ 661#ifdef DEBUG 662 printf("process %d has trashed its stack\n", p->p_pid); 663#endif 664 PROC_LOCK(p); 665 SIGACTION(p, SIGILL) = SIG_DFL; 666 SIGDELSET(p->p_sigignore, SIGILL); 667 SIGDELSET(p->p_sigcatch, SIGILL); 668 SIGDELSET(p->p_sigmask, SIGILL); 669 psignal(p, SIGILL); 670 PROC_UNLOCK(p); 671 return; 672 } 673 674 /* Translate the signal if appropriate. */ 675 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize) 676 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; 677 678 /* Build the argument list for the signal handler. 
*/ 679 sf.sf_signum = sig; 680 sf.sf_ucontext = (register_t)&sfp->sf_uc; 681 PROC_LOCK(p); 682 if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) { 683 /* Signal handler installed with SA_SIGINFO. */ 684 sf.sf_siginfo = (register_t)&sfp->sf_si; 685 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; 686 687 /* Fill siginfo structure. */ 688 sf.sf_si.si_signo = sig; 689 sf.sf_si.si_code = code; 690 sf.sf_si.si_addr = (void *)regs->tf_err; 691 } else { 692 /* Old FreeBSD-style arguments. */ 693 sf.sf_siginfo = code; 694 sf.sf_addr = regs->tf_err; 695 sf.sf_ahu.sf_handler = catcher; 696 } 697 PROC_UNLOCK(p); 698 699 /* 700 * If we're a vm86 process, we want to save the segment registers. 701 * We also change eflags to be our emulated eflags, not the actual 702 * eflags. 703 */ 704 if (regs->tf_eflags & PSL_VM) { 705 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 706 struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 707 708 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs; 709 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs; 710 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es; 711 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds; 712 713 if (vm86->vm86_has_vme == 0) 714 sf.sf_uc.uc_mcontext.mc_eflags = 715 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) | 716 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); 717 718 /* 719 * We should never have PSL_T set when returning from vm86 720 * mode. It may be set here if we deliver a signal before 721 * getting to vm86 mode, so turn it off. 722 * 723 * Clear PSL_NT to inhibit T_TSSFLT faults on return from 724 * syscalls made by the signal handler. This just avoids 725 * wasting time for our lazy fixup of such faults. PSL_NT 726 * does nothing in vm86 mode, but vm86 programs can set it 727 * almost legitimately in probes for old cpu types. 728 */ 729 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP); 730 } 731 732 /* Copy the sigframe out to the user's stack. */ 733 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) { 734 /* 735 * Something is wrong with the stack pointer. 736 * ...Kill the process. 737 */ 738 PROC_LOCK(p); 739 sigexit(p, SIGILL); 740 /* NOTREACHED */ 741 } 742 743 regs->tf_esp = (int)sfp; 744 regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode); 745 regs->tf_cs = _ucodesel; 746 regs->tf_ds = _udatasel; 747 regs->tf_es = _udatasel; 748 regs->tf_fs = _udatasel; 749 load_gs(_udatasel); 750 regs->tf_ss = _udatasel; 751} 752 753/* 754 * System call to cleanup state after a signal 755 * has been taken. Reset signal mask and 756 * stack state from context left by sendsig (above). 757 * Return to previous pc and psl as specified by 758 * context left by sendsig. Check carefully to 759 * make sure that the user has not modified the 760 * state to gain improper privileges. 761 */ 762int 763osigreturn(p, uap) 764 struct proc *p; 765 struct osigreturn_args /* { 766 struct osigcontext *sigcntxp; 767 } */ *uap; 768{ 769 struct trapframe *regs; 770 struct osigcontext *scp; 771 int eflags; 772 773 regs = p->p_md.md_regs; 774 scp = uap->sigcntxp; 775 if (!useracc((caddr_t)scp, sizeof(*scp), VM_PROT_READ)) 776 return (EFAULT); 777 eflags = scp->sc_ps; 778 if (eflags & PSL_VM) { 779 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 780 struct vm86_kernel *vm86; 781 782 /* 783 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't 784 * set up the vm86 area, and we can't enter vm86 mode. 
785 */ 786 if (p->p_addr->u_pcb.pcb_ext == 0) 787 return (EINVAL); 788 vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 789 if (vm86->vm86_inited == 0) 790 return (EINVAL); 791 792 /* Go back to user mode if both flags are set. */ 793 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) 794 trapsignal(p, SIGBUS, 0); 795 796 if (vm86->vm86_has_vme) { 797 eflags = (tf->tf_eflags & ~VME_USERCHANGE) | 798 (eflags & VME_USERCHANGE) | PSL_VM; 799 } else { 800 vm86->vm86_eflags = eflags; /* save VIF, VIP */ 801 eflags = (tf->tf_eflags & ~VM_USERCHANGE) | (eflags & VM_USERCHANGE) | PSL_VM; 802 } 803 tf->tf_vm86_ds = scp->sc_ds; 804 tf->tf_vm86_es = scp->sc_es; 805 tf->tf_vm86_fs = scp->sc_fs; 806 tf->tf_vm86_gs = scp->sc_gs; 807 tf->tf_ds = _udatasel; 808 tf->tf_es = _udatasel; 809 tf->tf_fs = _udatasel; 810 } else { 811 /* 812 * Don't allow users to change privileged or reserved flags. 813 */ 814 /* 815 * XXX do allow users to change the privileged flag PSL_RF. 816 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers 817 * should sometimes set it there too. tf_eflags is kept in 818 * the signal context during signal handling and there is no 819 * other place to remember it, so the PSL_RF bit may be 820 * corrupted by the signal handler without us knowing. 821 * Corruption of the PSL_RF bit at worst causes one more or 822 * one less debugger trap, so allowing it is fairly harmless. 823 */ 824 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) { 825 return (EINVAL); 826 } 827 828 /* 829 * Don't allow users to load a valid privileged %cs. Let the 830 * hardware check for invalid selectors, excess privilege in 831 * other selectors, invalid %eip's and invalid %esp's. 832 */ 833 if (!CS_SECURE(scp->sc_cs)) { 834 trapsignal(p, SIGBUS, T_PROTFLT); 835 return (EINVAL); 836 } 837 regs->tf_ds = scp->sc_ds; 838 regs->tf_es = scp->sc_es; 839 regs->tf_fs = scp->sc_fs; 840 } 841 842 /* Restore remaining registers. */ 843 regs->tf_eax = scp->sc_eax; 844 regs->tf_ebx = scp->sc_ebx; 845 regs->tf_ecx = scp->sc_ecx; 846 regs->tf_edx = scp->sc_edx; 847 regs->tf_esi = scp->sc_esi; 848 regs->tf_edi = scp->sc_edi; 849 regs->tf_cs = scp->sc_cs; 850 regs->tf_ss = scp->sc_ss; 851 regs->tf_isp = scp->sc_isp; 852 853 PROC_LOCK(p); 854#if defined(COMPAT_43) || defined(COMPAT_SUNOS) 855 if (scp->sc_onstack & 1) 856 p->p_sigstk.ss_flags |= SS_ONSTACK; 857 else 858 p->p_sigstk.ss_flags &= ~SS_ONSTACK; 859#endif 860 861 SIGSETOLD(p->p_sigmask, scp->sc_mask); 862 SIG_CANTMASK(p->p_sigmask); 863 PROC_UNLOCK(p); 864 regs->tf_ebp = scp->sc_fp; 865 regs->tf_esp = scp->sc_sp; 866 regs->tf_eip = scp->sc_pc; 867 regs->tf_eflags = eflags; 868 return (EJUSTRETURN); 869} 870 871int 872sigreturn(p, uap) 873 struct proc *p; 874 struct sigreturn_args /* { 875 ucontext_t *sigcntxp; 876 } */ *uap; 877{ 878 struct trapframe *regs; 879 ucontext_t *ucp; 880 int cs, eflags; 881 882 ucp = uap->sigcntxp; 883 if (!useracc((caddr_t)ucp, sizeof(struct osigcontext), VM_PROT_READ)) 884 return (EFAULT); 885 if (((struct osigcontext *)ucp)->sc_trapno == 0x01d516) 886 return (osigreturn(p, (struct osigreturn_args *)uap)); 887 888 /* 889 * Since ucp is not an osigcontext but a ucontext_t, we have to 890 * check again if all of it is accessible. A ucontext_t is 891 * much larger, so instead of just checking for the pointer 892 * being valid for the size of an osigcontext, now check for 893 * it being valid for a whole, new-style ucontext_t. 
894 */ 895 if (!useracc((caddr_t)ucp, sizeof(*ucp), VM_PROT_READ)) 896 return (EFAULT); 897 898 regs = p->p_md.md_regs; 899 eflags = ucp->uc_mcontext.mc_eflags; 900 if (eflags & PSL_VM) { 901 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 902 struct vm86_kernel *vm86; 903 904 /* 905 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't 906 * set up the vm86 area, and we can't enter vm86 mode. 907 */ 908 if (p->p_addr->u_pcb.pcb_ext == 0) 909 return (EINVAL); 910 vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 911 if (vm86->vm86_inited == 0) 912 return (EINVAL); 913 914 /* Go back to user mode if both flags are set. */ 915 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) 916 trapsignal(p, SIGBUS, 0); 917 918 if (vm86->vm86_has_vme) { 919 eflags = (tf->tf_eflags & ~VME_USERCHANGE) | 920 (eflags & VME_USERCHANGE) | PSL_VM; 921 } else { 922 vm86->vm86_eflags = eflags; /* save VIF, VIP */ 923 eflags = (tf->tf_eflags & ~VM_USERCHANGE) | (eflags & VM_USERCHANGE) | PSL_VM; 924 } 925 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe)); 926 tf->tf_eflags = eflags; 927 tf->tf_vm86_ds = tf->tf_ds; 928 tf->tf_vm86_es = tf->tf_es; 929 tf->tf_vm86_fs = tf->tf_fs; 930 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs; 931 tf->tf_ds = _udatasel; 932 tf->tf_es = _udatasel; 933 tf->tf_fs = _udatasel; 934 } else { 935 /* 936 * Don't allow users to change privileged or reserved flags. 937 */ 938 /* 939 * XXX do allow users to change the privileged flag PSL_RF. 940 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers 941 * should sometimes set it there too. tf_eflags is kept in 942 * the signal context during signal handling and there is no 943 * other place to remember it, so the PSL_RF bit may be 944 * corrupted by the signal handler without us knowing. 945 * Corruption of the PSL_RF bit at worst causes one more or 946 * one less debugger trap, so allowing it is fairly harmless. 947 */ 948 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) { 949 printf("sigreturn: eflags = 0x%x\n", eflags); 950 return (EINVAL); 951 } 952 953 /* 954 * Don't allow users to load a valid privileged %cs. Let the 955 * hardware check for invalid selectors, excess privilege in 956 * other selectors, invalid %eip's and invalid %esp's. 957 */ 958 cs = ucp->uc_mcontext.mc_cs; 959 if (!CS_SECURE(cs)) { 960 printf("sigreturn: cs = 0x%x\n", cs); 961 trapsignal(p, SIGBUS, T_PROTFLT); 962 return (EINVAL); 963 } 964 965 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs)); 966 } 967 968 PROC_LOCK(p); 969#if defined(COMPAT_43) || defined(COMPAT_SUNOS) 970 if (ucp->uc_mcontext.mc_onstack & 1) 971 p->p_sigstk.ss_flags |= SS_ONSTACK; 972 else 973 p->p_sigstk.ss_flags &= ~SS_ONSTACK; 974#endif 975 976 p->p_sigmask = ucp->uc_sigmask; 977 SIG_CANTMASK(p->p_sigmask); 978 PROC_UNLOCK(p); 979 return (EJUSTRETURN); 980} 981 982/* 983 * Machine dependent boot() routine 984 * 985 * I haven't seen anything to put here yet 986 * Possibly some stuff might be grafted back here from boot() 987 */ 988void 989cpu_boot(int howto) 990{ 991} 992 993/* 994 * Shutdown the CPU as much as possible 995 */ 996void 997cpu_halt(void) 998{ 999 for (;;) 1000 __asm__ ("hlt"); 1001} 1002 1003/* 1004 * Hook to idle the CPU when possible. This currently only works in 1005 * the !SMP case, as there is no clean way to ensure that a CPU will be 1006 * woken when there is work available for it. 
 */
static int cpu_idle_hlt = 1;
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
    &cpu_idle_hlt, 0, "Idle loop HLT enable");

/*
 * Note that we have to be careful here to avoid a race between checking
 * procrunnable() and actually halting.  If we don't do this, we may waste
 * the time between calling hlt and the next interrupt even though there
 * is a runnable process.
 */
void
cpu_idle(void)
{
#ifndef SMP
	if (cpu_idle_hlt) {
		disable_intr();
		if (procrunnable())
			enable_intr();
		else {
			enable_intr();
			__asm __volatile("hlt");
		}
	}
#endif
}

/*
 * Clear registers on exec
 */
void
setregs(p, entry, stack, ps_strings)
	struct proc *p;
	u_long entry;
	u_long stack;
	u_long ps_strings;
{
	struct trapframe *regs = p->p_md.md_regs;
	struct pcb *pcb = &p->p_addr->u_pcb;

	if (pcb->pcb_ldt)
		user_ldt_free(pcb);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = entry;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = ps_strings;

	/* reset %gs as well */
	if (pcb == PCPU_GET(curpcb))
		load_gs(_udatasel);
	else
		pcb->pcb_gs = _udatasel;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == PCPU_GET(curpcb)) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	p->p_addr->u_pcb.pcb_flags &= ~FP_SOFTFP;

	/*
	 * Arrange to trap the next npx or `fwait' instruction (see npx.c
	 * for why fwait must be trapped at least if there is an npx or an
	 * emulator).  This is mainly to handle the case where npx0 is not
	 * configured, since the npx routines normally set up the trap
	 * otherwise.  It should be done only at boot time, but doing it
	 * here allows modifying `npx_exists' for testing the emulator on
	 * systems with an npx.
	 */
	load_cr0(rcr0() | CR0_MP | CR0_TS);

#ifdef DEV_NPX
	/* Initialize the npx (if any) for the current process. */
	npxinit(__INITIAL_NPXCW__);
#endif

	/*
	 * XXX - Linux emulator
	 * Make sure edx is 0x0 on entry.  Linux binaries depend
	 * on it.
	 */
	p->p_retval[1] = 0;
}

void
cpu_setregs(void)
{
	unsigned int cr0;

	cr0 = rcr0();
	cr0 |= CR0_NE;			/* Done by npxinit() */
	cr0 |= CR0_MP | CR0_TS;		/* Done at every execve() too.
*/ 1132#ifndef I386_CPU 1133 cr0 |= CR0_WP | CR0_AM; 1134#endif 1135 load_cr0(cr0); 1136 load_gs(_udatasel); 1137} 1138 1139static int 1140sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS) 1141{ 1142 int error; 1143 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, 1144 req); 1145 if (!error && req->newptr) 1146 resettodr(); 1147 return (error); 1148} 1149 1150SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW, 1151 &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", ""); 1152 1153SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set, 1154 CTLFLAG_RW, &disable_rtc_set, 0, ""); 1155 1156SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo, 1157 CTLFLAG_RD, &bootinfo, bootinfo, ""); 1158 1159SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock, 1160 CTLFLAG_RW, &wall_cmos_clock, 0, ""); 1161 1162/* 1163 * Initialize 386 and configure to run kernel 1164 */ 1165 1166/* 1167 * Initialize segments & interrupt table 1168 */ 1169 1170int _default_ldt; 1171union descriptor gdt[NGDT * MAXCPU]; /* global descriptor table */ 1172static struct gate_descriptor idt0[NIDT]; 1173struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */ 1174union descriptor ldt[NLDT]; /* local descriptor table */ 1175#ifdef SMP 1176/* table descriptors - used to load tables by microp */ 1177struct region_descriptor r_gdt, r_idt; 1178#endif 1179 1180int private_tss; /* flag indicating private tss */ 1181 1182#if defined(I586_CPU) && !defined(NO_F00F_HACK) 1183extern int has_f00f_bug; 1184#endif 1185 1186static struct i386tss dblfault_tss; 1187static char dblfault_stack[PAGE_SIZE]; 1188 1189extern struct user *proc0paddr; 1190 1191 1192/* software prototypes -- in more palatable form */ 1193struct soft_segment_descriptor gdt_segs[] = { 1194/* GNULL_SEL 0 Null Descriptor */ 1195{ 0x0, /* segment base address */ 1196 0x0, /* length */ 1197 0, /* segment type */ 1198 0, /* segment descriptor priority level */ 1199 0, /* segment descriptor present */ 1200 0, 0, 1201 0, /* default 32 vs 16 bit size */ 1202 0 /* limit granularity (byte/page units)*/ }, 1203/* GCODE_SEL 1 Code Descriptor for kernel */ 1204{ 0x0, /* segment base address */ 1205 0xfffff, /* length - all address space */ 1206 SDT_MEMERA, /* segment type */ 1207 0, /* segment descriptor priority level */ 1208 1, /* segment descriptor present */ 1209 0, 0, 1210 1, /* default 32 vs 16 bit size */ 1211 1 /* limit granularity (byte/page units)*/ }, 1212/* GDATA_SEL 2 Data Descriptor for kernel */ 1213{ 0x0, /* segment base address */ 1214 0xfffff, /* length - all address space */ 1215 SDT_MEMRWA, /* segment type */ 1216 0, /* segment descriptor priority level */ 1217 1, /* segment descriptor present */ 1218 0, 0, 1219 1, /* default 32 vs 16 bit size */ 1220 1 /* limit granularity (byte/page units)*/ }, 1221/* GPRIV_SEL 3 SMP Per-Processor Private Data Descriptor */ 1222{ 0x0, /* segment base address */ 1223 0xfffff, /* length - all address space */ 1224 SDT_MEMRWA, /* segment type */ 1225 0, /* segment descriptor priority level */ 1226 1, /* segment descriptor present */ 1227 0, 0, 1228 1, /* default 32 vs 16 bit size */ 1229 1 /* limit granularity (byte/page units)*/ }, 1230/* GPROC0_SEL 4 Proc 0 Tss Descriptor */ 1231{ 1232 0x0, /* segment base address */ 1233 sizeof(struct i386tss)-1,/* length - all address space */ 1234 SDT_SYS386TSS, /* segment type */ 1235 0, /* segment descriptor priority level */ 1236 1, /* segment descriptor present */ 1237 0, 0, 1238 0, /* unused - default 32 vs 16 bit size */ 1239 0 /* limit granularity (byte/page 
units)*/ }, 1240/* GLDT_SEL 5 LDT Descriptor */ 1241{ (int) ldt, /* segment base address */ 1242 sizeof(ldt)-1, /* length - all address space */ 1243 SDT_SYSLDT, /* segment type */ 1244 SEL_UPL, /* segment descriptor priority level */ 1245 1, /* segment descriptor present */ 1246 0, 0, 1247 0, /* unused - default 32 vs 16 bit size */ 1248 0 /* limit granularity (byte/page units)*/ }, 1249/* GUSERLDT_SEL 6 User LDT Descriptor per process */ 1250{ (int) ldt, /* segment base address */ 1251 (512 * sizeof(union descriptor)-1), /* length */ 1252 SDT_SYSLDT, /* segment type */ 1253 0, /* segment descriptor priority level */ 1254 1, /* segment descriptor present */ 1255 0, 0, 1256 0, /* unused - default 32 vs 16 bit size */ 1257 0 /* limit granularity (byte/page units)*/ }, 1258/* GTGATE_SEL 7 Null Descriptor - Placeholder */ 1259{ 0x0, /* segment base address */ 1260 0x0, /* length - all address space */ 1261 0, /* segment type */ 1262 0, /* segment descriptor priority level */ 1263 0, /* segment descriptor present */ 1264 0, 0, 1265 0, /* default 32 vs 16 bit size */ 1266 0 /* limit granularity (byte/page units)*/ }, 1267/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */ 1268{ 0x400, /* segment base address */ 1269 0xfffff, /* length */ 1270 SDT_MEMRWA, /* segment type */ 1271 0, /* segment descriptor priority level */ 1272 1, /* segment descriptor present */ 1273 0, 0, 1274 1, /* default 32 vs 16 bit size */ 1275 1 /* limit granularity (byte/page units)*/ }, 1276/* GPANIC_SEL 9 Panic Tss Descriptor */ 1277{ (int) &dblfault_tss, /* segment base address */ 1278 sizeof(struct i386tss)-1,/* length - all address space */ 1279 SDT_SYS386TSS, /* segment type */ 1280 0, /* segment descriptor priority level */ 1281 1, /* segment descriptor present */ 1282 0, 0, 1283 0, /* unused - default 32 vs 16 bit size */ 1284 0 /* limit granularity (byte/page units)*/ }, 1285/* GBIOSCODE32_SEL 10 BIOS 32-bit interface (32bit Code) */ 1286{ 0, /* segment base address (overwritten) */ 1287 0xfffff, /* length */ 1288 SDT_MEMERA, /* segment type */ 1289 0, /* segment descriptor priority level */ 1290 1, /* segment descriptor present */ 1291 0, 0, 1292 0, /* default 32 vs 16 bit size */ 1293 1 /* limit granularity (byte/page units)*/ }, 1294/* GBIOSCODE16_SEL 11 BIOS 32-bit interface (16bit Code) */ 1295{ 0, /* segment base address (overwritten) */ 1296 0xfffff, /* length */ 1297 SDT_MEMERA, /* segment type */ 1298 0, /* segment descriptor priority level */ 1299 1, /* segment descriptor present */ 1300 0, 0, 1301 0, /* default 32 vs 16 bit size */ 1302 1 /* limit granularity (byte/page units)*/ }, 1303/* GBIOSDATA_SEL 12 BIOS 32-bit interface (Data) */ 1304{ 0, /* segment base address (overwritten) */ 1305 0xfffff, /* length */ 1306 SDT_MEMRWA, /* segment type */ 1307 0, /* segment descriptor priority level */ 1308 1, /* segment descriptor present */ 1309 0, 0, 1310 1, /* default 32 vs 16 bit size */ 1311 1 /* limit granularity (byte/page units)*/ }, 1312/* GBIOSUTIL_SEL 13 BIOS 16-bit interface (Utility) */ 1313{ 0, /* segment base address (overwritten) */ 1314 0xfffff, /* length */ 1315 SDT_MEMRWA, /* segment type */ 1316 0, /* segment descriptor priority level */ 1317 1, /* segment descriptor present */ 1318 0, 0, 1319 0, /* default 32 vs 16 bit size */ 1320 1 /* limit granularity (byte/page units)*/ }, 1321/* GBIOSARGS_SEL 14 BIOS 16-bit interface (Arguments) */ 1322{ 0, /* segment base address (overwritten) */ 1323 0xfffff, /* length */ 1324 SDT_MEMRWA, /* segment type */ 1325 0, /* 
segment descriptor priority level */ 1326 1, /* segment descriptor present */ 1327 0, 0, 1328 0, /* default 32 vs 16 bit size */ 1329 1 /* limit granularity (byte/page units)*/ }, 1330}; 1331 1332static struct soft_segment_descriptor ldt_segs[] = { 1333 /* Null Descriptor - overwritten by call gate */ 1334{ 0x0, /* segment base address */ 1335 0x0, /* length - all address space */ 1336 0, /* segment type */ 1337 0, /* segment descriptor priority level */ 1338 0, /* segment descriptor present */ 1339 0, 0, 1340 0, /* default 32 vs 16 bit size */ 1341 0 /* limit granularity (byte/page units)*/ }, 1342 /* Null Descriptor - overwritten by call gate */ 1343{ 0x0, /* segment base address */ 1344 0x0, /* length - all address space */ 1345 0, /* segment type */ 1346 0, /* segment descriptor priority level */ 1347 0, /* segment descriptor present */ 1348 0, 0, 1349 0, /* default 32 vs 16 bit size */ 1350 0 /* limit granularity (byte/page units)*/ }, 1351 /* Null Descriptor - overwritten by call gate */ 1352{ 0x0, /* segment base address */ 1353 0x0, /* length - all address space */ 1354 0, /* segment type */ 1355 0, /* segment descriptor priority level */ 1356 0, /* segment descriptor present */ 1357 0, 0, 1358 0, /* default 32 vs 16 bit size */ 1359 0 /* limit granularity (byte/page units)*/ }, 1360 /* Code Descriptor for user */ 1361{ 0x0, /* segment base address */ 1362 0xfffff, /* length - all address space */ 1363 SDT_MEMERA, /* segment type */ 1364 SEL_UPL, /* segment descriptor priority level */ 1365 1, /* segment descriptor present */ 1366 0, 0, 1367 1, /* default 32 vs 16 bit size */ 1368 1 /* limit granularity (byte/page units)*/ }, 1369 /* Null Descriptor - overwritten by call gate */ 1370{ 0x0, /* segment base address */ 1371 0x0, /* length - all address space */ 1372 0, /* segment type */ 1373 0, /* segment descriptor priority level */ 1374 0, /* segment descriptor present */ 1375 0, 0, 1376 0, /* default 32 vs 16 bit size */ 1377 0 /* limit granularity (byte/page units)*/ }, 1378 /* Data Descriptor for user */ 1379{ 0x0, /* segment base address */ 1380 0xfffff, /* length - all address space */ 1381 SDT_MEMRWA, /* segment type */ 1382 SEL_UPL, /* segment descriptor priority level */ 1383 1, /* segment descriptor present */ 1384 0, 0, 1385 1, /* default 32 vs 16 bit size */ 1386 1 /* limit granularity (byte/page units)*/ }, 1387}; 1388 1389void 1390setidt(idx, func, typ, dpl, selec) 1391 int idx; 1392 inthand_t *func; 1393 int typ; 1394 int dpl; 1395 int selec; 1396{ 1397 struct gate_descriptor *ip; 1398 1399 ip = idt + idx; 1400 ip->gd_looffset = (int)func; 1401 ip->gd_selector = selec; 1402 ip->gd_stkcpy = 0; 1403 ip->gd_xx = 0; 1404 ip->gd_type = typ; 1405 ip->gd_dpl = dpl; 1406 ip->gd_p = 1; 1407 ip->gd_hioffset = ((int)func)>>16 ; 1408} 1409 1410#define IDTVEC(name) __CONCAT(X,name) 1411 1412extern inthand_t 1413 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl), 1414 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm), 1415 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot), 1416 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align), 1417 IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall); 1418 1419void 1420sdtossd(sd, ssd) 1421 struct segment_descriptor *sd; 1422 struct soft_segment_descriptor *ssd; 1423{ 1424 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase; 1425 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit; 1426 ssd->ssd_type = sd->sd_type; 1427 ssd->ssd_dpl = sd->sd_dpl; 1428 ssd->ssd_p = sd->sd_p; 1429 ssd->ssd_def32 = 
sd->sd_def32; 1430 ssd->ssd_gran = sd->sd_gran; 1431} 1432 1433#define PHYSMAP_SIZE (2 * 8) 1434 1435/* 1436 * Populate the (physmap) array with base/bound pairs describing the 1437 * available physical memory in the system, then test this memory and 1438 * build the phys_avail array describing the actually-available memory. 1439 * 1440 * If we cannot accurately determine the physical memory map, then use 1441 * value from the 0xE801 call, and failing that, the RTC. 1442 * 1443 * Total memory size may be set by the kernel environment variable 1444 * hw.physmem or the compile-time define MAXMEM. 1445 */ 1446static void 1447getmemsize(int first) 1448{ 1449 int i, physmap_idx, pa_indx; 1450 u_int basemem, extmem; 1451 struct vm86frame vmf; 1452 struct vm86context vmc; 1453 vm_offset_t pa, physmap[PHYSMAP_SIZE]; 1454 pt_entry_t pte; 1455 const char *cp; 1456 struct bios_smap *smap; 1457 1458 bzero(&vmf, sizeof(struct vm86frame)); 1459 bzero(physmap, sizeof(physmap)); 1460 1461 /* 1462 * Perform "base memory" related probes & setup 1463 */ 1464 vm86_intcall(0x12, &vmf); 1465 basemem = vmf.vmf_ax; 1466 if (basemem > 640) { 1467 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n", 1468 basemem); 1469 basemem = 640; 1470 } 1471 1472 /* 1473 * XXX if biosbasemem is now < 640, there is a `hole' 1474 * between the end of base memory and the start of 1475 * ISA memory. The hole may be empty or it may 1476 * contain BIOS code or data. Map it read/write so 1477 * that the BIOS can write to it. (Memory from 0 to 1478 * the physical end of the kernel is mapped read-only 1479 * to begin with and then parts of it are remapped. 1480 * The parts that aren't remapped form holes that 1481 * remain read-only and are unused by the kernel. 1482 * The base memory area is below the physical end of 1483 * the kernel and right now forms a read-only hole. 1484 * The part of it from PAGE_SIZE to 1485 * (trunc_page(biosbasemem * 1024) - 1) will be 1486 * remapped and used by the kernel later.) 1487 * 1488 * This code is similar to the code used in 1489 * pmap_mapdev, but since no memory needs to be 1490 * allocated we simply change the mapping. 1491 */ 1492 for (pa = trunc_page(basemem * 1024); 1493 pa < ISA_HOLE_START; pa += PAGE_SIZE) { 1494 pte = (pt_entry_t)vtopte(pa + KERNBASE); 1495 *pte = pa | PG_RW | PG_V; 1496 } 1497 1498 /* 1499 * if basemem != 640, map pages r/w into vm86 page table so 1500 * that the bios can scribble on it. 1501 */ 1502 pte = (pt_entry_t)vm86paddr; 1503 for (i = basemem / 4; i < 160; i++) 1504 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U; 1505 1506 /* 1507 * map page 1 R/W into the kernel page table so we can use it 1508 * as a buffer. The kernel will unmap this page later. 
	 */
	pte = (pt_entry_t)vtopte(KERNBASE + (1 << PAGE_SHIFT));
	*pte = (1 << PAGE_SHIFT) | PG_RW | PG_V;

	/*
	 * get memory map with INT 15:E820
	 */
	vmc.npages = 0;
	smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
	vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);

	physmap_idx = 0;
	vmf.vmf_ebx = 0;
	do {
		vmf.vmf_eax = 0xE820;
		vmf.vmf_edx = SMAP_SIG;
		vmf.vmf_ecx = sizeof(struct bios_smap);
		i = vm86_datacall(0x15, &vmf, &vmc);
		if (i || vmf.vmf_eax != SMAP_SIG)
			break;
		if (boothowto & RB_VERBOSE)
			printf("SMAP type=%02x base=%08x %08x len=%08x %08x\n",
			    smap->type,
			    *(u_int32_t *)((char *)&smap->base + 4),
			    (u_int32_t)smap->base,
			    *(u_int32_t *)((char *)&smap->length + 4),
			    (u_int32_t)smap->length);

		if (smap->type != 0x01)
			goto next_run;

		if (smap->length == 0)
			goto next_run;

		if (smap->base >= 0xffffffff) {
			printf("%uK of memory above 4GB ignored\n",
			    (u_int)(smap->length / 1024));
			goto next_run;
		}

		for (i = 0; i <= physmap_idx; i += 2) {
			if (smap->base < physmap[i + 1]) {
				if (boothowto & RB_VERBOSE)
					printf(
	"Overlapping or non-monotonic memory region, ignoring second region\n");
				goto next_run;
			}
		}

		if (smap->base == physmap[physmap_idx + 1]) {
			physmap[physmap_idx + 1] += smap->length;
			goto next_run;
		}

		physmap_idx += 2;
		if (physmap_idx == PHYSMAP_SIZE) {
			printf(
		"Too many segments in the physical address map, giving up\n");
			break;
		}
		physmap[physmap_idx] = smap->base;
		physmap[physmap_idx + 1] = smap->base + smap->length;
next_run:
	} while (vmf.vmf_ebx != 0);

	if (physmap[1] != 0)
		goto physmap_done;

	/*
	 * If we failed above, try memory map with INT 15:E801
	 */
	vmf.vmf_ax = 0xE801;
	if (vm86_intcall(0x15, &vmf) == 0) {
		extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
	} else {
#if 0
		vmf.vmf_ah = 0x88;
		vm86_intcall(0x15, &vmf);
		extmem = vmf.vmf_ax;
#else
		/*
		 * Prefer the RTC value for extended memory.
		 */
		extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
#endif
	}

	/*
	 * Special hack for chipsets that still remap the 384k hole when
	 * there's 16MB of memory - this really confuses people that
	 * are trying to use bus mastering ISA controllers with the
	 * "16MB limit"; they only have 16MB, but the remapping puts
	 * them beyond the limit.
	 *
	 * If extended memory is between 15-16MB (16-17MB phys address range),
	 * chop it to 15MB.
	 */
	if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
		extmem = 15 * 1024;

	physmap[0] = 0;
	physmap[1] = basemem * 1024;
	physmap_idx = 2;
	physmap[physmap_idx] = 0x100000;
	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;

physmap_done:
	/*
	 * Now, physmap contains a map of physical memory.
	 */

#ifdef SMP
	/* make hole for AP bootstrap code */
	physmap[1] = mp_bootaddress(physmap[1] / 1024);

	/* look for the MP hardware - needed for apic addresses */
	i386_mp_probe();
#endif

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".
We may adjust this 1632 * based on ``hw.physmem'' and the results of the memory test. 1633 */ 1634 Maxmem = atop(physmap[physmap_idx + 1]); 1635 1636#ifdef MAXMEM 1637 Maxmem = MAXMEM / 4; 1638#endif 1639 1640 /* 1641 * hw.maxmem is a size in bytes; we also allow k, m, and g suffixes 1642 * for the appropriate modifiers. This overrides MAXMEM. 1643 */ 1644 if ((cp = getenv("hw.physmem")) != NULL) { 1645 u_int64_t AllowMem, sanity; 1646 char *ep; 1647 1648 sanity = AllowMem = strtouq(cp, &ep, 0); 1649 if ((ep != cp) && (*ep != 0)) { 1650 switch(*ep) { 1651 case 'g': 1652 case 'G': 1653 AllowMem <<= 10; 1654 case 'm': 1655 case 'M': 1656 AllowMem <<= 10; 1657 case 'k': 1658 case 'K': 1659 AllowMem <<= 10; 1660 break; 1661 default: 1662 AllowMem = sanity = 0; 1663 } 1664 if (AllowMem < sanity) 1665 AllowMem = 0; 1666 } 1667 if (AllowMem == 0) 1668 printf("Ignoring invalid memory size of '%s'\n", cp); 1669 else 1670 Maxmem = atop(AllowMem); 1671 } 1672 1673 if (atop(physmap[physmap_idx + 1]) != Maxmem && 1674 (boothowto & RB_VERBOSE)) 1675 printf("Physical memory use set to %uK\n", Maxmem * 4); 1676 1677 /* 1678 * If Maxmem has been increased beyond what the system has detected, 1679 * extend the last memory segment to the new limit. 1680 */ 1681 if (atop(physmap[physmap_idx + 1]) < Maxmem) 1682 physmap[physmap_idx + 1] = ptoa(Maxmem); 1683 1684 /* call pmap initialization to make new kernel address space */ 1685 pmap_bootstrap(first, 0); 1686 1687 /* 1688 * Size up each available chunk of physical memory. 1689 */ 1690 physmap[0] = PAGE_SIZE; /* mask off page 0 */ 1691 pa_indx = 0; 1692 phys_avail[pa_indx++] = physmap[0]; 1693 phys_avail[pa_indx] = physmap[0]; 1694#if 0 1695 pte = (pt_entry_t)vtopte(KERNBASE); 1696#else 1697 pte = (pt_entry_t)CMAP1; 1698#endif 1699 1700 /* 1701 * physmap is in bytes, so when converting to page boundaries, 1702 * round up the start address and round down the end address. 1703 */ 1704 for (i = 0; i <= physmap_idx; i += 2) { 1705 vm_offset_t end; 1706 1707 end = ptoa(Maxmem); 1708 if (physmap[i + 1] < end) 1709 end = trunc_page(physmap[i + 1]); 1710 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) { 1711 int tmp, page_bad; 1712#if 0 1713 int *ptr = 0; 1714#else 1715 int *ptr = (int *)CADDR1; 1716#endif 1717 1718 /* 1719 * block out kernel memory as not available. 1720 */ 1721 if (pa >= 0x100000 && pa < first) 1722 continue; 1723 1724 page_bad = FALSE; 1725 1726 /* 1727 * map page into kernel: valid, read/write,non-cacheable 1728 */ 1729 *pte = pa | PG_V | PG_RW | PG_N; 1730 invltlb(); 1731 1732 tmp = *(int *)ptr; 1733 /* 1734 * Test for alternating 1's and 0's 1735 */ 1736 *(volatile int *)ptr = 0xaaaaaaaa; 1737 if (*(volatile int *)ptr != 0xaaaaaaaa) { 1738 page_bad = TRUE; 1739 } 1740 /* 1741 * Test for alternating 0's and 1's 1742 */ 1743 *(volatile int *)ptr = 0x55555555; 1744 if (*(volatile int *)ptr != 0x55555555) { 1745 page_bad = TRUE; 1746 } 1747 /* 1748 * Test for all 1's 1749 */ 1750 *(volatile int *)ptr = 0xffffffff; 1751 if (*(volatile int *)ptr != 0xffffffff) { 1752 page_bad = TRUE; 1753 } 1754 /* 1755 * Test for all 0's 1756 */ 1757 *(volatile int *)ptr = 0x0; 1758 if (*(volatile int *)ptr != 0x0) { 1759 page_bad = TRUE; 1760 } 1761 /* 1762 * Restore original value. 1763 */ 1764 *(int *)ptr = tmp; 1765 1766 /* 1767 * Adjust array of valid/good pages. 
			 */
			if (page_bad == TRUE) {
				continue;
			}
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer.  Otherwise start a new chunk.
			 * Note that "end" points one page past the last
			 * valid address, making the range >= start and < end.
			 * If we're also doing a speculative memory
			 * test and we are at or past the end, bump up Maxmem
			 * so that we keep going.  The first bad page
			 * will terminate the loop.
			 */
			if (phys_avail[pa_indx] == pa) {
				phys_avail[pa_indx] += PAGE_SIZE;
			} else {
				pa_indx++;
				if (pa_indx == PHYS_AVAIL_ARRAY_END) {
					printf("Too many holes in the physical address space, giving up\n");
					pa_indx--;
					break;
				}
				phys_avail[pa_indx++] = pa;	/* start */
				phys_avail[pa_indx] = pa + PAGE_SIZE;	/* end */
			}
			physmem++;
		}
	}
	*pte = 0;
	invltlb();

	/*
	 * XXX
	 * The last chunk must contain at least one page plus the message
	 * buffer to avoid complicating other code (message buffer address
	 * calculation, etc.).
	 */
	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
	    round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
		phys_avail[pa_indx--] = 0;
		phys_avail[pa_indx--] = 0;
	}

	Maxmem = atop(phys_avail[pa_indx]);

	/* Trim off space for the message buffer. */
	phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);

	avail_end = phys_avail[pa_indx];
}

void
init386(first)
	int first;
{
	int x;
	struct gate_descriptor *gdp;
	int gsel_tss;
#ifndef SMP
	/* table descriptors - used to load tables by microp */
	struct region_descriptor r_gdt, r_idt;
#endif
	int off;

	proc0.p_addr = proc0paddr;

	atdevbase = ISA_HOLE_START + KERNBASE;

	if (bootinfo.bi_modulep) {
		preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
		preload_bootstrap_relocate(KERNBASE);
	} else {
		printf("WARNING: loader(8) metadata is missing!\n");
	}
	if (bootinfo.bi_envp)
		kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;

	/*
	 * make gdt memory segments, the code segment goes up to end of the
	 * page with etext in it, the data segment goes to the end of
	 * the address space
	 */
	/*
	 * XXX text protection is temporarily (?) disabled.  The limit was
	 * i386_btop(round_page(etext)) - 1.
1856 */ 1857 gdt_segs[GCODE_SEL].ssd_limit = i386_btop(0) - 1; 1858 gdt_segs[GDATA_SEL].ssd_limit = i386_btop(0) - 1; 1859#ifdef SMP 1860 gdt_segs[GPRIV_SEL].ssd_limit = 1861 i386_btop(sizeof(struct privatespace)) - 1; 1862 gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[0]; 1863 gdt_segs[GPROC0_SEL].ssd_base = 1864 (int) &SMP_prvspace[0].globaldata.gd_common_tss; 1865 SMP_prvspace[0].globaldata.gd_prvspace = &SMP_prvspace[0].globaldata; 1866#else 1867 gdt_segs[GPRIV_SEL].ssd_limit = 1868 i386_btop(sizeof(struct globaldata)) - 1; 1869 gdt_segs[GPRIV_SEL].ssd_base = (int) &__globaldata; 1870 gdt_segs[GPROC0_SEL].ssd_base = 1871 (int) &__globaldata.gd_common_tss; 1872 __globaldata.gd_prvspace = &__globaldata; 1873#endif 1874 1875 for (x = 0; x < NGDT; x++) { 1876#ifdef BDE_DEBUGGER 1877 /* avoid overwriting db entries with APM ones */ 1878 if (x >= GAPMCODE32_SEL && x <= GAPMDATA_SEL) 1879 continue; 1880#endif 1881 ssdtosd(&gdt_segs[x], &gdt[x].sd); 1882 } 1883 1884 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 1885 r_gdt.rd_base = (int) gdt; 1886 lgdt(&r_gdt); 1887 1888 /* setup curproc so that mutexes work */ 1889 PCPU_SET(curproc, &proc0); 1890 PCPU_SET(spinlocks, NULL); 1891 1892 LIST_INIT(&proc0.p_contested); 1893 1894 mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_RECURSE); 1895#ifdef SMP 1896 /* 1897 * Interrupts can happen very early, so initialize imen_mtx here, rather 1898 * than in init_locks(). 1899 */ 1900 mtx_init(&imen_mtx, "imen", MTX_SPIN); 1901#endif 1902 1903 /* 1904 * Giant is used early for at least debugger traps and unexpected traps. 1905 */ 1906 mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE); 1907 mtx_init(&proc0.p_mtx, "process lock", MTX_DEF); 1908 mtx_lock(&Giant); 1909 1910 /* make ldt memory segments */ 1911 /* 1912 * The data segment limit must not cover the user area because we 1913 * don't want the user area to be writable in copyout() etc. (page 1914 * level protection is lost in kernel mode on 386's). Also, we 1915 * don't want the user area to be writable directly (page level 1916 * protection of the user area is not available on 486's with 1917 * CR0_WP set, because there is no user-read/kernel-write mode). 1918 * 1919 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max. And it 1920 * should be spelled ...MAX_USER... 1921 */ 1922#define VM_END_USER_RW_ADDRESS VM_MAXUSER_ADDRESS 1923 /* 1924 * The code segment limit has to cover the user area until we move 1925 * the signal trampoline out of the user area. This is safe because 1926 * the code segment cannot be written to directly. 
1927 */ 1928#define VM_END_USER_R_ADDRESS (VM_END_USER_RW_ADDRESS + UPAGES * PAGE_SIZE) 1929 ldt_segs[LUCODE_SEL].ssd_limit = i386_btop(VM_END_USER_R_ADDRESS) - 1; 1930 ldt_segs[LUDATA_SEL].ssd_limit = i386_btop(VM_END_USER_RW_ADDRESS) - 1; 1931 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++) 1932 ssdtosd(&ldt_segs[x], &ldt[x].sd); 1933 1934 _default_ldt = GSEL(GLDT_SEL, SEL_KPL); 1935 lldt(_default_ldt); 1936 PCPU_SET(currentldt, _default_ldt); 1937 1938 /* exceptions */ 1939 for (x = 0; x < NIDT; x++) 1940 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1941 setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1942 setidt(1, &IDTVEC(dbg), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1943 setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1944 setidt(3, &IDTVEC(bpt), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL)); 1945 setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL)); 1946 setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1947 setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1948 setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1949 setidt(8, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL)); 1950 setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1951 setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1952 setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1953 setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1954 setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1955 setidt(14, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1956 setidt(15, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1957 setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1958 setidt(17, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1959 setidt(18, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1960 setidt(0x80, &IDTVEC(int0x80_syscall), 1961 SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL)); 1962 1963 r_idt.rd_limit = sizeof(idt0) - 1; 1964 r_idt.rd_base = (int) idt; 1965 lidt(&r_idt); 1966 1967 /* 1968 * We need this mutex before the console probe. 1969 */ 1970 mtx_init(&clock_lock, "clk", MTX_SPIN | MTX_RECURSE); 1971 1972 /* 1973 * Initialize the console before we print anything out. 1974 */ 1975 cninit(); 1976 1977#ifdef DEV_ISA 1978 isa_defaultirq(); 1979#endif 1980 1981#ifdef DDB 1982 kdb_init(); 1983 if (boothowto & RB_KDB) 1984 Debugger("Boot flags requested debugger"); 1985#endif 1986 1987 finishidentcpu(); /* Final stage of CPU initialization */ 1988 setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1989 setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1990 initializecpu(); /* Initialize CPU registers */ 1991 1992 /* make an initial tss so cpu can get interrupt stack on syscall! 
*/ 1993 PCPU_SET(common_tss.tss_esp0, 1994 (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16); 1995 PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL)); 1996 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); 1997 private_tss = 0; 1998 PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd); 1999 PCPU_SET(common_tssd, *PCPU_GET(tss_gdt)); 2000 PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16); 2001 ltr(gsel_tss); 2002 2003 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 = 2004 dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)]; 2005 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 = 2006 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL); 2007 dblfault_tss.tss_cr3 = (int)IdlePTD; 2008 dblfault_tss.tss_eip = (int) dblfault_handler; 2009 dblfault_tss.tss_eflags = PSL_KERNEL; 2010 dblfault_tss.tss_ds = dblfault_tss.tss_es = 2011 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL); 2012 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL); 2013 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL); 2014 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL); 2015 2016 vm86_initialize(); 2017 getmemsize(first); 2018 2019 /* now running on new page tables, configured,and u/iom is accessible */ 2020 2021 /* Map the message buffer. */ 2022 for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE) 2023 pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off); 2024 2025 msgbufinit(msgbufp, MSGBUF_SIZE); 2026 2027 /* make a call gate to reenter kernel with */ 2028 gdp = &ldt[LSYS5CALLS_SEL].gd; 2029 2030 x = (int) &IDTVEC(lcall_syscall); 2031 gdp->gd_looffset = x; 2032 gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL); 2033 gdp->gd_stkcpy = 1; 2034 gdp->gd_type = SDT_SYS386CGT; 2035 gdp->gd_dpl = SEL_UPL; 2036 gdp->gd_p = 1; 2037 gdp->gd_hioffset = x >> 16; 2038 2039 /* XXX does this work? 
*/ 2040 ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL]; 2041 ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL]; 2042 2043 /* transfer to user mode */ 2044 2045 _ucodesel = LSEL(LUCODE_SEL, SEL_UPL); 2046 _udatasel = LSEL(LUDATA_SEL, SEL_UPL); 2047 2048 /* setup proc 0's pcb */ 2049 proc0.p_addr->u_pcb.pcb_flags = 0; 2050 proc0.p_addr->u_pcb.pcb_cr3 = (int)IdlePTD; 2051 proc0.p_addr->u_pcb.pcb_ext = 0; 2052 proc0.p_md.md_regs = &proc0_tf; 2053} 2054 2055#if defined(I586_CPU) && !defined(NO_F00F_HACK) 2056static void f00f_hack(void *unused); 2057SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL); 2058 2059static void 2060f00f_hack(void *unused) { 2061 struct gate_descriptor *new_idt; 2062#ifndef SMP 2063 struct region_descriptor r_idt; 2064#endif 2065 vm_offset_t tmp; 2066 2067 if (!has_f00f_bug) 2068 return; 2069 2070 printf("Intel Pentium detected, installing workaround for F00F bug\n"); 2071 2072 r_idt.rd_limit = sizeof(idt0) - 1; 2073 2074 tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2); 2075 if (tmp == 0) 2076 panic("kmem_alloc returned 0"); 2077 if (((unsigned int)tmp & (PAGE_SIZE-1)) != 0) 2078 panic("kmem_alloc returned non-page-aligned memory"); 2079 /* Put the first seven entries in the lower page */ 2080 new_idt = (struct gate_descriptor*)(tmp + PAGE_SIZE - (7*8)); 2081 bcopy(idt, new_idt, sizeof(idt0)); 2082 r_idt.rd_base = (int)new_idt; 2083 lidt(&r_idt); 2084 idt = new_idt; 2085 if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE, 2086 VM_PROT_READ, FALSE) != KERN_SUCCESS) 2087 panic("vm_map_protect failed"); 2088 return; 2089} 2090#endif /* defined(I586_CPU) && !NO_F00F_HACK */ 2091 2092int 2093ptrace_set_pc(p, addr) 2094 struct proc *p; 2095 unsigned long addr; 2096{ 2097 p->p_md.md_regs->tf_eip = addr; 2098 return (0); 2099} 2100 2101int 2102ptrace_single_step(p) 2103 struct proc *p; 2104{ 2105 p->p_md.md_regs->tf_eflags |= PSL_T; 2106 return (0); 2107} 2108 2109int ptrace_read_u_check(p, addr, len) 2110 struct proc *p; 2111 vm_offset_t addr; 2112 size_t len; 2113{ 2114 vm_offset_t gap; 2115 2116 if ((vm_offset_t) (addr + len) < addr) 2117 return EPERM; 2118 if ((vm_offset_t) (addr + len) <= sizeof(struct user)) 2119 return 0; 2120 2121 gap = (char *) p->p_md.md_regs - (char *) p->p_addr; 2122 2123 if ((vm_offset_t) addr < gap) 2124 return EPERM; 2125 if ((vm_offset_t) (addr + len) <= 2126 (vm_offset_t) (gap + sizeof(struct trapframe))) 2127 return 0; 2128 return EPERM; 2129} 2130 2131int ptrace_write_u(p, off, data) 2132 struct proc *p; 2133 vm_offset_t off; 2134 long data; 2135{ 2136 struct trapframe frame_copy; 2137 vm_offset_t min; 2138 struct trapframe *tp; 2139 2140 /* 2141 * Privileged kernel state is scattered all over the user area. 2142 * Only allow write access to parts of regs and to fpregs. 
2143 */ 2144 min = (char *)p->p_md.md_regs - (char *)p->p_addr; 2145 if (off >= min && off <= min + sizeof(struct trapframe) - sizeof(int)) { 2146 tp = p->p_md.md_regs; 2147 frame_copy = *tp; 2148 *(int *)((char *)&frame_copy + (off - min)) = data; 2149 if (!EFL_SECURE(frame_copy.tf_eflags, tp->tf_eflags) || 2150 !CS_SECURE(frame_copy.tf_cs)) 2151 return (EINVAL); 2152 *(int*)((char *)p->p_addr + off) = data; 2153 return (0); 2154 } 2155 min = offsetof(struct user, u_pcb) + offsetof(struct pcb, pcb_savefpu); 2156 if (off >= min && off <= min + sizeof(struct save87) - sizeof(int)) { 2157 *(int*)((char *)p->p_addr + off) = data; 2158 return (0); 2159 } 2160 return (EFAULT); 2161} 2162 2163int 2164fill_regs(p, regs) 2165 struct proc *p; 2166 struct reg *regs; 2167{ 2168 struct pcb *pcb; 2169 struct trapframe *tp; 2170 2171 tp = p->p_md.md_regs; 2172 regs->r_fs = tp->tf_fs; 2173 regs->r_es = tp->tf_es; 2174 regs->r_ds = tp->tf_ds; 2175 regs->r_edi = tp->tf_edi; 2176 regs->r_esi = tp->tf_esi; 2177 regs->r_ebp = tp->tf_ebp; 2178 regs->r_ebx = tp->tf_ebx; 2179 regs->r_edx = tp->tf_edx; 2180 regs->r_ecx = tp->tf_ecx; 2181 regs->r_eax = tp->tf_eax; 2182 regs->r_eip = tp->tf_eip; 2183 regs->r_cs = tp->tf_cs; 2184 regs->r_eflags = tp->tf_eflags; 2185 regs->r_esp = tp->tf_esp; 2186 regs->r_ss = tp->tf_ss; 2187 pcb = &p->p_addr->u_pcb; 2188 regs->r_gs = pcb->pcb_gs; 2189 return (0); 2190} 2191 2192int 2193set_regs(p, regs) 2194 struct proc *p; 2195 struct reg *regs; 2196{ 2197 struct pcb *pcb; 2198 struct trapframe *tp; 2199 2200 tp = p->p_md.md_regs; 2201 if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) || 2202 !CS_SECURE(regs->r_cs)) 2203 return (EINVAL); 2204 tp->tf_fs = regs->r_fs; 2205 tp->tf_es = regs->r_es; 2206 tp->tf_ds = regs->r_ds; 2207 tp->tf_edi = regs->r_edi; 2208 tp->tf_esi = regs->r_esi; 2209 tp->tf_ebp = regs->r_ebp; 2210 tp->tf_ebx = regs->r_ebx; 2211 tp->tf_edx = regs->r_edx; 2212 tp->tf_ecx = regs->r_ecx; 2213 tp->tf_eax = regs->r_eax; 2214 tp->tf_eip = regs->r_eip; 2215 tp->tf_cs = regs->r_cs; 2216 tp->tf_eflags = regs->r_eflags; 2217 tp->tf_esp = regs->r_esp; 2218 tp->tf_ss = regs->r_ss; 2219 pcb = &p->p_addr->u_pcb; 2220 pcb->pcb_gs = regs->r_gs; 2221 return (0); 2222} 2223 2224int 2225fill_fpregs(p, fpregs) 2226 struct proc *p; 2227 struct fpreg *fpregs; 2228{ 2229 bcopy(&p->p_addr->u_pcb.pcb_savefpu, fpregs, sizeof *fpregs); 2230 return (0); 2231} 2232 2233int 2234set_fpregs(p, fpregs) 2235 struct proc *p; 2236 struct fpreg *fpregs; 2237{ 2238 bcopy(fpregs, &p->p_addr->u_pcb.pcb_savefpu, sizeof *fpregs); 2239 return (0); 2240} 2241 2242int 2243fill_dbregs(p, dbregs) 2244 struct proc *p; 2245 struct dbreg *dbregs; 2246{ 2247 struct pcb *pcb; 2248 2249 pcb = &p->p_addr->u_pcb; 2250 dbregs->dr0 = pcb->pcb_dr0; 2251 dbregs->dr1 = pcb->pcb_dr1; 2252 dbregs->dr2 = pcb->pcb_dr2; 2253 dbregs->dr3 = pcb->pcb_dr3; 2254 dbregs->dr4 = 0; 2255 dbregs->dr5 = 0; 2256 dbregs->dr6 = pcb->pcb_dr6; 2257 dbregs->dr7 = pcb->pcb_dr7; 2258 return (0); 2259} 2260 2261int 2262set_dbregs(p, dbregs) 2263 struct proc *p; 2264 struct dbreg *dbregs; 2265{ 2266 struct pcb *pcb; 2267 int i; 2268 u_int32_t mask1, mask2; 2269 2270 /* 2271 * Don't let an illegal value for dr7 get set. Specifically, 2272 * check for undefined settings. Setting these bit patterns 2273 * result in undefined behaviour and can lead to an unexpected 2274 * TRCTRAP. 
2275 */ 2276 for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 8; 2277 i++, mask1 <<= 2, mask2 <<= 2) 2278 if ((dbregs->dr7 & mask1) == mask2) 2279 return (EINVAL); 2280 2281 if (dbregs->dr7 & 0x0000fc00) 2282 return (EINVAL); 2283 2284 2285 2286 pcb = &p->p_addr->u_pcb; 2287 2288 /* 2289 * Don't let a process set a breakpoint that is not within the 2290 * process's address space. If a process could do this, it 2291 * could halt the system by setting a breakpoint in the kernel 2292 * (if ddb was enabled). Thus, we need to check to make sure 2293 * that no breakpoints are being enabled for addresses outside 2294 * process's address space, unless, perhaps, we were called by 2295 * uid 0. 2296 * 2297 * XXX - what about when the watched area of the user's 2298 * address space is written into from within the kernel 2299 * ... wouldn't that still cause a breakpoint to be generated 2300 * from within kernel mode? 2301 */ 2302 2303 if (suser(p) != 0) { 2304 if (dbregs->dr7 & 0x3) { 2305 /* dr0 is enabled */ 2306 if (dbregs->dr0 >= VM_MAXUSER_ADDRESS) 2307 return (EINVAL); 2308 } 2309 2310 if (dbregs->dr7 & (0x3<<2)) { 2311 /* dr1 is enabled */ 2312 if (dbregs->dr1 >= VM_MAXUSER_ADDRESS) 2313 return (EINVAL); 2314 } 2315 2316 if (dbregs->dr7 & (0x3<<4)) { 2317 /* dr2 is enabled */ 2318 if (dbregs->dr2 >= VM_MAXUSER_ADDRESS) 2319 return (EINVAL); 2320 } 2321 2322 if (dbregs->dr7 & (0x3<<6)) { 2323 /* dr3 is enabled */ 2324 if (dbregs->dr3 >= VM_MAXUSER_ADDRESS) 2325 return (EINVAL); 2326 } 2327 } 2328 2329 pcb->pcb_dr0 = dbregs->dr0; 2330 pcb->pcb_dr1 = dbregs->dr1; 2331 pcb->pcb_dr2 = dbregs->dr2; 2332 pcb->pcb_dr3 = dbregs->dr3; 2333 pcb->pcb_dr6 = dbregs->dr6; 2334 pcb->pcb_dr7 = dbregs->dr7; 2335 2336 pcb->pcb_flags |= PCB_DBREGS; 2337 2338 return (0); 2339} 2340 2341/* 2342 * Return > 0 if a hardware breakpoint has been hit, and the 2343 * breakpoint was in user space. Return 0, otherwise. 2344 */ 2345int 2346user_dbreg_trap(void) 2347{ 2348 u_int32_t dr7, dr6; /* debug registers dr6 and dr7 */ 2349 u_int32_t bp; /* breakpoint bits extracted from dr6 */ 2350 int nbp; /* number of breakpoints that triggered */ 2351 caddr_t addr[4]; /* breakpoint addresses */ 2352 int i; 2353 2354 dr7 = rdr7(); 2355 if ((dr7 & 0x000000ff) == 0) { 2356 /* 2357 * all GE and LE bits in the dr7 register are zero, 2358 * thus the trap couldn't have been caused by the 2359 * hardware debug registers 2360 */ 2361 return 0; 2362 } 2363 2364 nbp = 0; 2365 dr6 = rdr6(); 2366 bp = dr6 & 0x0000000f; 2367 2368 if (!bp) { 2369 /* 2370 * None of the breakpoint bits are set meaning this 2371 * trap was not caused by any of the debug registers 2372 */ 2373 return 0; 2374 } 2375 2376 /* 2377 * at least one of the breakpoints were hit, check to see 2378 * which ones and if any of them are user space addresses 2379 */ 2380 2381 if (bp & 0x01) { 2382 addr[nbp++] = (caddr_t)rdr0(); 2383 } 2384 if (bp & 0x02) { 2385 addr[nbp++] = (caddr_t)rdr1(); 2386 } 2387 if (bp & 0x04) { 2388 addr[nbp++] = (caddr_t)rdr2(); 2389 } 2390 if (bp & 0x08) { 2391 addr[nbp++] = (caddr_t)rdr3(); 2392 } 2393 2394 for (i=0; i<nbp; i++) { 2395 if (addr[i] < 2396 (caddr_t)VM_MAXUSER_ADDRESS) { 2397 /* 2398 * addr[i] is in user space 2399 */ 2400 return nbp; 2401 } 2402 } 2403 2404 /* 2405 * None of the breakpoints are in user space. 
2406 */ 2407 return 0; 2408} 2409 2410 2411#ifndef DDB 2412void 2413Debugger(const char *msg) 2414{ 2415 printf("Debugger(\"%s\") called.\n", msg); 2416} 2417#endif /* no DDB */ 2418 2419#include <sys/disklabel.h> 2420 2421/* 2422 * Determine the size of the transfer, and make sure it is 2423 * within the boundaries of the partition. Adjust transfer 2424 * if needed, and signal errors or early completion. 2425 */ 2426int 2427bounds_check_with_label(struct bio *bp, struct disklabel *lp, int wlabel) 2428{ 2429 struct partition *p = lp->d_partitions + dkpart(bp->bio_dev); 2430 int labelsect = lp->d_partitions[0].p_offset; 2431 int maxsz = p->p_size, 2432 sz = (bp->bio_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT; 2433 2434 /* overwriting disk label ? */ 2435 /* XXX should also protect bootstrap in first 8K */ 2436 if (bp->bio_blkno + p->p_offset <= LABELSECTOR + labelsect && 2437#if LABELSECTOR != 0 2438 bp->bio_blkno + p->p_offset + sz > LABELSECTOR + labelsect && 2439#endif 2440 (bp->bio_cmd == BIO_WRITE) && wlabel == 0) { 2441 bp->bio_error = EROFS; 2442 goto bad; 2443 } 2444 2445#if defined(DOSBBSECTOR) && defined(notyet) 2446 /* overwriting master boot record? */ 2447 if (bp->bio_blkno + p->p_offset <= DOSBBSECTOR && 2448 (bp->bio_cmd == BIO_WRITE) && wlabel == 0) { 2449 bp->bio_error = EROFS; 2450 goto bad; 2451 } 2452#endif 2453 2454 /* beyond partition? */ 2455 if (bp->bio_blkno < 0 || bp->bio_blkno + sz > maxsz) { 2456 /* if exactly at end of disk, return an EOF */ 2457 if (bp->bio_blkno == maxsz) { 2458 bp->bio_resid = bp->bio_bcount; 2459 return(0); 2460 } 2461 /* or truncate if part of it fits */ 2462 sz = maxsz - bp->bio_blkno; 2463 if (sz <= 0) { 2464 bp->bio_error = EINVAL; 2465 goto bad; 2466 } 2467 bp->bio_bcount = sz << DEV_BSHIFT; 2468 } 2469 2470 bp->bio_pblkno = bp->bio_blkno + p->p_offset; 2471 return(1); 2472 2473bad: 2474 bp->bio_flags |= BIO_ERROR; 2475 return(-1); 2476} 2477 2478#ifdef DDB 2479 2480/* 2481 * Provide inb() and outb() as functions. They are normally only 2482 * available as macros calling inlined functions, thus cannot be 2483 * called inside DDB. 2484 * 2485 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined. 2486 */ 2487 2488#undef inb 2489#undef outb 2490 2491/* silence compiler warnings */ 2492u_char inb(u_int); 2493void outb(u_int, u_char); 2494 2495u_char 2496inb(u_int port) 2497{ 2498 u_char data; 2499 /* 2500 * We use %%dx and not %1 here because i/o is done at %dx and not at 2501 * %edx, while gcc generates inferior code (movw instead of movl) 2502 * if we tell it to load (u_short) port. 2503 */ 2504 __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port)); 2505 return (data); 2506} 2507 2508void 2509outb(u_int port, u_char data) 2510{ 2511 u_char al; 2512 /* 2513 * Use an unnecessary assignment to help gcc's register allocator. 2514 * This make a large difference for gcc-1.40 and a tiny difference 2515 * for gcc-2.6.0. For gcc-1.40, al had to be ``asm("ax")'' for 2516 * best results. gcc-2.6.0 can't handle this. 2517 */ 2518 al = data; 2519 __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port)); 2520} 2521 2522#endif /* DDB */ 2523