machdep.c revision 71785
1/*- 2 * Copyright (c) 1992 Terrence R. Lambert. 3 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to Berkeley by 7 * William Jolitz. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by the University of 20 * California, Berkeley and its contributors. 21 * 4. Neither the name of the University nor the names of its contributors 22 * may be used to endorse or promote products derived from this software 23 * without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 
36 * 37 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 38 * $FreeBSD: head/sys/amd64/amd64/machdep.c 71785 2001-01-29 09:38:39Z peter $ 39 */ 40 41#include "opt_atalk.h" 42#include "opt_compat.h" 43#include "opt_cpu.h" 44#include "opt_ddb.h" 45#include "opt_inet.h" 46#include "opt_ipx.h" 47#include "opt_isa.h" 48#include "opt_maxmem.h" 49#include "opt_msgbuf.h" 50#include "opt_npx.h" 51#include "opt_perfmon.h" 52#include "opt_user_ldt.h" 53#include "opt_userconfig.h" 54 55#include <sys/param.h> 56#include <sys/systm.h> 57#include <sys/sysproto.h> 58#include <sys/signalvar.h> 59#include <sys/ipl.h> 60#include <sys/kernel.h> 61#include <sys/ktr.h> 62#include <sys/linker.h> 63#include <sys/malloc.h> 64#include <sys/mutex.h> 65#include <sys/proc.h> 66#include <sys/bio.h> 67#include <sys/buf.h> 68#include <sys/reboot.h> 69#include <sys/callout.h> 70#include <sys/msgbuf.h> 71#include <sys/sysent.h> 72#include <sys/sysctl.h> 73#include <sys/vmmeter.h> 74#include <sys/bus.h> 75#include <sys/eventhandler.h> 76 77#include <vm/vm.h> 78#include <vm/vm_param.h> 79#include <sys/lock.h> 80#include <vm/vm_kern.h> 81#include <vm/vm_object.h> 82#include <vm/vm_page.h> 83#include <vm/vm_map.h> 84#include <vm/vm_pager.h> 85#include <vm/vm_extern.h> 86 87#include <sys/user.h> 88#include <sys/exec.h> 89#include <sys/cons.h> 90 91#include <ddb/ddb.h> 92 93#include <net/netisr.h> 94 95#include <machine/cpu.h> 96#include <machine/cputypes.h> 97#include <machine/reg.h> 98#include <machine/clock.h> 99#include <machine/specialreg.h> 100#include <machine/bootinfo.h> 101#include <machine/md_var.h> 102#include <machine/pc/bios.h> 103#include <machine/pcb_ext.h> /* pcb.h included via sys/user.h */ 104#include <machine/globaldata.h> 105#include <machine/globals.h> 106#ifdef SMP 107#include <machine/smp.h> 108#endif 109#ifdef PERFMON 110#include <machine/perfmon.h> 111#endif 112 113#ifdef OLD_BUS_ARCH 114#include <i386/isa/isa_device.h> 115#endif 116#include <i386/isa/icu.h> 117#include <i386/isa/intr_machdep.h> 118#include <isa/rtc.h> 119#include <machine/vm86.h> 120#include <sys/ptrace.h> 121#include <machine/sigframe.h> 122 123extern void init386 __P((int first)); 124extern void dblfault_handler __P((void)); 125 126extern void printcpuinfo(void); /* XXX header file */ 127extern void earlysetcpuclass(void); /* same header file */ 128extern void finishidentcpu(void); 129extern void panicifcpuunsupported(void); 130extern void initializecpu(void); 131 132#define CS_SECURE(cs) (ISPL(cs) == SEL_UPL) 133#define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0) 134 135static void cpu_startup __P((void *)); 136SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL) 137 138int _udatasel, _ucodesel; 139u_int atdevbase; 140 141#if defined(SWTCH_OPTIM_STATS) 142extern int swtch_optim_stats; 143SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats, 144 CTLFLAG_RD, &swtch_optim_stats, 0, ""); 145SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count, 146 CTLFLAG_RD, &tlb_flush_count, 0, ""); 147#endif 148 149#ifdef PC98 150static int ispc98 = 1; 151#else 152static int ispc98 = 0; 153#endif 154SYSCTL_INT(_machdep, OID_AUTO, ispc98, CTLFLAG_RD, &ispc98, 0, ""); 155 156int physmem = 0; 157int cold = 1; 158 159static void osendsig __P((sig_t catcher, int sig, sigset_t *mask, u_long code)); 160 161static int 162sysctl_hw_physmem(SYSCTL_HANDLER_ARGS) 163{ 164 int error = sysctl_handle_int(oidp, 0, ctob(physmem), req); 165 return (error); 166} 167 168SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_INT|CTLFLAG_RD, 169 0, 0, sysctl_hw_physmem, "I", 
""); 170 171static int 172sysctl_hw_usermem(SYSCTL_HANDLER_ARGS) 173{ 174 int error = sysctl_handle_int(oidp, 0, 175 ctob(physmem - cnt.v_wire_count), req); 176 return (error); 177} 178 179SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD, 180 0, 0, sysctl_hw_usermem, "I", ""); 181 182static int 183sysctl_hw_availpages(SYSCTL_HANDLER_ARGS) 184{ 185 int error = sysctl_handle_int(oidp, 0, 186 i386_btop(avail_end - avail_start), req); 187 return (error); 188} 189 190SYSCTL_PROC(_hw, OID_AUTO, availpages, CTLTYPE_INT|CTLFLAG_RD, 191 0, 0, sysctl_hw_availpages, "I", ""); 192 193static int 194sysctl_machdep_msgbuf(SYSCTL_HANDLER_ARGS) 195{ 196 int error; 197 198 /* Unwind the buffer, so that it's linear (possibly starting with 199 * some initial nulls). 200 */ 201 error=sysctl_handle_opaque(oidp,msgbufp->msg_ptr+msgbufp->msg_bufr, 202 msgbufp->msg_size-msgbufp->msg_bufr,req); 203 if(error) return(error); 204 if(msgbufp->msg_bufr>0) { 205 error=sysctl_handle_opaque(oidp,msgbufp->msg_ptr, 206 msgbufp->msg_bufr,req); 207 } 208 return(error); 209} 210 211SYSCTL_PROC(_machdep, OID_AUTO, msgbuf, CTLTYPE_STRING|CTLFLAG_RD, 212 0, 0, sysctl_machdep_msgbuf, "A","Contents of kernel message buffer"); 213 214static int msgbuf_clear; 215 216static int 217sysctl_machdep_msgbuf_clear(SYSCTL_HANDLER_ARGS) 218{ 219 int error; 220 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, 221 req); 222 if (!error && req->newptr) { 223 /* Clear the buffer and reset write pointer */ 224 bzero(msgbufp->msg_ptr,msgbufp->msg_size); 225 msgbufp->msg_bufr=msgbufp->msg_bufx=0; 226 msgbuf_clear=0; 227 } 228 return (error); 229} 230 231SYSCTL_PROC(_machdep, OID_AUTO, msgbuf_clear, CTLTYPE_INT|CTLFLAG_RW, 232 &msgbuf_clear, 0, sysctl_machdep_msgbuf_clear, "I", 233 "Clear kernel message buffer"); 234 235int bootverbose = 0, Maxmem = 0; 236long dumplo; 237 238vm_offset_t phys_avail[10]; 239 240/* must be 2 less so 0 0 can signal end of chunks */ 241#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2) 242 243static vm_offset_t buffer_sva, buffer_eva; 244vm_offset_t clean_sva, clean_eva; 245static vm_offset_t pager_sva, pager_eva; 246static struct trapframe proc0_tf; 247#ifndef SMP 248static struct globaldata __globaldata; 249#endif 250 251struct cpuhead cpuhead; 252 253struct mtx sched_lock; 254struct mtx Giant; 255 256static void 257cpu_startup(dummy) 258 void *dummy; 259{ 260 register unsigned i; 261 register caddr_t v; 262 vm_offset_t maxaddr; 263 vm_size_t size = 0; 264 int firstaddr; 265 vm_offset_t minaddr; 266 int physmem_est; 267 268 if (boothowto & RB_VERBOSE) 269 bootverbose++; 270 271 /* 272 * Good {morning,afternoon,evening,night}. 273 */ 274 printf("%s", version); 275 earlysetcpuclass(); 276 startrtclock(); 277 printcpuinfo(); 278 panicifcpuunsupported(); 279#ifdef PERFMON 280 perfmon_init(); 281#endif 282 printf("real memory = %u (%uK bytes)\n", ptoa(Maxmem), ptoa(Maxmem) / 1024); 283 /* 284 * Display any holes after the first chunk of extended memory. 
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			unsigned int size1 = phys_avail[indx + 1] - phys_avail[indx];

			printf("0x%08x - 0x%08x, %u bytes (%u pages)\n",
			    phys_avail[indx], phys_avail[indx + 1] - 1, size1,
			    size1 / PAGE_SIZE);
		}
	}

	/*
	 * Calculate callout wheel size
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	/*
	 * Allocate space for system data structures.
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */

	/*
	 * Make two passes.  The first pass calculates how much memory is
	 * needed and allocates it.  The second pass assigns virtual
	 * addresses to the various data structures.
	 */
	firstaddr = 0;
again:
	v = (caddr_t)firstaddr;

#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))

	valloc(callout, struct callout, ncallout);
	valloc(callwheel, struct callout_tailq, callwheelsize);

	/*
	 * Discount the physical memory larger than the size of kernel_map
	 * to avoid eating up all of KVA space.
	 */
	if (kernel_map->first_free == NULL) {
		printf("Warning: no free entries in kernel_map.\n");
		physmem_est = physmem;
	} else
		physmem_est = min(physmem, kernel_map->max_offset - kernel_map->min_offset);

	/*
	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
	 * For the first 64MB of ram nominally allocate sufficient buffers to
	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
	 * buffers to cover 1/20 of our ram over 64MB.
	 *
	 * factor represents the 1/4 x ram conversion.
	 */
	if (nbuf == 0) {
		int factor = 4 * BKVASIZE / PAGE_SIZE;

		nbuf = 50;
		if (physmem_est > 1024)
			nbuf += min((physmem_est - 1024) / factor, 16384 / factor);
		if (physmem_est > 16384)
			nbuf += (physmem_est - 16384) * 2 / (factor * 5);
	}

	/*
	 * Do not allow the buffer_map to be more than 1/2 the size of the
	 * kernel_map.
366 */ 367 if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) / 368 (BKVASIZE * 2)) { 369 nbuf = (kernel_map->max_offset - kernel_map->min_offset) / 370 (BKVASIZE * 2); 371 printf("Warning: nbufs capped at %d\n", nbuf); 372 } 373 374 nswbuf = max(min(nbuf/4, 256), 16); 375 376 valloc(swbuf, struct buf, nswbuf); 377 valloc(buf, struct buf, nbuf); 378 v = bufhashinit(v); 379 380 /* 381 * End of first pass, size has been calculated so allocate memory 382 */ 383 if (firstaddr == 0) { 384 size = (vm_size_t)(v - firstaddr); 385 firstaddr = (int)kmem_alloc(kernel_map, round_page(size)); 386 if (firstaddr == 0) 387 panic("startup: no room for tables"); 388 goto again; 389 } 390 391 /* 392 * End of second pass, addresses have been assigned 393 */ 394 if ((vm_size_t)(v - firstaddr) != size) 395 panic("startup: table size inconsistency"); 396 397 clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva, 398 (nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size); 399 buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva, 400 (nbuf*BKVASIZE)); 401 pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva, 402 (nswbuf*MAXPHYS) + pager_map_size); 403 pager_map->system_map = 1; 404 exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, 405 (16*(ARG_MAX+(PAGE_SIZE*3)))); 406 407 /* 408 * XXX: Mbuf system machine-specific initializations should 409 * go here, if anywhere. 410 */ 411 412 /* 413 * Initialize callouts 414 */ 415 SLIST_INIT(&callfree); 416 for (i = 0; i < ncallout; i++) { 417 callout_init(&callout[i], 0); 418 callout[i].c_flags = CALLOUT_LOCAL_ALLOC; 419 SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle); 420 } 421 422 for (i = 0; i < callwheelsize; i++) { 423 TAILQ_INIT(&callwheel[i]); 424 } 425 426 mtx_init(&callout_lock, "callout", MTX_SPIN | MTX_RECURSE); 427 428#if defined(USERCONFIG) 429 userconfig(); 430 cninit(); /* the preferred console may have changed */ 431#endif 432 433 printf("avail memory = %u (%uK bytes)\n", ptoa(cnt.v_free_count), 434 ptoa(cnt.v_free_count) / 1024); 435 436 /* 437 * Set up buffers, so they can be used to read disk labels. 438 */ 439 bufinit(); 440 vm_pager_bufferinit(); 441 442 SLIST_INIT(&cpuhead); 443 SLIST_INSERT_HEAD(&cpuhead, GLOBALDATA, gd_allcpu); 444 445#ifdef SMP 446 /* 447 * OK, enough kmem_alloc/malloc state should be up, lets get on with it! 448 */ 449 mp_start(); /* fire up the APs and APICs */ 450 mp_announce(); 451#endif /* SMP */ 452 cpu_setregs(); 453} 454 455/* 456 * Send an interrupt to process. 457 * 458 * Stack is set up to allow sigcode stored 459 * at top to call routine, followed by kcall 460 * to sigreturn routine below. After sigreturn 461 * resets the signal mask, the stack, and the 462 * frame pointer, it returns to the user 463 * specified pc, psl. 464 */ 465static void 466osendsig(catcher, sig, mask, code) 467 sig_t catcher; 468 int sig; 469 sigset_t *mask; 470 u_long code; 471{ 472 struct osigframe sf; 473 struct osigframe *fp; 474 struct proc *p; 475 struct sigacts *psp; 476 struct trapframe *regs; 477 int oonstack; 478 479 p = curproc; 480 PROC_LOCK(p); 481 psp = p->p_sigacts; 482 regs = p->p_md.md_regs; 483 oonstack = sigonstack(regs->tf_esp); 484 485 /* Allocate and validate space for the signal handler context. 
*/ 486 if ((p->p_flag & P_ALTSTACK) && !oonstack && 487 SIGISMEMBER(psp->ps_sigonstack, sig)) { 488 fp = (struct osigframe *)(p->p_sigstk.ss_sp + 489 p->p_sigstk.ss_size - sizeof(struct osigframe)); 490#if defined(COMPAT_43) || defined(COMPAT_SUNOS) 491 p->p_sigstk.ss_flags |= SS_ONSTACK; 492#endif 493 } else 494 fp = (struct osigframe *)regs->tf_esp - 1; 495 PROC_UNLOCK(p); 496 497 /* 498 * grow_stack() will return 0 if *fp does not fit inside the stack 499 * and the stack can not be grown. 500 * useracc() will return FALSE if access is denied. 501 */ 502 if (grow_stack(p, (int)fp) == 0 || 503 !useracc((caddr_t)fp, sizeof(*fp), VM_PROT_WRITE)) { 504 /* 505 * Process has trashed its stack; give it an illegal 506 * instruction to halt it in its tracks. 507 */ 508 PROC_LOCK(p); 509 SIGACTION(p, SIGILL) = SIG_DFL; 510 SIGDELSET(p->p_sigignore, SIGILL); 511 SIGDELSET(p->p_sigcatch, SIGILL); 512 SIGDELSET(p->p_sigmask, SIGILL); 513 PROC_UNLOCK(p); 514 psignal(p, SIGILL); 515 return; 516 } 517 518 /* Translate the signal if appropriate. */ 519 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize) 520 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; 521 522 /* Build the argument list for the signal handler. */ 523 sf.sf_signum = sig; 524 sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc; 525 PROC_LOCK(p); 526 if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) { 527 /* Signal handler installed with SA_SIGINFO. */ 528 sf.sf_arg2 = (register_t)&fp->sf_siginfo; 529 sf.sf_siginfo.si_signo = sig; 530 sf.sf_siginfo.si_code = code; 531 sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher; 532 } else { 533 /* Old FreeBSD-style arguments. */ 534 sf.sf_arg2 = code; 535 sf.sf_addr = regs->tf_err; 536 sf.sf_ahu.sf_handler = catcher; 537 } 538 PROC_UNLOCK(p); 539 540 /* Save most if not all of trap frame. */ 541 sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax; 542 sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx; 543 sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx; 544 sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx; 545 sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi; 546 sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi; 547 sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs; 548 sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds; 549 sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss; 550 sf.sf_siginfo.si_sc.sc_es = regs->tf_es; 551 sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs; 552 sf.sf_siginfo.si_sc.sc_gs = rgs(); 553 sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp; 554 555 /* Build the signal context to be used by osigreturn(). */ 556 sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0; 557 SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask); 558 sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp; 559 sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp; 560 sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip; 561 sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags; 562 sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno; 563 sf.sf_siginfo.si_sc.sc_err = regs->tf_err; 564 565 /* 566 * If we're a vm86 process, we want to save the segment registers. 567 * We also change eflags to be our emulated eflags, not the actual 568 * eflags. 569 */ 570 if (regs->tf_eflags & PSL_VM) { 571 /* XXX confusing names: `tf' isn't a trapframe; `regs' is. 
*/ 572 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 573 struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 574 575 sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs; 576 sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs; 577 sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es; 578 sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds; 579 580 if (vm86->vm86_has_vme == 0) 581 sf.sf_siginfo.si_sc.sc_ps = 582 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) | 583 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); 584 585 /* See sendsig() for comments. */ 586 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP); 587 } 588 589 /* Copy the sigframe out to the user's stack. */ 590 if (copyout(&sf, fp, sizeof(*fp)) != 0) { 591 /* 592 * Something is wrong with the stack pointer. 593 * ...Kill the process. 594 */ 595 sigexit(p, SIGILL); 596 } 597 598 regs->tf_esp = (int)fp; 599 regs->tf_eip = PS_STRINGS - szosigcode; 600 regs->tf_cs = _ucodesel; 601 regs->tf_ds = _udatasel; 602 regs->tf_es = _udatasel; 603 regs->tf_fs = _udatasel; 604 load_gs(_udatasel); 605 regs->tf_ss = _udatasel; 606} 607 608void 609sendsig(catcher, sig, mask, code) 610 sig_t catcher; 611 int sig; 612 sigset_t *mask; 613 u_long code; 614{ 615 struct sigframe sf; 616 struct proc *p; 617 struct sigacts *psp; 618 struct trapframe *regs; 619 struct sigframe *sfp; 620 int oonstack; 621 622 p = curproc; 623 PROC_LOCK(p); 624 psp = p->p_sigacts; 625 if (SIGISMEMBER(psp->ps_osigset, sig)) { 626 PROC_UNLOCK(p); 627 osendsig(catcher, sig, mask, code); 628 return; 629 } 630 regs = p->p_md.md_regs; 631 oonstack = sigonstack(regs->tf_esp); 632 633 /* Save user context. */ 634 bzero(&sf, sizeof(sf)); 635 sf.sf_uc.uc_sigmask = *mask; 636 sf.sf_uc.uc_stack = p->p_sigstk; 637 sf.sf_uc.uc_stack.ss_flags = (p->p_flag & P_ALTSTACK) 638 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE; 639 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0; 640 sf.sf_uc.uc_mcontext.mc_gs = rgs(); 641 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs)); 642 643 /* Allocate and validate space for the signal handler context. */ 644 if ((p->p_flag & P_ALTSTACK) != 0 && !oonstack && 645 SIGISMEMBER(psp->ps_sigonstack, sig)) { 646 sfp = (struct sigframe *)(p->p_sigstk.ss_sp + 647 p->p_sigstk.ss_size - sizeof(struct sigframe)); 648#if defined(COMPAT_43) || defined(COMPAT_SUNOS) 649 p->p_sigstk.ss_flags |= SS_ONSTACK; 650#endif 651 } else 652 sfp = (struct sigframe *)regs->tf_esp - 1; 653 PROC_UNLOCK(p); 654 655 /* 656 * grow_stack() will return 0 if *sfp does not fit inside the stack 657 * and the stack can not be grown. 658 * useracc() will return FALSE if access is denied. 659 */ 660 if (grow_stack(p, (int)sfp) == 0 || 661 !useracc((caddr_t)sfp, sizeof(*sfp), VM_PROT_WRITE)) { 662 /* 663 * Process has trashed its stack; give it an illegal 664 * instruction to halt it in its tracks. 665 */ 666#ifdef DEBUG 667 printf("process %d has trashed its stack\n", p->p_pid); 668#endif 669 PROC_LOCK(p); 670 SIGACTION(p, SIGILL) = SIG_DFL; 671 SIGDELSET(p->p_sigignore, SIGILL); 672 SIGDELSET(p->p_sigcatch, SIGILL); 673 SIGDELSET(p->p_sigmask, SIGILL); 674 PROC_UNLOCK(p); 675 psignal(p, SIGILL); 676 return; 677 } 678 679 /* Translate the signal if appropriate. */ 680 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize) 681 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; 682 683 /* Build the argument list for the signal handler. 
*/ 684 sf.sf_signum = sig; 685 sf.sf_ucontext = (register_t)&sfp->sf_uc; 686 PROC_LOCK(p); 687 if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) { 688 /* Signal handler installed with SA_SIGINFO. */ 689 sf.sf_siginfo = (register_t)&sfp->sf_si; 690 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; 691 692 /* Fill siginfo structure. */ 693 sf.sf_si.si_signo = sig; 694 sf.sf_si.si_code = code; 695 sf.sf_si.si_addr = (void *)regs->tf_err; 696 } else { 697 /* Old FreeBSD-style arguments. */ 698 sf.sf_siginfo = code; 699 sf.sf_addr = regs->tf_err; 700 sf.sf_ahu.sf_handler = catcher; 701 } 702 PROC_UNLOCK(p); 703 704 /* 705 * If we're a vm86 process, we want to save the segment registers. 706 * We also change eflags to be our emulated eflags, not the actual 707 * eflags. 708 */ 709 if (regs->tf_eflags & PSL_VM) { 710 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 711 struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 712 713 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs; 714 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs; 715 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es; 716 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds; 717 718 if (vm86->vm86_has_vme == 0) 719 sf.sf_uc.uc_mcontext.mc_eflags = 720 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) | 721 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); 722 723 /* 724 * We should never have PSL_T set when returning from vm86 725 * mode. It may be set here if we deliver a signal before 726 * getting to vm86 mode, so turn it off. 727 * 728 * Clear PSL_NT to inhibit T_TSSFLT faults on return from 729 * syscalls made by the signal handler. This just avoids 730 * wasting time for our lazy fixup of such faults. PSL_NT 731 * does nothing in vm86 mode, but vm86 programs can set it 732 * almost legitimately in probes for old cpu types. 733 */ 734 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP); 735 } 736 737 /* Copy the sigframe out to the user's stack. */ 738 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) { 739 /* 740 * Something is wrong with the stack pointer. 741 * ...Kill the process. 742 */ 743 sigexit(p, SIGILL); 744 } 745 746 regs->tf_esp = (int)sfp; 747 regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode); 748 regs->tf_cs = _ucodesel; 749 regs->tf_ds = _udatasel; 750 regs->tf_es = _udatasel; 751 regs->tf_fs = _udatasel; 752 load_gs(_udatasel); 753 regs->tf_ss = _udatasel; 754} 755 756/* 757 * System call to cleanup state after a signal 758 * has been taken. Reset signal mask and 759 * stack state from context left by sendsig (above). 760 * Return to previous pc and psl as specified by 761 * context left by sendsig. Check carefully to 762 * make sure that the user has not modified the 763 * state to gain improper privileges. 764 */ 765int 766osigreturn(p, uap) 767 struct proc *p; 768 struct osigreturn_args /* { 769 struct osigcontext *sigcntxp; 770 } */ *uap; 771{ 772 struct trapframe *regs; 773 struct osigcontext *scp; 774 int eflags; 775 776 regs = p->p_md.md_regs; 777 scp = uap->sigcntxp; 778 if (!useracc((caddr_t)scp, sizeof(*scp), VM_PROT_READ)) 779 return (EFAULT); 780 eflags = scp->sc_ps; 781 if (eflags & PSL_VM) { 782 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 783 struct vm86_kernel *vm86; 784 785 /* 786 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't 787 * set up the vm86 area, and we can't enter vm86 mode. 
788 */ 789 if (p->p_addr->u_pcb.pcb_ext == 0) 790 return (EINVAL); 791 vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 792 if (vm86->vm86_inited == 0) 793 return (EINVAL); 794 795 /* Go back to user mode if both flags are set. */ 796 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) 797 trapsignal(p, SIGBUS, 0); 798 799 if (vm86->vm86_has_vme) { 800 eflags = (tf->tf_eflags & ~VME_USERCHANGE) | 801 (eflags & VME_USERCHANGE) | PSL_VM; 802 } else { 803 vm86->vm86_eflags = eflags; /* save VIF, VIP */ 804 eflags = (tf->tf_eflags & ~VM_USERCHANGE) | (eflags & VM_USERCHANGE) | PSL_VM; 805 } 806 tf->tf_vm86_ds = scp->sc_ds; 807 tf->tf_vm86_es = scp->sc_es; 808 tf->tf_vm86_fs = scp->sc_fs; 809 tf->tf_vm86_gs = scp->sc_gs; 810 tf->tf_ds = _udatasel; 811 tf->tf_es = _udatasel; 812 tf->tf_fs = _udatasel; 813 } else { 814 /* 815 * Don't allow users to change privileged or reserved flags. 816 */ 817 /* 818 * XXX do allow users to change the privileged flag PSL_RF. 819 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers 820 * should sometimes set it there too. tf_eflags is kept in 821 * the signal context during signal handling and there is no 822 * other place to remember it, so the PSL_RF bit may be 823 * corrupted by the signal handler without us knowing. 824 * Corruption of the PSL_RF bit at worst causes one more or 825 * one less debugger trap, so allowing it is fairly harmless. 826 */ 827 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) { 828 return (EINVAL); 829 } 830 831 /* 832 * Don't allow users to load a valid privileged %cs. Let the 833 * hardware check for invalid selectors, excess privilege in 834 * other selectors, invalid %eip's and invalid %esp's. 835 */ 836 if (!CS_SECURE(scp->sc_cs)) { 837 trapsignal(p, SIGBUS, T_PROTFLT); 838 return (EINVAL); 839 } 840 regs->tf_ds = scp->sc_ds; 841 regs->tf_es = scp->sc_es; 842 regs->tf_fs = scp->sc_fs; 843 } 844 845 /* Restore remaining registers. */ 846 regs->tf_eax = scp->sc_eax; 847 regs->tf_ebx = scp->sc_ebx; 848 regs->tf_ecx = scp->sc_ecx; 849 regs->tf_edx = scp->sc_edx; 850 regs->tf_esi = scp->sc_esi; 851 regs->tf_edi = scp->sc_edi; 852 regs->tf_cs = scp->sc_cs; 853 regs->tf_ss = scp->sc_ss; 854 regs->tf_isp = scp->sc_isp; 855 856 PROC_LOCK(p); 857#if defined(COMPAT_43) || defined(COMPAT_SUNOS) 858 if (scp->sc_onstack & 1) 859 p->p_sigstk.ss_flags |= SS_ONSTACK; 860 else 861 p->p_sigstk.ss_flags &= ~SS_ONSTACK; 862#endif 863 864 SIGSETOLD(p->p_sigmask, scp->sc_mask); 865 SIG_CANTMASK(p->p_sigmask); 866 PROC_UNLOCK(p); 867 regs->tf_ebp = scp->sc_fp; 868 regs->tf_esp = scp->sc_sp; 869 regs->tf_eip = scp->sc_pc; 870 regs->tf_eflags = eflags; 871 return (EJUSTRETURN); 872} 873 874int 875sigreturn(p, uap) 876 struct proc *p; 877 struct sigreturn_args /* { 878 ucontext_t *sigcntxp; 879 } */ *uap; 880{ 881 struct trapframe *regs; 882 ucontext_t *ucp; 883 int cs, eflags; 884 885 ucp = uap->sigcntxp; 886 if (!useracc((caddr_t)ucp, sizeof(struct osigcontext), VM_PROT_READ)) 887 return (EFAULT); 888 if (((struct osigcontext *)ucp)->sc_trapno == 0x01d516) 889 return (osigreturn(p, (struct osigreturn_args *)uap)); 890 891 /* 892 * Since ucp is not an osigcontext but a ucontext_t, we have to 893 * check again if all of it is accessible. A ucontext_t is 894 * much larger, so instead of just checking for the pointer 895 * being valid for the size of an osigcontext, now check for 896 * it being valid for a whole, new-style ucontext_t. 
897 */ 898 if (!useracc((caddr_t)ucp, sizeof(*ucp), VM_PROT_READ)) 899 return (EFAULT); 900 901 regs = p->p_md.md_regs; 902 eflags = ucp->uc_mcontext.mc_eflags; 903 if (eflags & PSL_VM) { 904 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 905 struct vm86_kernel *vm86; 906 907 /* 908 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't 909 * set up the vm86 area, and we can't enter vm86 mode. 910 */ 911 if (p->p_addr->u_pcb.pcb_ext == 0) 912 return (EINVAL); 913 vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 914 if (vm86->vm86_inited == 0) 915 return (EINVAL); 916 917 /* Go back to user mode if both flags are set. */ 918 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) 919 trapsignal(p, SIGBUS, 0); 920 921 if (vm86->vm86_has_vme) { 922 eflags = (tf->tf_eflags & ~VME_USERCHANGE) | 923 (eflags & VME_USERCHANGE) | PSL_VM; 924 } else { 925 vm86->vm86_eflags = eflags; /* save VIF, VIP */ 926 eflags = (tf->tf_eflags & ~VM_USERCHANGE) | (eflags & VM_USERCHANGE) | PSL_VM; 927 } 928 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe)); 929 tf->tf_eflags = eflags; 930 tf->tf_vm86_ds = tf->tf_ds; 931 tf->tf_vm86_es = tf->tf_es; 932 tf->tf_vm86_fs = tf->tf_fs; 933 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs; 934 tf->tf_ds = _udatasel; 935 tf->tf_es = _udatasel; 936 tf->tf_fs = _udatasel; 937 } else { 938 /* 939 * Don't allow users to change privileged or reserved flags. 940 */ 941 /* 942 * XXX do allow users to change the privileged flag PSL_RF. 943 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers 944 * should sometimes set it there too. tf_eflags is kept in 945 * the signal context during signal handling and there is no 946 * other place to remember it, so the PSL_RF bit may be 947 * corrupted by the signal handler without us knowing. 948 * Corruption of the PSL_RF bit at worst causes one more or 949 * one less debugger trap, so allowing it is fairly harmless. 950 */ 951 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) { 952 printf("sigreturn: eflags = 0x%x\n", eflags); 953 return (EINVAL); 954 } 955 956 /* 957 * Don't allow users to load a valid privileged %cs. Let the 958 * hardware check for invalid selectors, excess privilege in 959 * other selectors, invalid %eip's and invalid %esp's. 960 */ 961 cs = ucp->uc_mcontext.mc_cs; 962 if (!CS_SECURE(cs)) { 963 printf("sigreturn: cs = 0x%x\n", cs); 964 trapsignal(p, SIGBUS, T_PROTFLT); 965 return (EINVAL); 966 } 967 968 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs)); 969 } 970 971 PROC_LOCK(p); 972#if defined(COMPAT_43) || defined(COMPAT_SUNOS) 973 if (ucp->uc_mcontext.mc_onstack & 1) 974 p->p_sigstk.ss_flags |= SS_ONSTACK; 975 else 976 p->p_sigstk.ss_flags &= ~SS_ONSTACK; 977#endif 978 979 p->p_sigmask = ucp->uc_sigmask; 980 SIG_CANTMASK(p->p_sigmask); 981 PROC_UNLOCK(p); 982 return (EJUSTRETURN); 983} 984 985/* 986 * Machine dependent boot() routine 987 * 988 * I haven't seen anything to put here yet 989 * Possibly some stuff might be grafted back here from boot() 990 */ 991void 992cpu_boot(int howto) 993{ 994} 995 996/* 997 * Shutdown the CPU as much as possible 998 */ 999void 1000cpu_halt(void) 1001{ 1002 for (;;) 1003 __asm__ ("hlt"); 1004} 1005 1006/* 1007 * Hook to idle the CPU when possible. This currently only works in 1008 * the !SMP case, as there is no clean way to ensure that a CPU will be 1009 * woken when there is work available for it. 
 */
static int cpu_idle_hlt = 1;
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
    &cpu_idle_hlt, 0, "Idle loop HLT enable");

/*
 * Note that we have to be careful here to avoid a race between checking
 * procrunnable() and actually halting.  If we don't do this, we may waste
 * the time between calling hlt and the next interrupt even though there
 * is a runnable process.
 */
void
cpu_idle(void)
{
#ifndef SMP
	if (cpu_idle_hlt) {
		disable_intr();
		if (procrunnable())
			enable_intr();
		else {
			enable_intr();
			__asm __volatile("hlt");
		}
	}
#endif
}

/*
 * Clear registers on exec
 */
void
setregs(p, entry, stack, ps_strings)
	struct proc *p;
	u_long entry;
	u_long stack;
	u_long ps_strings;
{
	struct trapframe *regs = p->p_md.md_regs;
	struct pcb *pcb = &p->p_addr->u_pcb;

#ifdef USER_LDT
	/* was i386_user_cleanup() in NetBSD */
	user_ldt_free(pcb);
#endif

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = entry;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = ps_strings;

	/* reset %gs as well */
	if (pcb == PCPU_GET(curpcb))
		load_gs(_udatasel);
	else
		pcb->pcb_gs = _udatasel;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == PCPU_GET(curpcb)) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	p->p_addr->u_pcb.pcb_flags &= ~FP_SOFTFP;

	/*
	 * Arrange to trap the next npx or `fwait' instruction (see npx.c
	 * for why fwait must be trapped at least if there is an npx or an
	 * emulator).  This is mainly to handle the case where npx0 is not
	 * configured, since the npx routines normally set up the trap
	 * otherwise.  It should be done only at boot time, but doing it
	 * here allows modifying `npx_exists' for testing the emulator on
	 * systems with an npx.
	 */
	load_cr0(rcr0() | CR0_MP | CR0_TS);

#ifdef DEV_NPX
	/* Initialize the npx (if any) for the current process. */
	npxinit(__INITIAL_NPXCW__);
#endif

	/*
	 * XXX - Linux emulator
	 * Make sure edx is 0x0 on entry.  Linux binaries depend
	 * on it.
1125 */ 1126 p->p_retval[1] = 0; 1127} 1128 1129void 1130cpu_setregs(void) 1131{ 1132 unsigned int cr0; 1133 1134 cr0 = rcr0(); 1135 cr0 |= CR0_NE; /* Done by npxinit() */ 1136 cr0 |= CR0_MP | CR0_TS; /* Done at every execve() too. */ 1137#ifndef I386_CPU 1138 cr0 |= CR0_WP | CR0_AM; 1139#endif 1140 load_cr0(cr0); 1141 load_gs(_udatasel); 1142} 1143 1144static int 1145sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS) 1146{ 1147 int error; 1148 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, 1149 req); 1150 if (!error && req->newptr) 1151 resettodr(); 1152 return (error); 1153} 1154 1155SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW, 1156 &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", ""); 1157 1158SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set, 1159 CTLFLAG_RW, &disable_rtc_set, 0, ""); 1160 1161SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo, 1162 CTLFLAG_RD, &bootinfo, bootinfo, ""); 1163 1164SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock, 1165 CTLFLAG_RW, &wall_cmos_clock, 0, ""); 1166 1167/* 1168 * Initialize 386 and configure to run kernel 1169 */ 1170 1171/* 1172 * Initialize segments & interrupt table 1173 */ 1174 1175int _default_ldt; 1176union descriptor gdt[NGDT * MAXCPU]; /* global descriptor table */ 1177static struct gate_descriptor idt0[NIDT]; 1178struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */ 1179union descriptor ldt[NLDT]; /* local descriptor table */ 1180#ifdef SMP 1181/* table descriptors - used to load tables by microp */ 1182struct region_descriptor r_gdt, r_idt; 1183#endif 1184 1185int private_tss; /* flag indicating private tss */ 1186 1187#if defined(I586_CPU) && !defined(NO_F00F_HACK) 1188extern int has_f00f_bug; 1189#endif 1190 1191static struct i386tss dblfault_tss; 1192static char dblfault_stack[PAGE_SIZE]; 1193 1194extern struct user *proc0paddr; 1195 1196 1197/* software prototypes -- in more palatable form */ 1198struct soft_segment_descriptor gdt_segs[] = { 1199/* GNULL_SEL 0 Null Descriptor */ 1200{ 0x0, /* segment base address */ 1201 0x0, /* length */ 1202 0, /* segment type */ 1203 0, /* segment descriptor priority level */ 1204 0, /* segment descriptor present */ 1205 0, 0, 1206 0, /* default 32 vs 16 bit size */ 1207 0 /* limit granularity (byte/page units)*/ }, 1208/* GCODE_SEL 1 Code Descriptor for kernel */ 1209{ 0x0, /* segment base address */ 1210 0xfffff, /* length - all address space */ 1211 SDT_MEMERA, /* segment type */ 1212 0, /* segment descriptor priority level */ 1213 1, /* segment descriptor present */ 1214 0, 0, 1215 1, /* default 32 vs 16 bit size */ 1216 1 /* limit granularity (byte/page units)*/ }, 1217/* GDATA_SEL 2 Data Descriptor for kernel */ 1218{ 0x0, /* segment base address */ 1219 0xfffff, /* length - all address space */ 1220 SDT_MEMRWA, /* segment type */ 1221 0, /* segment descriptor priority level */ 1222 1, /* segment descriptor present */ 1223 0, 0, 1224 1, /* default 32 vs 16 bit size */ 1225 1 /* limit granularity (byte/page units)*/ }, 1226/* GPRIV_SEL 3 SMP Per-Processor Private Data Descriptor */ 1227{ 0x0, /* segment base address */ 1228 0xfffff, /* length - all address space */ 1229 SDT_MEMRWA, /* segment type */ 1230 0, /* segment descriptor priority level */ 1231 1, /* segment descriptor present */ 1232 0, 0, 1233 1, /* default 32 vs 16 bit size */ 1234 1 /* limit granularity (byte/page units)*/ }, 1235/* GPROC0_SEL 4 Proc 0 Tss Descriptor */ 1236{ 1237 0x0, /* segment base address */ 1238 sizeof(struct i386tss)-1,/* length - all address space 
*/ 1239 SDT_SYS386TSS, /* segment type */ 1240 0, /* segment descriptor priority level */ 1241 1, /* segment descriptor present */ 1242 0, 0, 1243 0, /* unused - default 32 vs 16 bit size */ 1244 0 /* limit granularity (byte/page units)*/ }, 1245/* GLDT_SEL 5 LDT Descriptor */ 1246{ (int) ldt, /* segment base address */ 1247 sizeof(ldt)-1, /* length - all address space */ 1248 SDT_SYSLDT, /* segment type */ 1249 SEL_UPL, /* segment descriptor priority level */ 1250 1, /* segment descriptor present */ 1251 0, 0, 1252 0, /* unused - default 32 vs 16 bit size */ 1253 0 /* limit granularity (byte/page units)*/ }, 1254/* GUSERLDT_SEL 6 User LDT Descriptor per process */ 1255{ (int) ldt, /* segment base address */ 1256 (512 * sizeof(union descriptor)-1), /* length */ 1257 SDT_SYSLDT, /* segment type */ 1258 0, /* segment descriptor priority level */ 1259 1, /* segment descriptor present */ 1260 0, 0, 1261 0, /* unused - default 32 vs 16 bit size */ 1262 0 /* limit granularity (byte/page units)*/ }, 1263/* GTGATE_SEL 7 Null Descriptor - Placeholder */ 1264{ 0x0, /* segment base address */ 1265 0x0, /* length - all address space */ 1266 0, /* segment type */ 1267 0, /* segment descriptor priority level */ 1268 0, /* segment descriptor present */ 1269 0, 0, 1270 0, /* default 32 vs 16 bit size */ 1271 0 /* limit granularity (byte/page units)*/ }, 1272/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */ 1273{ 0x400, /* segment base address */ 1274 0xfffff, /* length */ 1275 SDT_MEMRWA, /* segment type */ 1276 0, /* segment descriptor priority level */ 1277 1, /* segment descriptor present */ 1278 0, 0, 1279 1, /* default 32 vs 16 bit size */ 1280 1 /* limit granularity (byte/page units)*/ }, 1281/* GPANIC_SEL 9 Panic Tss Descriptor */ 1282{ (int) &dblfault_tss, /* segment base address */ 1283 sizeof(struct i386tss)-1,/* length - all address space */ 1284 SDT_SYS386TSS, /* segment type */ 1285 0, /* segment descriptor priority level */ 1286 1, /* segment descriptor present */ 1287 0, 0, 1288 0, /* unused - default 32 vs 16 bit size */ 1289 0 /* limit granularity (byte/page units)*/ }, 1290/* GBIOSCODE32_SEL 10 BIOS 32-bit interface (32bit Code) */ 1291{ 0, /* segment base address (overwritten) */ 1292 0xfffff, /* length */ 1293 SDT_MEMERA, /* segment type */ 1294 0, /* segment descriptor priority level */ 1295 1, /* segment descriptor present */ 1296 0, 0, 1297 0, /* default 32 vs 16 bit size */ 1298 1 /* limit granularity (byte/page units)*/ }, 1299/* GBIOSCODE16_SEL 11 BIOS 32-bit interface (16bit Code) */ 1300{ 0, /* segment base address (overwritten) */ 1301 0xfffff, /* length */ 1302 SDT_MEMERA, /* segment type */ 1303 0, /* segment descriptor priority level */ 1304 1, /* segment descriptor present */ 1305 0, 0, 1306 0, /* default 32 vs 16 bit size */ 1307 1 /* limit granularity (byte/page units)*/ }, 1308/* GBIOSDATA_SEL 12 BIOS 32-bit interface (Data) */ 1309{ 0, /* segment base address (overwritten) */ 1310 0xfffff, /* length */ 1311 SDT_MEMRWA, /* segment type */ 1312 0, /* segment descriptor priority level */ 1313 1, /* segment descriptor present */ 1314 0, 0, 1315 1, /* default 32 vs 16 bit size */ 1316 1 /* limit granularity (byte/page units)*/ }, 1317/* GBIOSUTIL_SEL 13 BIOS 16-bit interface (Utility) */ 1318{ 0, /* segment base address (overwritten) */ 1319 0xfffff, /* length */ 1320 SDT_MEMRWA, /* segment type */ 1321 0, /* segment descriptor priority level */ 1322 1, /* segment descriptor present */ 1323 0, 0, 1324 0, /* default 32 vs 16 bit size */ 1325 1 
/* limit granularity (byte/page units)*/ }, 1326/* GBIOSARGS_SEL 14 BIOS 16-bit interface (Arguments) */ 1327{ 0, /* segment base address (overwritten) */ 1328 0xfffff, /* length */ 1329 SDT_MEMRWA, /* segment type */ 1330 0, /* segment descriptor priority level */ 1331 1, /* segment descriptor present */ 1332 0, 0, 1333 0, /* default 32 vs 16 bit size */ 1334 1 /* limit granularity (byte/page units)*/ }, 1335}; 1336 1337static struct soft_segment_descriptor ldt_segs[] = { 1338 /* Null Descriptor - overwritten by call gate */ 1339{ 0x0, /* segment base address */ 1340 0x0, /* length - all address space */ 1341 0, /* segment type */ 1342 0, /* segment descriptor priority level */ 1343 0, /* segment descriptor present */ 1344 0, 0, 1345 0, /* default 32 vs 16 bit size */ 1346 0 /* limit granularity (byte/page units)*/ }, 1347 /* Null Descriptor - overwritten by call gate */ 1348{ 0x0, /* segment base address */ 1349 0x0, /* length - all address space */ 1350 0, /* segment type */ 1351 0, /* segment descriptor priority level */ 1352 0, /* segment descriptor present */ 1353 0, 0, 1354 0, /* default 32 vs 16 bit size */ 1355 0 /* limit granularity (byte/page units)*/ }, 1356 /* Null Descriptor - overwritten by call gate */ 1357{ 0x0, /* segment base address */ 1358 0x0, /* length - all address space */ 1359 0, /* segment type */ 1360 0, /* segment descriptor priority level */ 1361 0, /* segment descriptor present */ 1362 0, 0, 1363 0, /* default 32 vs 16 bit size */ 1364 0 /* limit granularity (byte/page units)*/ }, 1365 /* Code Descriptor for user */ 1366{ 0x0, /* segment base address */ 1367 0xfffff, /* length - all address space */ 1368 SDT_MEMERA, /* segment type */ 1369 SEL_UPL, /* segment descriptor priority level */ 1370 1, /* segment descriptor present */ 1371 0, 0, 1372 1, /* default 32 vs 16 bit size */ 1373 1 /* limit granularity (byte/page units)*/ }, 1374 /* Null Descriptor - overwritten by call gate */ 1375{ 0x0, /* segment base address */ 1376 0x0, /* length - all address space */ 1377 0, /* segment type */ 1378 0, /* segment descriptor priority level */ 1379 0, /* segment descriptor present */ 1380 0, 0, 1381 0, /* default 32 vs 16 bit size */ 1382 0 /* limit granularity (byte/page units)*/ }, 1383 /* Data Descriptor for user */ 1384{ 0x0, /* segment base address */ 1385 0xfffff, /* length - all address space */ 1386 SDT_MEMRWA, /* segment type */ 1387 SEL_UPL, /* segment descriptor priority level */ 1388 1, /* segment descriptor present */ 1389 0, 0, 1390 1, /* default 32 vs 16 bit size */ 1391 1 /* limit granularity (byte/page units)*/ }, 1392}; 1393 1394void 1395setidt(idx, func, typ, dpl, selec) 1396 int idx; 1397 inthand_t *func; 1398 int typ; 1399 int dpl; 1400 int selec; 1401{ 1402 struct gate_descriptor *ip; 1403 1404 ip = idt + idx; 1405 ip->gd_looffset = (int)func; 1406 ip->gd_selector = selec; 1407 ip->gd_stkcpy = 0; 1408 ip->gd_xx = 0; 1409 ip->gd_type = typ; 1410 ip->gd_dpl = dpl; 1411 ip->gd_p = 1; 1412 ip->gd_hioffset = ((int)func)>>16 ; 1413} 1414 1415#define IDTVEC(name) __CONCAT(X,name) 1416 1417extern inthand_t 1418 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl), 1419 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm), 1420 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot), 1421 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align), 1422 IDTVEC(syscall), IDTVEC(int0x80_syscall); 1423 1424void 1425sdtossd(sd, ssd) 1426 struct segment_descriptor *sd; 1427 struct soft_segment_descriptor *ssd; 1428{ 1429 ssd->ssd_base = 
(sd->sd_hibase << 24) | sd->sd_lobase; 1430 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit; 1431 ssd->ssd_type = sd->sd_type; 1432 ssd->ssd_dpl = sd->sd_dpl; 1433 ssd->ssd_p = sd->sd_p; 1434 ssd->ssd_def32 = sd->sd_def32; 1435 ssd->ssd_gran = sd->sd_gran; 1436} 1437 1438#define PHYSMAP_SIZE (2 * 8) 1439 1440/* 1441 * Populate the (physmap) array with base/bound pairs describing the 1442 * available physical memory in the system, then test this memory and 1443 * build the phys_avail array describing the actually-available memory. 1444 * 1445 * If we cannot accurately determine the physical memory map, then use 1446 * value from the 0xE801 call, and failing that, the RTC. 1447 * 1448 * Total memory size may be set by the kernel environment variable 1449 * hw.physmem or the compile-time define MAXMEM. 1450 */ 1451static void 1452getmemsize(int first) 1453{ 1454 int i, physmap_idx, pa_indx; 1455 u_int basemem, extmem; 1456 struct vm86frame vmf; 1457 struct vm86context vmc; 1458 vm_offset_t pa, physmap[PHYSMAP_SIZE]; 1459 pt_entry_t pte; 1460 const char *cp; 1461 struct bios_smap *smap; 1462 1463 bzero(&vmf, sizeof(struct vm86frame)); 1464 bzero(physmap, sizeof(physmap)); 1465 1466 /* 1467 * Perform "base memory" related probes & setup 1468 */ 1469 vm86_intcall(0x12, &vmf); 1470 basemem = vmf.vmf_ax; 1471 if (basemem > 640) { 1472 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n", 1473 basemem); 1474 basemem = 640; 1475 } 1476 1477 /* 1478 * XXX if biosbasemem is now < 640, there is a `hole' 1479 * between the end of base memory and the start of 1480 * ISA memory. The hole may be empty or it may 1481 * contain BIOS code or data. Map it read/write so 1482 * that the BIOS can write to it. (Memory from 0 to 1483 * the physical end of the kernel is mapped read-only 1484 * to begin with and then parts of it are remapped. 1485 * The parts that aren't remapped form holes that 1486 * remain read-only and are unused by the kernel. 1487 * The base memory area is below the physical end of 1488 * the kernel and right now forms a read-only hole. 1489 * The part of it from PAGE_SIZE to 1490 * (trunc_page(biosbasemem * 1024) - 1) will be 1491 * remapped and used by the kernel later.) 1492 * 1493 * This code is similar to the code used in 1494 * pmap_mapdev, but since no memory needs to be 1495 * allocated we simply change the mapping. 1496 */ 1497 for (pa = trunc_page(basemem * 1024); 1498 pa < ISA_HOLE_START; pa += PAGE_SIZE) { 1499 pte = (pt_entry_t)vtopte(pa + KERNBASE); 1500 *pte = pa | PG_RW | PG_V; 1501 } 1502 1503 /* 1504 * if basemem != 640, map pages r/w into vm86 page table so 1505 * that the bios can scribble on it. 1506 */ 1507 pte = (pt_entry_t)vm86paddr; 1508 for (i = basemem / 4; i < 160; i++) 1509 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U; 1510 1511 /* 1512 * map page 1 R/W into the kernel page table so we can use it 1513 * as a buffer. The kernel will unmap this page later. 
	 */
	pte = (pt_entry_t)vtopte(KERNBASE + (1 << PAGE_SHIFT));
	*pte = (1 << PAGE_SHIFT) | PG_RW | PG_V;

	/*
	 * get memory map with INT 15:E820
	 */
	vmc.npages = 0;
	smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
	vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);

	physmap_idx = 0;
	vmf.vmf_ebx = 0;
	do {
		vmf.vmf_eax = 0xE820;
		vmf.vmf_edx = SMAP_SIG;
		vmf.vmf_ecx = sizeof(struct bios_smap);
		i = vm86_datacall(0x15, &vmf, &vmc);
		if (i || vmf.vmf_eax != SMAP_SIG)
			break;
		if (boothowto & RB_VERBOSE)
			printf("SMAP type=%02x base=%08x %08x len=%08x %08x\n",
			    smap->type,
			    *(u_int32_t *)((char *)&smap->base + 4),
			    (u_int32_t)smap->base,
			    *(u_int32_t *)((char *)&smap->length + 4),
			    (u_int32_t)smap->length);

		if (smap->type != 0x01)
			goto next_run;

		if (smap->length == 0)
			goto next_run;

		if (smap->base >= 0xffffffff) {
			printf("%uK of memory above 4GB ignored\n",
			    (u_int)(smap->length / 1024));
			goto next_run;
		}

		for (i = 0; i <= physmap_idx; i += 2) {
			if (smap->base < physmap[i + 1]) {
				if (boothowto & RB_VERBOSE)
					printf(
	"Overlapping or non-monotonic memory region, ignoring second region\n");
				goto next_run;
			}
		}

		if (smap->base == physmap[physmap_idx + 1]) {
			physmap[physmap_idx + 1] += smap->length;
			goto next_run;
		}

		physmap_idx += 2;
		if (physmap_idx == PHYSMAP_SIZE) {
			printf(
		"Too many segments in the physical address map, giving up\n");
			break;
		}
		physmap[physmap_idx] = smap->base;
		physmap[physmap_idx + 1] = smap->base + smap->length;
next_run:
	} while (vmf.vmf_ebx != 0);

	if (physmap[1] != 0)
		goto physmap_done;

	/*
	 * If we failed above, try memory map with INT 15:E801
	 */
	vmf.vmf_ax = 0xE801;
	if (vm86_intcall(0x15, &vmf) == 0) {
		extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
	} else {
#if 0
		vmf.vmf_ah = 0x88;
		vm86_intcall(0x15, &vmf);
		extmem = vmf.vmf_ax;
#else
		/*
		 * Prefer the RTC value for extended memory.
		 */
		extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
#endif
	}

	/*
	 * Special hack for chipsets that still remap the 384k hole when
	 * there's 16MB of memory - this really confuses people that
	 * are trying to use bus mastering ISA controllers with the
	 * "16MB limit"; they only have 16MB, but the remapping puts
	 * them beyond the limit.
	 *
	 * If extended memory is between 15-16MB (16-17MB phys address range),
	 * chop it to 15MB.
	 */
	if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
		extmem = 15 * 1024;

	physmap[0] = 0;
	physmap[1] = basemem * 1024;
	physmap_idx = 2;
	physmap[physmap_idx] = 0x100000;
	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;

physmap_done:
	/*
	 * Now, physmap contains a map of physical memory.
	 */

#ifdef SMP
	/* make hole for AP bootstrap code */
	physmap[1] = mp_bootaddress(physmap[1] / 1024);

	/* look for the MP hardware - needed for apic addresses */
	mp_probe();
#endif

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".  We may adjust this
	 * based on ``hw.physmem'' and the results of the memory test.
	 */
	Maxmem = atop(physmap[physmap_idx + 1]);

#ifdef MAXMEM
	Maxmem = MAXMEM / 4;
#endif

	/*
	 * hw.physmem is a size in bytes; we also allow k, m, and g suffixes
	 * for the appropriate modifiers.  This overrides MAXMEM.
	 */
	if ((cp = getenv("hw.physmem")) != NULL) {
		u_int64_t AllowMem, sanity;
		char *ep;

		sanity = AllowMem = strtouq(cp, &ep, 0);
		if ((ep != cp) && (*ep != 0)) {
			switch(*ep) {
			case 'g':
			case 'G':
				AllowMem <<= 10;
			case 'm':
			case 'M':
				AllowMem <<= 10;
			case 'k':
			case 'K':
				AllowMem <<= 10;
				break;
			default:
				AllowMem = sanity = 0;
			}
			if (AllowMem < sanity)
				AllowMem = 0;
		}
		if (AllowMem == 0)
			printf("Ignoring invalid memory size of '%s'\n", cp);
		else
			Maxmem = atop(AllowMem);
	}

	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
	    (boothowto & RB_VERBOSE))
		printf("Physical memory use set to %uK\n", Maxmem * 4);

	/*
	 * If Maxmem has been increased beyond what the system has detected,
	 * extend the last memory segment to the new limit.
	 */
	if (atop(physmap[physmap_idx + 1]) < Maxmem)
		physmap[physmap_idx + 1] = ptoa(Maxmem);

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(first, 0);

	/*
	 * Size up each available chunk of physical memory.
	 */
	physmap[0] = PAGE_SIZE;		/* mask off page 0 */
	pa_indx = 0;
	phys_avail[pa_indx++] = physmap[0];
	phys_avail[pa_indx] = physmap[0];
#if 0
	pte = (pt_entry_t)vtopte(KERNBASE);
#else
	pte = (pt_entry_t)CMAP1;
#endif

	/*
	 * physmap is in bytes, so when converting to page boundaries,
	 * round up the start address and round down the end address.
	 */
	for (i = 0; i <= physmap_idx; i += 2) {
		vm_offset_t end;

		end = ptoa(Maxmem);
		if (physmap[i + 1] < end)
			end = trunc_page(physmap[i + 1]);
		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
			int tmp, page_bad;
#if 0
			int *ptr = 0;
#else
			int *ptr = (int *)CADDR1;
#endif

			/*
			 * block out kernel memory as not available.
			 */
			if (pa >= 0x100000 && pa < first)
				continue;

			page_bad = FALSE;

			/*
			 * map page into kernel: valid, read/write,non-cacheable
			 */
			*pte = pa | PG_V | PG_RW | PG_N;
			invltlb();

			tmp = *(int *)ptr;
			/*
			 * Test for alternating 1's and 0's
			 */
			*(volatile int *)ptr = 0xaaaaaaaa;
			if (*(volatile int *)ptr != 0xaaaaaaaa) {
				page_bad = TRUE;
			}
			/*
			 * Test for alternating 0's and 1's
			 */
			*(volatile int *)ptr = 0x55555555;
			if (*(volatile int *)ptr != 0x55555555) {
				page_bad = TRUE;
			}
			/*
			 * Test for all 1's
			 */
			*(volatile int *)ptr = 0xffffffff;
			if (*(volatile int *)ptr != 0xffffffff) {
				page_bad = TRUE;
			}
			/*
			 * Test for all 0's
			 */
			*(volatile int *)ptr = 0x0;
			if (*(volatile int *)ptr != 0x0) {
				page_bad = TRUE;
			}
			/*
			 * Restore original value.
			 */
			*(int *)ptr = tmp;

			/*
			 * Adjust array of valid/good pages.
1773 */ 1774 if (page_bad == TRUE) { 1775 continue; 1776 } 1777 /* 1778 * If this good page is a continuation of the 1779 * previous set of good pages, then just increase 1780 * the end pointer. Otherwise start a new chunk. 1781 * Note that "end" points one higher than end, 1782 * making the range >= start and < end. 1783 * If we're also doing a speculative memory 1784 * test and we at or past the end, bump up Maxmem 1785 * so that we keep going. The first bad page 1786 * will terminate the loop. 1787 */ 1788 if (phys_avail[pa_indx] == pa) { 1789 phys_avail[pa_indx] += PAGE_SIZE; 1790 } else { 1791 pa_indx++; 1792 if (pa_indx == PHYS_AVAIL_ARRAY_END) { 1793 printf("Too many holes in the physical address space, giving up\n"); 1794 pa_indx--; 1795 break; 1796 } 1797 phys_avail[pa_indx++] = pa; /* start */ 1798 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */ 1799 } 1800 physmem++; 1801 } 1802 } 1803 *pte = 0; 1804 invltlb(); 1805 1806 /* 1807 * XXX 1808 * The last chunk must contain at least one page plus the message 1809 * buffer to avoid complicating other code (message buffer address 1810 * calculation, etc.). 1811 */ 1812 while (phys_avail[pa_indx - 1] + PAGE_SIZE + 1813 round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) { 1814 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]); 1815 phys_avail[pa_indx--] = 0; 1816 phys_avail[pa_indx--] = 0; 1817 } 1818 1819 Maxmem = atop(phys_avail[pa_indx]); 1820 1821 /* Trim off space for the message buffer. */ 1822 phys_avail[pa_indx] -= round_page(MSGBUF_SIZE); 1823 1824 avail_end = phys_avail[pa_indx]; 1825} 1826 1827void 1828init386(first) 1829 int first; 1830{ 1831 int x; 1832 struct gate_descriptor *gdp; 1833 int gsel_tss; 1834#ifndef SMP 1835 /* table descriptors - used to load tables by microp */ 1836 struct region_descriptor r_gdt, r_idt; 1837#endif 1838 int off; 1839 1840 proc0.p_addr = proc0paddr; 1841 1842 atdevbase = ISA_HOLE_START + KERNBASE; 1843 1844 if (bootinfo.bi_modulep) { 1845 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE; 1846 preload_bootstrap_relocate(KERNBASE); 1847 } else { 1848 printf("WARNING: loader(8) metadata is missing!\n"); 1849 } 1850 if (bootinfo.bi_envp) 1851 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE; 1852 1853 /* 1854 * make gdt memory segments, the code segment goes up to end of the 1855 * page with etext in it, the data segment goes to the end of 1856 * the address space 1857 */ 1858 /* 1859 * XXX text protection is temporarily (?) disabled. The limit was 1860 * i386_btop(round_page(etext)) - 1. 
	gdt_segs[GCODE_SEL].ssd_limit = i386_btop(0) - 1;
	gdt_segs[GDATA_SEL].ssd_limit = i386_btop(0) - 1;
#ifdef SMP
	gdt_segs[GPRIV_SEL].ssd_limit =
	    i386_btop(sizeof(struct privatespace)) - 1;
	gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[0];
	gdt_segs[GPROC0_SEL].ssd_base =
	    (int) &SMP_prvspace[0].globaldata.gd_common_tss;
	SMP_prvspace[0].globaldata.gd_prvspace = &SMP_prvspace[0].globaldata;
#else
	gdt_segs[GPRIV_SEL].ssd_limit =
	    i386_btop(sizeof(struct globaldata)) - 1;
	gdt_segs[GPRIV_SEL].ssd_base = (int) &__globaldata;
	gdt_segs[GPROC0_SEL].ssd_base =
	    (int) &__globaldata.gd_common_tss;
	__globaldata.gd_prvspace = &__globaldata;
#endif

	for (x = 0; x < NGDT; x++) {
#ifdef BDE_DEBUGGER
		/* avoid overwriting db entries with APM ones */
		if (x >= GAPMCODE32_SEL && x <= GAPMDATA_SEL)
			continue;
#endif
		ssdtosd(&gdt_segs[x], &gdt[x].sd);
	}

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (int) gdt;
	lgdt(&r_gdt);

	/* setup curproc so that mutexes work */
	PCPU_SET(curproc, &proc0);

	LIST_INIT(&proc0.p_heldmtx);
	LIST_INIT(&proc0.p_contested);

	mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_RECURSE);
#ifdef SMP
	/*
	 * Interrupts can happen very early, so initialize imen_mtx here,
	 * rather than in init_locks().
	 */
	mtx_init(&imen_mtx, "imen", MTX_SPIN);
#endif

	/*
	 * Giant is used early for at least debugger traps and unexpected traps.
	 */
	mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE);
	mtx_enter(&Giant, MTX_DEF);

	/* make ldt memory segments */
	/*
	 * The data segment limit must not cover the user area because we
	 * don't want the user area to be writable in copyout() etc. (page
	 * level protection is lost in kernel mode on 386's).  Also, we
	 * don't want the user area to be writable directly (page level
	 * protection of the user area is not available on 486's with
	 * CR0_WP set, because there is no user-read/kernel-write mode).
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  And it
	 * should be spelled ...MAX_USER...
	 */
#define VM_END_USER_RW_ADDRESS	VM_MAXUSER_ADDRESS
	/*
	 * The code segment limit has to cover the user area until we move
	 * the signal trampoline out of the user area.  This is safe because
	 * the code segment cannot be written to directly.
	 */
#define VM_END_USER_R_ADDRESS	(VM_END_USER_RW_ADDRESS + UPAGES * PAGE_SIZE)
	ldt_segs[LUCODE_SEL].ssd_limit = i386_btop(VM_END_USER_R_ADDRESS) - 1;
	ldt_segs[LUDATA_SEL].ssd_limit = i386_btop(VM_END_USER_RW_ADDRESS) - 1;
	for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
		ssdtosd(&ldt_segs[x], &ldt[x].sd);

	_default_ldt = GSEL(GLDT_SEL, SEL_KPL);
	lldt(_default_ldt);
#ifdef USER_LDT
	PCPU_SET(currentldt, _default_ldt);
#endif

	/* exceptions */
	for (x = 0; x < NIDT; x++)
		setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(1, &IDTVEC(dbg), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(3, &IDTVEC(bpt), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(8, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
	setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(14, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(15, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(17, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(18, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(0x80, &IDTVEC(int0x80_syscall),
	    SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));

	r_idt.rd_limit = sizeof(idt0) - 1;
	r_idt.rd_base = (int) idt;
	lidt(&r_idt);

	/*
	 * We need this mutex before the console probe.
	 */
	mtx_init(&clock_lock, "clk", MTX_SPIN | MTX_RECURSE);

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

#ifdef DEV_ISA
	isa_defaultirq();
#endif

#ifdef DDB
	kdb_init();
	if (boothowto & RB_KDB)
		Debugger("Boot flags requested debugger");
#endif

	finishidentcpu();	/* Final stage of CPU initialization */
	setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	initializecpu();	/* Initialize CPU registers */

	/* make an initial tss so cpu can get interrupt stack on syscall! */
	PCPU_SET(common_tss.tss_esp0,
	    (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16);
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	private_tss = 0;
	PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
	ltr(gsel_tss);

	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
	    dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)];
	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
	    dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_cr3 = (int)IdlePTD;
	dblfault_tss.tss_eip = (int) dblfault_handler;
	dblfault_tss.tss_eflags = PSL_KERNEL;
	dblfault_tss.tss_ds = dblfault_tss.tss_es =
	    dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
	dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);

	vm86_initialize();
	getmemsize(first);

	/* now running on new page tables, configured, and u/iom is accessible */

	/* Map the message buffer. */
	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);

	msgbufinit(msgbufp, MSGBUF_SIZE);

	/* make a call gate to reenter kernel with */
	gdp = &ldt[LSYS5CALLS_SEL].gd;

	x = (int) &IDTVEC(syscall);
	gdp->gd_looffset = x++;
	gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
	gdp->gd_stkcpy = 1;
	gdp->gd_type = SDT_SYS386CGT;
	gdp->gd_dpl = SEL_UPL;
	gdp->gd_p = 1;
	gdp->gd_hioffset = ((int) &IDTVEC(syscall)) >>16;

	/* XXX does this work? */
	ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
	ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];

	/* transfer to user mode */

	_ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
	_udatasel = LSEL(LUDATA_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	proc0.p_addr->u_pcb.pcb_flags = 0;
	proc0.p_addr->u_pcb.pcb_cr3 = (int)IdlePTD;
	proc0.p_addr->u_pcb.pcb_schednest = 0;
	proc0.p_addr->u_pcb.pcb_ext = 0;
	proc0.p_md.md_regs = &proc0_tf;
}

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
static void f00f_hack(void *unused);
SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);

static void
f00f_hack(void *unused)
{
	struct gate_descriptor *new_idt;
#ifndef SMP
	struct region_descriptor r_idt;
#endif
	vm_offset_t tmp;

	if (!has_f00f_bug)
		return;

	printf("Intel Pentium detected, installing workaround for F00F bug\n");

	r_idt.rd_limit = sizeof(idt0) - 1;

	tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
	if (tmp == 0)
		panic("kmem_alloc returned 0");
	if (((unsigned int)tmp & (PAGE_SIZE-1)) != 0)
		panic("kmem_alloc returned non-page-aligned memory");
	/* Put the first seven entries in the lower page */
	new_idt = (struct gate_descriptor*)(tmp + PAGE_SIZE - (7*8));
	bcopy(idt, new_idt, sizeof(idt0));
	r_idt.rd_base = (int)new_idt;
	lidt(&r_idt);
	idt = new_idt;
	if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
	    VM_PROT_READ, FALSE) != KERN_SUCCESS)
		panic("vm_map_protect failed");
	return;
}
#endif /* defined(I586_CPU) && !defined(NO_F00F_HACK) */

int
ptrace_set_pc(p, addr)
	struct proc *p;
	unsigned long addr;
{
	p->p_md.md_regs->tf_eip = addr;
	return (0);
}

int
ptrace_single_step(p)
	struct proc *p;
{
	p->p_md.md_regs->tf_eflags |= PSL_T;
	return (0);
}

int
ptrace_read_u_check(p, addr, len)
	struct proc *p;
	vm_offset_t addr;
	size_t len;
{
	vm_offset_t gap;

	if ((vm_offset_t) (addr + len) < addr)
		return EPERM;
	if ((vm_offset_t) (addr + len) <= sizeof(struct user))
		return 0;

	gap = (char *) p->p_md.md_regs - (char *) p->p_addr;

	if ((vm_offset_t) addr < gap)
		return EPERM;
	if ((vm_offset_t) (addr + len) <=
	    (vm_offset_t) (gap + sizeof(struct trapframe)))
		return 0;
	return EPERM;
}

int
ptrace_write_u(p, off, data)
	struct proc *p;
	vm_offset_t off;
	long data;
{
	struct trapframe frame_copy;
	vm_offset_t min;
	struct trapframe *tp;

	/*
	 * Privileged kernel state is scattered all over the user area.
	 * Only allow write access to parts of regs and to fpregs.
	 */
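	/*
	 * Writes aimed at the trapframe are staged in a local copy so
	 * that EFL_SECURE()/CS_SECURE() can veto privileged eflags bits
	 * or a kernel %cs before the real user area is touched.
	 */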
	min = (char *)p->p_md.md_regs - (char *)p->p_addr;
	if (off >= min && off <= min + sizeof(struct trapframe) - sizeof(int)) {
		tp = p->p_md.md_regs;
		frame_copy = *tp;
		*(int *)((char *)&frame_copy + (off - min)) = data;
		if (!EFL_SECURE(frame_copy.tf_eflags, tp->tf_eflags) ||
		    !CS_SECURE(frame_copy.tf_cs))
			return (EINVAL);
		*(int*)((char *)p->p_addr + off) = data;
		return (0);
	}
	min = offsetof(struct user, u_pcb) + offsetof(struct pcb, pcb_savefpu);
	if (off >= min && off <= min + sizeof(struct save87) - sizeof(int)) {
		*(int*)((char *)p->p_addr + off) = data;
		return (0);
	}
	return (EFAULT);
}

int
fill_regs(p, regs)
	struct proc *p;
	struct reg *regs;
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = p->p_md.md_regs;
	regs->r_fs = tp->tf_fs;
	regs->r_es = tp->tf_es;
	regs->r_ds = tp->tf_ds;
	regs->r_edi = tp->tf_edi;
	regs->r_esi = tp->tf_esi;
	regs->r_ebp = tp->tf_ebp;
	regs->r_ebx = tp->tf_ebx;
	regs->r_edx = tp->tf_edx;
	regs->r_ecx = tp->tf_ecx;
	regs->r_eax = tp->tf_eax;
	regs->r_eip = tp->tf_eip;
	regs->r_cs = tp->tf_cs;
	regs->r_eflags = tp->tf_eflags;
	regs->r_esp = tp->tf_esp;
	regs->r_ss = tp->tf_ss;
	pcb = &p->p_addr->u_pcb;
	regs->r_gs = pcb->pcb_gs;
	return (0);
}

int
set_regs(p, regs)
	struct proc *p;
	struct reg *regs;
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = p->p_md.md_regs;
	if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
	    !CS_SECURE(regs->r_cs))
		return (EINVAL);
	tp->tf_fs = regs->r_fs;
	tp->tf_es = regs->r_es;
	tp->tf_ds = regs->r_ds;
	tp->tf_edi = regs->r_edi;
	tp->tf_esi = regs->r_esi;
	tp->tf_ebp = regs->r_ebp;
	tp->tf_ebx = regs->r_ebx;
	tp->tf_edx = regs->r_edx;
	tp->tf_ecx = regs->r_ecx;
	tp->tf_eax = regs->r_eax;
	tp->tf_eip = regs->r_eip;
	tp->tf_cs = regs->r_cs;
	tp->tf_eflags = regs->r_eflags;
	tp->tf_esp = regs->r_esp;
	tp->tf_ss = regs->r_ss;
	pcb = &p->p_addr->u_pcb;
	pcb->pcb_gs = regs->r_gs;
	return (0);
}

int
fill_fpregs(p, fpregs)
	struct proc *p;
	struct fpreg *fpregs;
{
	bcopy(&p->p_addr->u_pcb.pcb_savefpu, fpregs, sizeof *fpregs);
	return (0);
}

int
set_fpregs(p, fpregs)
	struct proc *p;
	struct fpreg *fpregs;
{
	bcopy(fpregs, &p->p_addr->u_pcb.pcb_savefpu, sizeof *fpregs);
	return (0);
}

int
fill_dbregs(p, dbregs)
	struct proc *p;
	struct dbreg *dbregs;
{
	struct pcb *pcb;

	pcb = &p->p_addr->u_pcb;
	dbregs->dr0 = pcb->pcb_dr0;
	dbregs->dr1 = pcb->pcb_dr1;
	dbregs->dr2 = pcb->pcb_dr2;
	dbregs->dr3 = pcb->pcb_dr3;
	dbregs->dr4 = 0;
	dbregs->dr5 = 0;
	dbregs->dr6 = pcb->pcb_dr6;
	dbregs->dr7 = pcb->pcb_dr7;
	return (0);
}

int
set_dbregs(p, dbregs)
	struct proc *p;
	struct dbreg *dbregs;
{
	struct pcb *pcb;
	int i;
	u_int32_t mask1, mask2;

	/*
	 * Don't let an illegal value for dr7 get set.  Specifically,
	 * check for undefined settings.  Setting these bit patterns
	 * results in undefined behaviour and can lead to an unexpected
	 * TRCTRAP.
	 */
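	/*
	 * Bits 16-31 of dr7 are eight 2-bit fields (the R/Wn and LENn
	 * settings for dr0-dr3).  Slide a 2-bit mask across them and
	 * reject the undefined 10b encoding; e.g. dr7 == 0x00020000
	 * (R/W0 == 10b) fails on the first iteration.
	 */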
	for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 8;
	     i++, mask1 <<= 2, mask2 <<= 2)
		if ((dbregs->dr7 & mask1) == mask2)
			return (EINVAL);

	if (dbregs->dr7 & 0x0000fc00)
		return (EINVAL);

	pcb = &p->p_addr->u_pcb;

	/*
	 * Don't let a process set a breakpoint that is not within the
	 * process's address space.  If a process could do this, it
	 * could halt the system by setting a breakpoint in the kernel
	 * (if ddb was enabled).  Thus, we need to check to make sure
	 * that no breakpoints are being enabled for addresses outside
	 * process's address space, unless, perhaps, we were called by
	 * uid 0.
	 *
	 * XXX - what about when the watched area of the user's
	 * address space is written into from within the kernel
	 * ... wouldn't that still cause a breakpoint to be generated
	 * from within kernel mode?
	 */

	if (suser(p) != 0) {
		if (dbregs->dr7 & 0x3) {
			/* dr0 is enabled */
			if (dbregs->dr0 >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		if (dbregs->dr7 & (0x3<<2)) {
			/* dr1 is enabled */
			if (dbregs->dr1 >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		if (dbregs->dr7 & (0x3<<4)) {
			/* dr2 is enabled */
			if (dbregs->dr2 >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		if (dbregs->dr7 & (0x3<<6)) {
			/* dr3 is enabled */
			if (dbregs->dr3 >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
	}

	pcb->pcb_dr0 = dbregs->dr0;
	pcb->pcb_dr1 = dbregs->dr1;
	pcb->pcb_dr2 = dbregs->dr2;
	pcb->pcb_dr3 = dbregs->dr3;
	pcb->pcb_dr6 = dbregs->dr6;
	pcb->pcb_dr7 = dbregs->dr7;

	pcb->pcb_flags |= PCB_DBREGS;

	return (0);
}

/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0, otherwise.
 */
int
user_dbreg_trap(void)
{
	u_int32_t dr7, dr6;	/* debug registers dr6 and dr7 */
	u_int32_t bp;		/* breakpoint bits extracted from dr6 */
	int nbp;		/* number of breakpoints that triggered */
	caddr_t addr[4];	/* breakpoint addresses */
	int i;

	dr7 = rdr7();
	if ((dr7 & 0x000000ff) == 0) {
		/*
		 * all GE and LE bits in the dr7 register are zero,
		 * thus the trap couldn't have been caused by the
		 * hardware debug registers
		 */
		return 0;
	}

	nbp = 0;
	dr6 = rdr6();
	bp = dr6 & 0x0000000f;

	if (!bp) {
		/*
		 * None of the breakpoint bits are set, meaning this
		 * trap was not caused by any of the debug registers
		 */
		return 0;
	}

	/*
	 * at least one of the breakpoints was hit, check to see
	 * which ones and if any of them are user space addresses
	 */

	if (bp & 0x01) {
		addr[nbp++] = (caddr_t)rdr0();
	}
	if (bp & 0x02) {
		addr[nbp++] = (caddr_t)rdr1();
	}
	if (bp & 0x04) {
		addr[nbp++] = (caddr_t)rdr2();
	}
	if (bp & 0x08) {
		addr[nbp++] = (caddr_t)rdr3();
	}

	for (i = 0; i < nbp; i++) {
		if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
			/*
			 * addr[i] is in user space
			 */
			return nbp;
		}
	}

	/*
	 * None of the breakpoints are in user space.
	 */
	return 0;
}


#ifndef DDB
void
Debugger(const char *msg)
{
	printf("Debugger(\"%s\") called.\n", msg);
}
#endif /* no DDB */

#include <sys/disklabel.h>

/*
 * Determine the size of the transfer, and make sure it is
 * within the boundaries of the partition.  Adjust transfer
 * if needed, and signal errors or early completion.
 */
int
bounds_check_with_label(struct bio *bp, struct disklabel *lp, int wlabel)
{
	struct partition *p = lp->d_partitions + dkpart(bp->bio_dev);
	int labelsect = lp->d_partitions[0].p_offset;
	int maxsz = p->p_size,
	    sz = (bp->bio_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT;

	/* overwriting disk label ? */
	/* XXX should also protect bootstrap in first 8K */
	if (bp->bio_blkno + p->p_offset <= LABELSECTOR + labelsect &&
#if LABELSECTOR != 0
	    bp->bio_blkno + p->p_offset + sz > LABELSECTOR + labelsect &&
#endif
	    (bp->bio_cmd == BIO_WRITE) && wlabel == 0) {
		bp->bio_error = EROFS;
		goto bad;
	}

#if defined(DOSBBSECTOR) && defined(notyet)
	/* overwriting master boot record? */
	if (bp->bio_blkno + p->p_offset <= DOSBBSECTOR &&
	    (bp->bio_cmd == BIO_WRITE) && wlabel == 0) {
		bp->bio_error = EROFS;
		goto bad;
	}
#endif

	/* beyond partition? */
	if (bp->bio_blkno < 0 || bp->bio_blkno + sz > maxsz) {
		/* if exactly at end of disk, return an EOF */
		if (bp->bio_blkno == maxsz) {
			bp->bio_resid = bp->bio_bcount;
			return(0);
		}
		/* or truncate if part of it fits */
		sz = maxsz - bp->bio_blkno;
		if (sz <= 0) {
			bp->bio_error = EINVAL;
			goto bad;
		}
		bp->bio_bcount = sz << DEV_BSHIFT;
	}

	bp->bio_pblkno = bp->bio_blkno + p->p_offset;
	return(1);

bad:
	bp->bio_flags |= BIO_ERROR;
	return(-1);
}

#ifdef DDB

/*
 * Provide inb() and outb() as functions.  They are normally only
 * available as macros calling inlined functions, and thus cannot be
 * called inside DDB.
 *
 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined.
 */

#undef inb
#undef outb

/* silence compiler warnings */
u_char inb(u_int);
void outb(u_int, u_char);

u_char
inb(u_int port)
{
	u_char	data;
	/*
	 * We use %%dx and not %1 here because i/o is done at %dx and not at
	 * %edx, while gcc generates inferior code (movw instead of movl)
	 * if we tell it to load (u_short) port.
	 */
	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

void
outb(u_int port, u_char data)
{
	u_char	al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

#endif /* DDB */