machdep.c revision 59249
/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
36 * 37 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 38 * $FreeBSD: head/sys/amd64/amd64/machdep.c 59249 2000-04-15 05:54:02Z phk $ 39 */ 40 41#include "apm.h" 42#include "ether.h" 43#include "npx.h" 44#include "opt_atalk.h" 45#include "opt_compat.h" 46#include "opt_cpu.h" 47#include "opt_ddb.h" 48#include "opt_inet.h" 49#include "opt_ipx.h" 50#include "opt_maxmem.h" 51#include "opt_msgbuf.h" 52#include "opt_perfmon.h" 53#include "opt_smp.h" 54#include "opt_sysvipc.h" 55#include "opt_user_ldt.h" 56#include "opt_userconfig.h" 57 58#include <sys/param.h> 59#include <sys/systm.h> 60#include <sys/sysproto.h> 61#include <sys/signalvar.h> 62#include <sys/kernel.h> 63#include <sys/linker.h> 64#include <sys/malloc.h> 65#include <sys/proc.h> 66#include <sys/buf.h> 67#include <sys/reboot.h> 68#include <sys/callout.h> 69#include <sys/mbuf.h> 70#include <sys/msgbuf.h> 71#include <sys/sysent.h> 72#include <sys/sysctl.h> 73#include <sys/vmmeter.h> 74#include <sys/bus.h> 75 76#ifdef SYSVMSG 77#include <sys/msg.h> 78#endif 79 80#ifdef SYSVSEM 81#include <sys/sem.h> 82#endif 83 84#include <vm/vm.h> 85#include <vm/vm_param.h> 86#include <sys/lock.h> 87#include <vm/vm_kern.h> 88#include <vm/vm_object.h> 89#include <vm/vm_page.h> 90#include <vm/vm_map.h> 91#include <vm/vm_pager.h> 92#include <vm/vm_extern.h> 93 94#include <sys/user.h> 95#include <sys/exec.h> 96#include <sys/cons.h> 97 98#include <ddb/ddb.h> 99 100#include <net/netisr.h> 101 102#include <machine/cpu.h> 103#include <machine/reg.h> 104#include <machine/clock.h> 105#include <machine/specialreg.h> 106#include <machine/bootinfo.h> 107#include <machine/ipl.h> 108#include <machine/md_var.h> 109#include <machine/pcb_ext.h> /* pcb.h included via sys/user.h */ 110#ifdef SMP 111#include <machine/smp.h> 112#include <machine/globaldata.h> 113#endif 114#ifdef PERFMON 115#include <machine/perfmon.h> 116#endif 117 118#ifdef OLD_BUS_ARCH 119#include <i386/isa/isa_device.h> 120#endif 121#include <i386/isa/intr_machdep.h> 122#include <isa/rtc.h> 123#include <machine/vm86.h> 124#include <machine/random.h> 125#include <sys/ptrace.h> 126#include <machine/sigframe.h> 127 128extern void init386 __P((int first)); 129extern void dblfault_handler __P((void)); 130 131extern void printcpuinfo(void); /* XXX header file */ 132extern void earlysetcpuclass(void); /* same header file */ 133extern void finishidentcpu(void); 134extern void panicifcpuunsupported(void); 135extern void initializecpu(void); 136 137static void cpu_startup __P((void *)); 138SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL) 139 140static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf"); 141 142int _udatasel, _ucodesel; 143u_int atdevbase; 144 145#if defined(SWTCH_OPTIM_STATS) 146extern int swtch_optim_stats; 147SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats, 148 CTLFLAG_RD, &swtch_optim_stats, 0, ""); 149SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count, 150 CTLFLAG_RD, &tlb_flush_count, 0, ""); 151#endif 152 153#ifdef PC98 154static int ispc98 = 1; 155#else 156static int ispc98 = 0; 157#endif 158SYSCTL_INT(_machdep, OID_AUTO, ispc98, CTLFLAG_RD, &ispc98, 0, ""); 159 160int physmem = 0; 161int cold = 1; 162 163static int 164sysctl_hw_physmem SYSCTL_HANDLER_ARGS 165{ 166 int error = sysctl_handle_int(oidp, 0, ctob(physmem), req); 167 return (error); 168} 169 170SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_INT|CTLFLAG_RD, 171 0, 0, sysctl_hw_physmem, "I", ""); 172 173static int 174sysctl_hw_usermem SYSCTL_HANDLER_ARGS 175{ 176 int error = sysctl_handle_int(oidp, 0, 177 ctob(physmem - cnt.v_wire_count), req); 
178 return (error); 179} 180 181SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD, 182 0, 0, sysctl_hw_usermem, "I", ""); 183 184static int 185sysctl_hw_availpages SYSCTL_HANDLER_ARGS 186{ 187 int error = sysctl_handle_int(oidp, 0, 188 i386_btop(avail_end - avail_start), req); 189 return (error); 190} 191 192SYSCTL_PROC(_hw, OID_AUTO, availpages, CTLTYPE_INT|CTLFLAG_RD, 193 0, 0, sysctl_hw_availpages, "I", ""); 194 195static int 196sysctl_machdep_msgbuf SYSCTL_HANDLER_ARGS 197{ 198 int error; 199 200 /* Unwind the buffer, so that it's linear (possibly starting with 201 * some initial nulls). 202 */ 203 error=sysctl_handle_opaque(oidp,msgbufp->msg_ptr+msgbufp->msg_bufr, 204 msgbufp->msg_size-msgbufp->msg_bufr,req); 205 if(error) return(error); 206 if(msgbufp->msg_bufr>0) { 207 error=sysctl_handle_opaque(oidp,msgbufp->msg_ptr, 208 msgbufp->msg_bufr,req); 209 } 210 return(error); 211} 212 213SYSCTL_PROC(_machdep, OID_AUTO, msgbuf, CTLTYPE_STRING|CTLFLAG_RD, 214 0, 0, sysctl_machdep_msgbuf, "A","Contents of kernel message buffer"); 215 216static int msgbuf_clear; 217 218static int 219sysctl_machdep_msgbuf_clear SYSCTL_HANDLER_ARGS 220{ 221 int error; 222 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, 223 req); 224 if (!error && req->newptr) { 225 /* Clear the buffer and reset write pointer */ 226 bzero(msgbufp->msg_ptr,msgbufp->msg_size); 227 msgbufp->msg_bufr=msgbufp->msg_bufx=0; 228 msgbuf_clear=0; 229 } 230 return (error); 231} 232 233SYSCTL_PROC(_machdep, OID_AUTO, msgbuf_clear, CTLTYPE_INT|CTLFLAG_RW, 234 &msgbuf_clear, 0, sysctl_machdep_msgbuf_clear, "I", 235 "Clear kernel message buffer"); 236 237int bootverbose = 0, Maxmem = 0; 238long dumplo; 239 240vm_offset_t phys_avail[10]; 241 242/* must be 2 less so 0 0 can signal end of chunks */ 243#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2) 244 245static vm_offset_t buffer_sva, buffer_eva; 246vm_offset_t clean_sva, clean_eva; 247static vm_offset_t pager_sva, pager_eva; 248 249#define offsetof(type, member) ((size_t)(&((type *)0)->member)) 250 251static void 252cpu_startup(dummy) 253 void *dummy; 254{ 255 register unsigned i; 256 register caddr_t v; 257 vm_offset_t maxaddr; 258 vm_size_t size = 0; 259 int firstaddr; 260 vm_offset_t minaddr; 261 262 if (boothowto & RB_VERBOSE) 263 bootverbose++; 264 265 /* 266 * Good {morning,afternoon,evening,night}. 267 */ 268 printf(version); 269 earlysetcpuclass(); 270 startrtclock(); 271 printcpuinfo(); 272 panicifcpuunsupported(); 273#ifdef PERFMON 274 perfmon_init(); 275#endif 276 printf("real memory = %u (%uK bytes)\n", ptoa(Maxmem), ptoa(Maxmem) / 1024); 277 /* 278 * Display any holes after the first chunk of extended memory. 279 */ 280 if (bootverbose) { 281 int indx; 282 283 printf("Physical memory chunk(s):\n"); 284 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) { 285 int size1 = phys_avail[indx + 1] - phys_avail[indx]; 286 287 printf("0x%08x - 0x%08x, %u bytes (%u pages)\n", 288 phys_avail[indx], phys_avail[indx + 1] - 1, size1, 289 size1 / PAGE_SIZE); 290 } 291 } 292 293 /* 294 * Calculate callout wheel size 295 */ 296 for (callwheelsize = 1, callwheelbits = 0; 297 callwheelsize < ncallout; 298 callwheelsize <<= 1, ++callwheelbits) 299 ; 300 callwheelmask = callwheelsize - 1; 301 302 /* 303 * Allocate space for system data structures. 304 * The first available kernel virtual address is in "v". 305 * As pages of kernel virtual memory are allocated, "v" is incremented. 
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */

	/*
	 * Make two passes.  The first pass calculates how much memory is
	 * needed and allocates it.  The second pass assigns virtual
	 * addresses to the various data structures.
	 */
	firstaddr = 0;
again:
	v = (caddr_t)firstaddr;

#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))

	valloc(callout, struct callout, ncallout);
	valloc(callwheel, struct callout_tailq, callwheelsize);
#ifdef SYSVSEM
	valloc(sema, struct semid_ds, seminfo.semmni);
	valloc(sem, struct sem, seminfo.semmns);
	/* This is pretty disgusting! */
	valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
#endif
#ifdef SYSVMSG
	valloc(msgpool, char, msginfo.msgmax);
	valloc(msgmaps, struct msgmap, msginfo.msgseg);
	valloc(msghdrs, struct msg, msginfo.msgtql);
	valloc(msqids, struct msqid_ds, msginfo.msgmni);
#endif

	/*
	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
	 * For the first 64MB of ram nominally allocate sufficient buffers to
	 * cover 1/4 of our ram.  Beyond the first 64MB allocate additional
	 * buffers to cover 1/20 of our ram over 64MB.
	 *
	 * factor represents the 1/4 x ram conversion.
	 */
	if (nbuf == 0) {
		int factor = 4 * BKVASIZE / PAGE_SIZE;

		nbuf = 50;
		if (physmem > 1024)
			nbuf += min((physmem - 1024) / factor, 16384 / factor);
		if (physmem > 16384)
			nbuf += (physmem - 16384) * 2 / (factor * 5);
	}

	/*
	 * Do not allow the buffer_map to be more than 1/2 the size of the
	 * kernel_map.
	 */
	if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) /
	    (BKVASIZE * 2)) {
		nbuf = (kernel_map->max_offset - kernel_map->min_offset) /
		    (BKVASIZE * 2);
		printf("Warning: nbufs capped at %d\n", nbuf);
	}

	nswbuf = max(min(nbuf/4, 256), 16);

	valloc(swbuf, struct buf, nswbuf);
	valloc(buf, struct buf, nbuf);
	v = bufhashinit(v);

	/*
	 * End of first pass, size has been calculated so allocate memory
	 */
	if (firstaddr == 0) {
		size = (vm_size_t)(v - firstaddr);
		firstaddr = (int)kmem_alloc(kernel_map, round_page(size));
		if (firstaddr == 0)
			panic("startup: no room for tables");
		goto again;
	}

	/*
	 * End of second pass, addresses have been assigned
	 */
	if ((vm_size_t)(v - firstaddr) != size)
		panic("startup: table size inconsistency");

	clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
			(nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
	buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
				(nbuf*BKVASIZE));
	pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
				(nswbuf*MAXPHYS) + pager_map_size);
	pager_map->system_map = 1;
	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
				(16*(ARG_MAX+(PAGE_SIZE*3))));

	/*
	 * Finally, allocate mbuf pool.  Since mclrefcnt is an off-size
	 * we use the more space efficient malloc in place of kmem_alloc.
406 */ 407 { 408 vm_offset_t mb_map_size; 409 410 mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES; 411 mb_map_size = roundup2(mb_map_size, max(MCLBYTES, PAGE_SIZE)); 412 mclrefcnt = malloc(mb_map_size / MCLBYTES, M_MBUF, M_NOWAIT); 413 bzero(mclrefcnt, mb_map_size / MCLBYTES); 414 mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr, 415 mb_map_size); 416 mb_map->system_map = 1; 417 } 418 419 /* 420 * Initialize callouts 421 */ 422 SLIST_INIT(&callfree); 423 for (i = 0; i < ncallout; i++) { 424 callout_init(&callout[i]); 425 callout[i].c_flags = CALLOUT_LOCAL_ALLOC; 426 SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle); 427 } 428 429 for (i = 0; i < callwheelsize; i++) { 430 TAILQ_INIT(&callwheel[i]); 431 } 432 433#if defined(USERCONFIG) 434 userconfig(); 435 cninit(); /* the preferred console may have changed */ 436#endif 437 438 printf("avail memory = %u (%uK bytes)\n", ptoa(cnt.v_free_count), 439 ptoa(cnt.v_free_count) / 1024); 440 441 /* 442 * Set up buffers, so they can be used to read disk labels. 443 */ 444 bufinit(); 445 vm_pager_bufferinit(); 446 447#ifdef SMP 448 /* 449 * OK, enough kmem_alloc/malloc state should be up, lets get on with it! 450 */ 451 mp_start(); /* fire up the APs and APICs */ 452 mp_announce(); 453#endif /* SMP */ 454} 455 456int 457register_netisr(num, handler) 458 int num; 459 netisr_t *handler; 460{ 461 462 if (num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs)) ) { 463 printf("register_netisr: bad isr number: %d\n", num); 464 return (EINVAL); 465 } 466 netisrs[num] = handler; 467 return (0); 468} 469 470int 471unregister_netisr(num) 472 int num; 473{ 474 475 if (num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs)) ) { 476 printf("unregister_netisr: bad isr number: %d\n", num); 477 return (EINVAL); 478 } 479 netisrs[num] = NULL; 480 return (0); 481} 482 483/* 484 * Send an interrupt to process. 485 * 486 * Stack is set up to allow sigcode stored 487 * at top to call routine, followed by kcall 488 * to sigreturn routine below. After sigreturn 489 * resets the signal mask, the stack, and the 490 * frame pointer, it returns to the user 491 * specified pc, psl. 492 */ 493static void 494osendsig(sig_t catcher, int sig, sigset_t *mask, u_long code) 495{ 496 register struct proc *p = curproc; 497 register struct trapframe *regs; 498 register struct osigframe *fp; 499 struct osigframe sf; 500 struct sigacts *psp = p->p_sigacts; 501 int oonstack; 502 503 regs = p->p_md.md_regs; 504 oonstack = (p->p_sigstk.ss_flags & SS_ONSTACK) ? 1 : 0; 505 506 /* Allocate and validate space for the signal handler context. */ 507 if ((p->p_flag & P_ALTSTACK) && !oonstack && 508 SIGISMEMBER(psp->ps_sigonstack, sig)) { 509 fp = (struct osigframe *)(p->p_sigstk.ss_sp + 510 p->p_sigstk.ss_size - sizeof(struct osigframe)); 511 p->p_sigstk.ss_flags |= SS_ONSTACK; 512 } 513 else 514 fp = (struct osigframe *)regs->tf_esp - 1; 515 516 /* 517 * grow() will return FALSE if the fp will not fit inside the stack 518 * and the stack can not be grown. useracc will return FALSE 519 * if access is denied. 520 */ 521 if (grow_stack(p, (int)fp) == FALSE || 522 !useracc((caddr_t)fp, sizeof(struct osigframe), VM_PROT_WRITE)) { 523 /* 524 * Process has trashed its stack; give it an illegal 525 * instruction to halt it in its tracks. 
526 */ 527 SIGACTION(p, SIGILL) = SIG_DFL; 528 SIGDELSET(p->p_sigignore, SIGILL); 529 SIGDELSET(p->p_sigcatch, SIGILL); 530 SIGDELSET(p->p_sigmask, SIGILL); 531 psignal(p, SIGILL); 532 return; 533 } 534 535 /* Translate the signal if appropriate */ 536 if (p->p_sysent->sv_sigtbl) { 537 if (sig <= p->p_sysent->sv_sigsize) 538 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; 539 } 540 541 /* Build the argument list for the signal handler. */ 542 sf.sf_signum = sig; 543 sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc; 544 if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) { 545 /* Signal handler installed with SA_SIGINFO. */ 546 sf.sf_arg2 = (register_t)&fp->sf_siginfo; 547 sf.sf_siginfo.si_signo = sig; 548 sf.sf_siginfo.si_code = code; 549 sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher; 550 } 551 else { 552 /* Old FreeBSD-style arguments. */ 553 sf.sf_arg2 = code; 554 sf.sf_addr = regs->tf_err; 555 sf.sf_ahu.sf_handler = catcher; 556 } 557 558 /* save scratch registers */ 559 sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax; 560 sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx; 561 sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx; 562 sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx; 563 sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi; 564 sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi; 565 sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs; 566 sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds; 567 sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss; 568 sf.sf_siginfo.si_sc.sc_es = regs->tf_es; 569 sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs; 570 sf.sf_siginfo.si_sc.sc_gs = rgs(); 571 sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp; 572 573 /* Build the signal context to be used by sigreturn. */ 574 sf.sf_siginfo.si_sc.sc_onstack = oonstack; 575 SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask); 576 sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp; 577 sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp; 578 sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip; 579 sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags; 580 sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno; 581 sf.sf_siginfo.si_sc.sc_err = regs->tf_err; 582 583 /* 584 * If we're a vm86 process, we want to save the segment registers. 585 * We also change eflags to be our emulated eflags, not the actual 586 * eflags. 587 */ 588 if (regs->tf_eflags & PSL_VM) { 589 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 590 struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 591 592 sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs; 593 sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs; 594 sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es; 595 sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds; 596 597 if (vm86->vm86_has_vme == 0) 598 sf.sf_siginfo.si_sc.sc_ps = 599 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) 600 | (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); 601 /* see sendsig for comment */ 602 tf->tf_eflags &= ~(PSL_VM|PSL_NT|PSL_T|PSL_VIF|PSL_VIP); 603 } 604 605 /* Copy the sigframe out to the user's stack. */ 606 if (copyout(&sf, fp, sizeof(struct osigframe)) != 0) { 607 /* 608 * Something is wrong with the stack pointer. 609 * ...Kill the process. 
		 */
		sigexit(p, SIGILL);
	}

	regs->tf_esp = (int)fp;
	regs->tf_eip = PS_STRINGS - szosigcode;
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	load_gs(_udatasel);
	regs->tf_ss = _udatasel;
}

void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct proc *p = curproc;
	struct trapframe *regs;
	struct sigacts *psp = p->p_sigacts;
	struct sigframe sf, *sfp;
	int oonstack;

	if (SIGISMEMBER(psp->ps_osigset, sig)) {
		osendsig(catcher, sig, mask, code);
		return;
	}

	regs = p->p_md.md_regs;
	oonstack = (p->p_sigstk.ss_flags & SS_ONSTACK) ? 1 : 0;

	/* save user context */
	bzero(&sf, sizeof(struct sigframe));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = p->p_sigstk;
	sf.sf_uc.uc_mcontext.mc_onstack = oonstack;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(struct trapframe));

	/* Allocate and validate space for the signal handler context. */
	if ((p->p_flag & P_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe *)(p->p_sigstk.ss_sp +
		    p->p_sigstk.ss_size - sizeof(struct sigframe));
		p->p_sigstk.ss_flags |= SS_ONSTACK;
	}
	else
		sfp = (struct sigframe *)regs->tf_esp - 1;

	/*
	 * grow() will return FALSE if the sfp will not fit inside the stack
	 * and the stack can not be grown. useracc will return FALSE if
	 * access is denied.
	 */
	if (grow_stack(p, (int)sfp) == FALSE ||
	    !useracc((caddr_t)sfp, sizeof(struct sigframe), VM_PROT_WRITE)) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
#ifdef DEBUG
		printf("process %d has trashed its stack\n", p->p_pid);
#endif
		SIGACTION(p, SIGILL) = SIG_DFL;
		SIGDELSET(p->p_sigignore, SIGILL);
		SIGDELSET(p->p_sigcatch, SIGILL);
		SIGDELSET(p->p_sigmask, SIGILL);
		psignal(p, SIGILL);
		return;
	}

	/* Translate the signal if appropriate */
	if (p->p_sysent->sv_sigtbl) {
		if (sig <= p->p_sysent->sv_sigsize)
			sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];
	}

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* fill siginfo structure */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = code;
		sf.sf_si.si_addr = (void*)regs->tf_err;
	}
	else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = code;
		sf.sf_addr = regs->tf_err;
		sf.sf_ahu.sf_handler = catcher;
	}

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
715 */ 716 if (regs->tf_eflags & PSL_VM) { 717 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 718 struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 719 720 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs; 721 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs; 722 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es; 723 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds; 724 725 if (vm86->vm86_has_vme == 0) 726 sf.sf_uc.uc_mcontext.mc_eflags = 727 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) | 728 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); 729 730 /* 731 * We should never have PSL_T set when returning from vm86 732 * mode. It may be set here if we deliver a signal before 733 * getting to vm86 mode, so turn it off. 734 * 735 * Clear PSL_NT to inhibit T_TSSFLT faults on return from 736 * syscalls made by the signal handler. This just avoids 737 * wasting time for our lazy fixup of such faults. PSL_NT 738 * does nothing in vm86 mode, but vm86 programs can set it 739 * almost legitimately in probes for old cpu types. 740 */ 741 tf->tf_eflags &= ~(PSL_VM|PSL_NT|PSL_T|PSL_VIF|PSL_VIP); 742 } 743 744 /* 745 * Copy the sigframe out to the user's stack. 746 */ 747 if (copyout(&sf, sfp, sizeof(struct sigframe)) != 0) { 748 /* 749 * Something is wrong with the stack pointer. 750 * ...Kill the process. 751 */ 752 sigexit(p, SIGILL); 753 } 754 755 regs->tf_esp = (int)sfp; 756 regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode); 757 regs->tf_cs = _ucodesel; 758 regs->tf_ds = _udatasel; 759 regs->tf_es = _udatasel; 760 regs->tf_fs = _udatasel; 761 load_gs(_udatasel); 762 regs->tf_ss = _udatasel; 763} 764 765/* 766 * System call to cleanup state after a signal 767 * has been taken. Reset signal mask and 768 * stack state from context left by sendsig (above). 769 * Return to previous pc and psl as specified by 770 * context left by sendsig. Check carefully to 771 * make sure that the user has not modified the 772 * state to gain improper privileges. 773 */ 774#define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0) 775#define CS_SECURE(cs) (ISPL(cs) == SEL_UPL) 776 777int 778osigreturn(p, uap) 779 struct proc *p; 780 struct osigreturn_args /* { 781 struct osigcontext *sigcntxp; 782 } */ *uap; 783{ 784 register struct osigcontext *scp; 785 register struct trapframe *regs = p->p_md.md_regs; 786 int eflags; 787 788 scp = uap->sigcntxp; 789 790 if (!useracc((caddr_t)scp, sizeof (struct osigcontext), VM_PROT_READ)) 791 return(EFAULT); 792 793 eflags = scp->sc_ps; 794 if (eflags & PSL_VM) { 795 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 796 struct vm86_kernel *vm86; 797 798 /* 799 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't 800 * set up the vm86 area, and we can't enter vm86 mode. 
801 */ 802 if (p->p_addr->u_pcb.pcb_ext == 0) 803 return (EINVAL); 804 vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 805 if (vm86->vm86_inited == 0) 806 return (EINVAL); 807 808 /* go back to user mode if both flags are set */ 809 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) 810 trapsignal(p, SIGBUS, 0); 811 812 if (vm86->vm86_has_vme) { 813 eflags = (tf->tf_eflags & ~VME_USERCHANGE) | 814 (eflags & VME_USERCHANGE) | PSL_VM; 815 } else { 816 vm86->vm86_eflags = eflags; /* save VIF, VIP */ 817 eflags = (tf->tf_eflags & ~VM_USERCHANGE) | (eflags & VM_USERCHANGE) | PSL_VM; 818 } 819 tf->tf_vm86_ds = scp->sc_ds; 820 tf->tf_vm86_es = scp->sc_es; 821 tf->tf_vm86_fs = scp->sc_fs; 822 tf->tf_vm86_gs = scp->sc_gs; 823 tf->tf_ds = _udatasel; 824 tf->tf_es = _udatasel; 825 tf->tf_fs = _udatasel; 826 } else { 827 /* 828 * Don't allow users to change privileged or reserved flags. 829 */ 830 /* 831 * XXX do allow users to change the privileged flag PSL_RF. 832 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers 833 * should sometimes set it there too. tf_eflags is kept in 834 * the signal context during signal handling and there is no 835 * other place to remember it, so the PSL_RF bit may be 836 * corrupted by the signal handler without us knowing. 837 * Corruption of the PSL_RF bit at worst causes one more or 838 * one less debugger trap, so allowing it is fairly harmless. 839 */ 840 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) { 841 return(EINVAL); 842 } 843 844 /* 845 * Don't allow users to load a valid privileged %cs. Let the 846 * hardware check for invalid selectors, excess privilege in 847 * other selectors, invalid %eip's and invalid %esp's. 848 */ 849 if (!CS_SECURE(scp->sc_cs)) { 850 trapsignal(p, SIGBUS, T_PROTFLT); 851 return(EINVAL); 852 } 853 regs->tf_ds = scp->sc_ds; 854 regs->tf_es = scp->sc_es; 855 regs->tf_fs = scp->sc_fs; 856 } 857 858 /* restore scratch registers */ 859 regs->tf_eax = scp->sc_eax; 860 regs->tf_ebx = scp->sc_ebx; 861 regs->tf_ecx = scp->sc_ecx; 862 regs->tf_edx = scp->sc_edx; 863 regs->tf_esi = scp->sc_esi; 864 regs->tf_edi = scp->sc_edi; 865 regs->tf_cs = scp->sc_cs; 866 regs->tf_ss = scp->sc_ss; 867 regs->tf_isp = scp->sc_isp; 868 869 if (scp->sc_onstack & 01) 870 p->p_sigstk.ss_flags |= SS_ONSTACK; 871 else 872 p->p_sigstk.ss_flags &= ~SS_ONSTACK; 873 874 SIGSETOLD(p->p_sigmask, scp->sc_mask); 875 SIG_CANTMASK(p->p_sigmask); 876 regs->tf_ebp = scp->sc_fp; 877 regs->tf_esp = scp->sc_sp; 878 regs->tf_eip = scp->sc_pc; 879 regs->tf_eflags = eflags; 880 return(EJUSTRETURN); 881} 882 883int 884sigreturn(p, uap) 885 struct proc *p; 886 struct sigreturn_args /* { 887 ucontext_t *sigcntxp; 888 } */ *uap; 889{ 890 struct trapframe *regs; 891 ucontext_t *ucp; 892 int cs, eflags; 893 894 ucp = uap->sigcntxp; 895 896 if (!useracc((caddr_t)ucp, sizeof(struct osigcontext), VM_PROT_READ)) 897 return (EFAULT); 898 if (((struct osigcontext *)ucp)->sc_trapno == 0x01d516) 899 return (osigreturn(p, (struct osigreturn_args *)uap)); 900 901 /* 902 * Since ucp is not an osigcontext but a ucontext_t, we have to 903 * check again if all of it is accessible. A ucontext_t is 904 * much larger, so instead of just checking for the pointer 905 * being valid for the size of an osigcontext, now check for 906 * it being valid for a whole, new-style ucontext_t. 
907 */ 908 if (!useracc((caddr_t)ucp, sizeof(ucontext_t), VM_PROT_READ)) 909 return (EFAULT); 910 911 regs = p->p_md.md_regs; 912 eflags = ucp->uc_mcontext.mc_eflags; 913 914 if (eflags & PSL_VM) { 915 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 916 struct vm86_kernel *vm86; 917 918 /* 919 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't 920 * set up the vm86 area, and we can't enter vm86 mode. 921 */ 922 if (p->p_addr->u_pcb.pcb_ext == 0) 923 return (EINVAL); 924 vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 925 if (vm86->vm86_inited == 0) 926 return (EINVAL); 927 928 /* go back to user mode if both flags are set */ 929 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) 930 trapsignal(p, SIGBUS, 0); 931 932 if (vm86->vm86_has_vme) { 933 eflags = (tf->tf_eflags & ~VME_USERCHANGE) | 934 (eflags & VME_USERCHANGE) | PSL_VM; 935 } else { 936 vm86->vm86_eflags = eflags; /* save VIF, VIP */ 937 eflags = (tf->tf_eflags & ~VM_USERCHANGE) | (eflags & VM_USERCHANGE) | PSL_VM; 938 } 939 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe)); 940 tf->tf_eflags = eflags; 941 tf->tf_vm86_ds = tf->tf_ds; 942 tf->tf_vm86_es = tf->tf_es; 943 tf->tf_vm86_fs = tf->tf_fs; 944 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs; 945 tf->tf_ds = _udatasel; 946 tf->tf_es = _udatasel; 947 tf->tf_fs = _udatasel; 948 } else { 949 /* 950 * Don't allow users to change privileged or reserved flags. 951 */ 952 /* 953 * XXX do allow users to change the privileged flag PSL_RF. 954 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers 955 * should sometimes set it there too. tf_eflags is kept in 956 * the signal context during signal handling and there is no 957 * other place to remember it, so the PSL_RF bit may be 958 * corrupted by the signal handler without us knowing. 959 * Corruption of the PSL_RF bit at worst causes one more or 960 * one less debugger trap, so allowing it is fairly harmless. 961 */ 962 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) { 963 printf("sigreturn: eflags = 0x%x\n", eflags); 964 return(EINVAL); 965 } 966 967 /* 968 * Don't allow users to load a valid privileged %cs. Let the 969 * hardware check for invalid selectors, excess privilege in 970 * other selectors, invalid %eip's and invalid %esp's. 
971 */ 972 cs = ucp->uc_mcontext.mc_cs; 973 if (!CS_SECURE(cs)) { 974 printf("sigreturn: cs = 0x%x\n", cs); 975 trapsignal(p, SIGBUS, T_PROTFLT); 976 return(EINVAL); 977 } 978 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(struct trapframe)); 979 } 980 981 if (ucp->uc_mcontext.mc_onstack & 1) 982 p->p_sigstk.ss_flags |= SS_ONSTACK; 983 else 984 p->p_sigstk.ss_flags &= ~SS_ONSTACK; 985 986 p->p_sigmask = ucp->uc_sigmask; 987 SIG_CANTMASK(p->p_sigmask); 988 return(EJUSTRETURN); 989} 990 991/* 992 * Machine dependent boot() routine 993 * 994 * I haven't seen anything to put here yet 995 * Possibly some stuff might be grafted back here from boot() 996 */ 997void 998cpu_boot(int howto) 999{ 1000} 1001 1002/* 1003 * Shutdown the CPU as much as possible 1004 */ 1005void 1006cpu_halt(void) 1007{ 1008 for (;;) 1009 __asm__ ("hlt"); 1010} 1011 1012/* 1013 * Clear registers on exec 1014 */ 1015void 1016setregs(p, entry, stack, ps_strings) 1017 struct proc *p; 1018 u_long entry; 1019 u_long stack; 1020 u_long ps_strings; 1021{ 1022 struct trapframe *regs = p->p_md.md_regs; 1023 struct pcb *pcb = &p->p_addr->u_pcb; 1024 1025#ifdef USER_LDT 1026 /* was i386_user_cleanup() in NetBSD */ 1027 user_ldt_free(pcb); 1028#endif 1029 1030 bzero((char *)regs, sizeof(struct trapframe)); 1031 regs->tf_eip = entry; 1032 regs->tf_esp = stack; 1033 regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T); 1034 regs->tf_ss = _udatasel; 1035 regs->tf_ds = _udatasel; 1036 regs->tf_es = _udatasel; 1037 regs->tf_fs = _udatasel; 1038 regs->tf_cs = _ucodesel; 1039 1040 /* PS_STRINGS value for BSD/OS binaries. It is 0 for non-BSD/OS. */ 1041 regs->tf_ebx = ps_strings; 1042 1043 /* reset %gs as well */ 1044 if (pcb == curpcb) 1045 load_gs(_udatasel); 1046 else 1047 pcb->pcb_gs = _udatasel; 1048 1049 /* 1050 * Reset the hardware debug registers if they were in use. 1051 * They won't have any meaning for the newly exec'd process. 1052 */ 1053 if (pcb->pcb_flags & PCB_DBREGS) { 1054 pcb->pcb_dr0 = 0; 1055 pcb->pcb_dr1 = 0; 1056 pcb->pcb_dr2 = 0; 1057 pcb->pcb_dr3 = 0; 1058 pcb->pcb_dr6 = 0; 1059 pcb->pcb_dr7 = 0; 1060 if (pcb == curpcb) { 1061 /* 1062 * Clear the debug registers on the running 1063 * CPU, otherwise they will end up affecting 1064 * the next process we switch to. 1065 */ 1066 reset_dbregs(); 1067 } 1068 pcb->pcb_flags &= ~PCB_DBREGS; 1069 } 1070 1071 /* 1072 * Initialize the math emulator (if any) for the current process. 1073 * Actually, just clear the bit that says that the emulator has 1074 * been initialized. Initialization is delayed until the process 1075 * traps to the emulator (if it is done at all) mainly because 1076 * emulators don't provide an entry point for initialization. 1077 */ 1078 p->p_addr->u_pcb.pcb_flags &= ~FP_SOFTFP; 1079 1080 /* 1081 * Arrange to trap the next npx or `fwait' instruction (see npx.c 1082 * for why fwait must be trapped at least if there is an npx or an 1083 * emulator). This is mainly to handle the case where npx0 is not 1084 * configured, since the npx routines normally set up the trap 1085 * otherwise. It should be done only at boot time, but doing it 1086 * here allows modifying `npx_exists' for testing the emulator on 1087 * systems with an npx. 1088 */ 1089 load_cr0(rcr0() | CR0_MP | CR0_TS); 1090 1091#if NNPX > 0 1092 /* Initialize the npx (if any) for the current process. */ 1093 npxinit(__INITIAL_NPXCW__); 1094#endif 1095 1096 /* 1097 * XXX - Linux emulator 1098 * Make sure sure edx is 0x0 on entry. Linux binaries depend 1099 * on it. 
1100 */ 1101 p->p_retval[1] = 0; 1102} 1103 1104static int 1105sysctl_machdep_adjkerntz SYSCTL_HANDLER_ARGS 1106{ 1107 int error; 1108 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, 1109 req); 1110 if (!error && req->newptr) 1111 resettodr(); 1112 return (error); 1113} 1114 1115SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW, 1116 &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", ""); 1117 1118SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set, 1119 CTLFLAG_RW, &disable_rtc_set, 0, ""); 1120 1121SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo, 1122 CTLFLAG_RD, &bootinfo, bootinfo, ""); 1123 1124SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock, 1125 CTLFLAG_RW, &wall_cmos_clock, 0, ""); 1126 1127/* 1128 * Initialize 386 and configure to run kernel 1129 */ 1130 1131/* 1132 * Initialize segments & interrupt table 1133 */ 1134 1135int _default_ldt; 1136#ifdef SMP 1137union descriptor gdt[NGDT * NCPU]; /* global descriptor table */ 1138#else 1139union descriptor gdt[NGDT]; /* global descriptor table */ 1140#endif 1141static struct gate_descriptor idt0[NIDT]; 1142struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */ 1143union descriptor ldt[NLDT]; /* local descriptor table */ 1144#ifdef SMP 1145/* table descriptors - used to load tables by microp */ 1146struct region_descriptor r_gdt, r_idt; 1147#endif 1148 1149#ifndef SMP 1150extern struct segment_descriptor common_tssd, *tss_gdt; 1151#endif 1152int private_tss; /* flag indicating private tss */ 1153 1154#if defined(I586_CPU) && !defined(NO_F00F_HACK) 1155extern int has_f00f_bug; 1156#endif 1157 1158static struct i386tss dblfault_tss; 1159static char dblfault_stack[PAGE_SIZE]; 1160 1161extern struct user *proc0paddr; 1162 1163 1164/* software prototypes -- in more palatable form */ 1165struct soft_segment_descriptor gdt_segs[] = { 1166/* GNULL_SEL 0 Null Descriptor */ 1167{ 0x0, /* segment base address */ 1168 0x0, /* length */ 1169 0, /* segment type */ 1170 0, /* segment descriptor priority level */ 1171 0, /* segment descriptor present */ 1172 0, 0, 1173 0, /* default 32 vs 16 bit size */ 1174 0 /* limit granularity (byte/page units)*/ }, 1175/* GCODE_SEL 1 Code Descriptor for kernel */ 1176{ 0x0, /* segment base address */ 1177 0xfffff, /* length - all address space */ 1178 SDT_MEMERA, /* segment type */ 1179 0, /* segment descriptor priority level */ 1180 1, /* segment descriptor present */ 1181 0, 0, 1182 1, /* default 32 vs 16 bit size */ 1183 1 /* limit granularity (byte/page units)*/ }, 1184/* GDATA_SEL 2 Data Descriptor for kernel */ 1185{ 0x0, /* segment base address */ 1186 0xfffff, /* length - all address space */ 1187 SDT_MEMRWA, /* segment type */ 1188 0, /* segment descriptor priority level */ 1189 1, /* segment descriptor present */ 1190 0, 0, 1191 1, /* default 32 vs 16 bit size */ 1192 1 /* limit granularity (byte/page units)*/ }, 1193/* GPRIV_SEL 3 SMP Per-Processor Private Data Descriptor */ 1194{ 0x0, /* segment base address */ 1195 0xfffff, /* length - all address space */ 1196 SDT_MEMRWA, /* segment type */ 1197 0, /* segment descriptor priority level */ 1198 1, /* segment descriptor present */ 1199 0, 0, 1200 1, /* default 32 vs 16 bit size */ 1201 1 /* limit granularity (byte/page units)*/ }, 1202/* GPROC0_SEL 4 Proc 0 Tss Descriptor */ 1203{ 1204 0x0, /* segment base address */ 1205 sizeof(struct i386tss)-1,/* length - all address space */ 1206 SDT_SYS386TSS, /* segment type */ 1207 0, /* segment descriptor priority level */ 1208 1, /* segment descriptor 
present */ 1209 0, 0, 1210 0, /* unused - default 32 vs 16 bit size */ 1211 0 /* limit granularity (byte/page units)*/ }, 1212/* GLDT_SEL 5 LDT Descriptor */ 1213{ (int) ldt, /* segment base address */ 1214 sizeof(ldt)-1, /* length - all address space */ 1215 SDT_SYSLDT, /* segment type */ 1216 SEL_UPL, /* segment descriptor priority level */ 1217 1, /* segment descriptor present */ 1218 0, 0, 1219 0, /* unused - default 32 vs 16 bit size */ 1220 0 /* limit granularity (byte/page units)*/ }, 1221/* GUSERLDT_SEL 6 User LDT Descriptor per process */ 1222{ (int) ldt, /* segment base address */ 1223 (512 * sizeof(union descriptor)-1), /* length */ 1224 SDT_SYSLDT, /* segment type */ 1225 0, /* segment descriptor priority level */ 1226 1, /* segment descriptor present */ 1227 0, 0, 1228 0, /* unused - default 32 vs 16 bit size */ 1229 0 /* limit granularity (byte/page units)*/ }, 1230/* GTGATE_SEL 7 Null Descriptor - Placeholder */ 1231{ 0x0, /* segment base address */ 1232 0x0, /* length - all address space */ 1233 0, /* segment type */ 1234 0, /* segment descriptor priority level */ 1235 0, /* segment descriptor present */ 1236 0, 0, 1237 0, /* default 32 vs 16 bit size */ 1238 0 /* limit granularity (byte/page units)*/ }, 1239/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */ 1240{ 0x400, /* segment base address */ 1241 0xfffff, /* length */ 1242 SDT_MEMRWA, /* segment type */ 1243 0, /* segment descriptor priority level */ 1244 1, /* segment descriptor present */ 1245 0, 0, 1246 1, /* default 32 vs 16 bit size */ 1247 1 /* limit granularity (byte/page units)*/ }, 1248/* GPANIC_SEL 9 Panic Tss Descriptor */ 1249{ (int) &dblfault_tss, /* segment base address */ 1250 sizeof(struct i386tss)-1,/* length - all address space */ 1251 SDT_SYS386TSS, /* segment type */ 1252 0, /* segment descriptor priority level */ 1253 1, /* segment descriptor present */ 1254 0, 0, 1255 0, /* unused - default 32 vs 16 bit size */ 1256 0 /* limit granularity (byte/page units)*/ }, 1257/* GBIOSCODE32_SEL 10 BIOS 32-bit interface (32bit Code) */ 1258{ 0, /* segment base address (overwritten) */ 1259 0xfffff, /* length */ 1260 SDT_MEMERA, /* segment type */ 1261 0, /* segment descriptor priority level */ 1262 1, /* segment descriptor present */ 1263 0, 0, 1264 0, /* default 32 vs 16 bit size */ 1265 1 /* limit granularity (byte/page units)*/ }, 1266/* GBIOSCODE16_SEL 11 BIOS 32-bit interface (16bit Code) */ 1267{ 0, /* segment base address (overwritten) */ 1268 0xfffff, /* length */ 1269 SDT_MEMERA, /* segment type */ 1270 0, /* segment descriptor priority level */ 1271 1, /* segment descriptor present */ 1272 0, 0, 1273 0, /* default 32 vs 16 bit size */ 1274 1 /* limit granularity (byte/page units)*/ }, 1275/* GBIOSDATA_SEL 12 BIOS 32-bit interface (Data) */ 1276{ 0, /* segment base address (overwritten) */ 1277 0xfffff, /* length */ 1278 SDT_MEMRWA, /* segment type */ 1279 0, /* segment descriptor priority level */ 1280 1, /* segment descriptor present */ 1281 0, 0, 1282 1, /* default 32 vs 16 bit size */ 1283 1 /* limit granularity (byte/page units)*/ }, 1284/* GBIOSUTIL_SEL 13 BIOS 16-bit interface (Utility) */ 1285{ 0, /* segment base address (overwritten) */ 1286 0xfffff, /* length */ 1287 SDT_MEMRWA, /* segment type */ 1288 0, /* segment descriptor priority level */ 1289 1, /* segment descriptor present */ 1290 0, 0, 1291 0, /* default 32 vs 16 bit size */ 1292 1 /* limit granularity (byte/page units)*/ }, 1293/* GBIOSARGS_SEL 14 BIOS 16-bit interface (Arguments) */ 1294{ 0, /* 
segment base address (overwritten) */ 1295 0xfffff, /* length */ 1296 SDT_MEMRWA, /* segment type */ 1297 0, /* segment descriptor priority level */ 1298 1, /* segment descriptor present */ 1299 0, 0, 1300 0, /* default 32 vs 16 bit size */ 1301 1 /* limit granularity (byte/page units)*/ }, 1302}; 1303 1304static struct soft_segment_descriptor ldt_segs[] = { 1305 /* Null Descriptor - overwritten by call gate */ 1306{ 0x0, /* segment base address */ 1307 0x0, /* length - all address space */ 1308 0, /* segment type */ 1309 0, /* segment descriptor priority level */ 1310 0, /* segment descriptor present */ 1311 0, 0, 1312 0, /* default 32 vs 16 bit size */ 1313 0 /* limit granularity (byte/page units)*/ }, 1314 /* Null Descriptor - overwritten by call gate */ 1315{ 0x0, /* segment base address */ 1316 0x0, /* length - all address space */ 1317 0, /* segment type */ 1318 0, /* segment descriptor priority level */ 1319 0, /* segment descriptor present */ 1320 0, 0, 1321 0, /* default 32 vs 16 bit size */ 1322 0 /* limit granularity (byte/page units)*/ }, 1323 /* Null Descriptor - overwritten by call gate */ 1324{ 0x0, /* segment base address */ 1325 0x0, /* length - all address space */ 1326 0, /* segment type */ 1327 0, /* segment descriptor priority level */ 1328 0, /* segment descriptor present */ 1329 0, 0, 1330 0, /* default 32 vs 16 bit size */ 1331 0 /* limit granularity (byte/page units)*/ }, 1332 /* Code Descriptor for user */ 1333{ 0x0, /* segment base address */ 1334 0xfffff, /* length - all address space */ 1335 SDT_MEMERA, /* segment type */ 1336 SEL_UPL, /* segment descriptor priority level */ 1337 1, /* segment descriptor present */ 1338 0, 0, 1339 1, /* default 32 vs 16 bit size */ 1340 1 /* limit granularity (byte/page units)*/ }, 1341 /* Null Descriptor - overwritten by call gate */ 1342{ 0x0, /* segment base address */ 1343 0x0, /* length - all address space */ 1344 0, /* segment type */ 1345 0, /* segment descriptor priority level */ 1346 0, /* segment descriptor present */ 1347 0, 0, 1348 0, /* default 32 vs 16 bit size */ 1349 0 /* limit granularity (byte/page units)*/ }, 1350 /* Data Descriptor for user */ 1351{ 0x0, /* segment base address */ 1352 0xfffff, /* length - all address space */ 1353 SDT_MEMRWA, /* segment type */ 1354 SEL_UPL, /* segment descriptor priority level */ 1355 1, /* segment descriptor present */ 1356 0, 0, 1357 1, /* default 32 vs 16 bit size */ 1358 1 /* limit granularity (byte/page units)*/ }, 1359}; 1360 1361void 1362setidt(idx, func, typ, dpl, selec) 1363 int idx; 1364 inthand_t *func; 1365 int typ; 1366 int dpl; 1367 int selec; 1368{ 1369 struct gate_descriptor *ip; 1370 1371 ip = idt + idx; 1372 ip->gd_looffset = (int)func; 1373 ip->gd_selector = selec; 1374 ip->gd_stkcpy = 0; 1375 ip->gd_xx = 0; 1376 ip->gd_type = typ; 1377 ip->gd_dpl = dpl; 1378 ip->gd_p = 1; 1379 ip->gd_hioffset = ((int)func)>>16 ; 1380} 1381 1382#define IDTVEC(name) __CONCAT(X,name) 1383 1384extern inthand_t 1385 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl), 1386 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm), 1387 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot), 1388 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align), 1389 IDTVEC(syscall), IDTVEC(int0x80_syscall); 1390 1391void 1392sdtossd(sd, ssd) 1393 struct segment_descriptor *sd; 1394 struct soft_segment_descriptor *ssd; 1395{ 1396 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase; 1397 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit; 1398 
ssd->ssd_type = sd->sd_type; 1399 ssd->ssd_dpl = sd->sd_dpl; 1400 ssd->ssd_p = sd->sd_p; 1401 ssd->ssd_def32 = sd->sd_def32; 1402 ssd->ssd_gran = sd->sd_gran; 1403} 1404 1405#define PHYSMAP_SIZE (2 * 8) 1406 1407/* 1408 * Populate the (physmap) array with base/bound pairs describing the 1409 * available physical memory in the system, then test this memory and 1410 * build the phys_avail array describing the actually-available memory. 1411 * 1412 * If we cannot accurately determine the physical memory map, then use 1413 * value from the 0xE801 call, and failing that, the RTC. 1414 * 1415 * Total memory size may be set by the kernel environment variable 1416 * hw.physmem or the compile-time define MAXMEM. 1417 */ 1418static void 1419getmemsize(int first) 1420{ 1421 int i, physmap_idx, pa_indx; 1422 u_int basemem, extmem; 1423 struct vm86frame vmf; 1424 struct vm86context vmc; 1425 vm_offset_t pa, physmap[PHYSMAP_SIZE]; 1426 pt_entry_t pte; 1427 const char *cp; 1428 struct { 1429 u_int64_t base; 1430 u_int64_t length; 1431 u_int32_t type; 1432 } *smap; 1433 1434 bzero(&vmf, sizeof(struct vm86frame)); 1435 bzero(physmap, sizeof(physmap)); 1436 1437 /* 1438 * Perform "base memory" related probes & setup 1439 */ 1440 vm86_intcall(0x12, &vmf); 1441 basemem = vmf.vmf_ax; 1442 if (basemem > 640) { 1443 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n", 1444 basemem); 1445 basemem = 640; 1446 } 1447 1448 /* 1449 * XXX if biosbasemem is now < 640, there is a `hole' 1450 * between the end of base memory and the start of 1451 * ISA memory. The hole may be empty or it may 1452 * contain BIOS code or data. Map it read/write so 1453 * that the BIOS can write to it. (Memory from 0 to 1454 * the physical end of the kernel is mapped read-only 1455 * to begin with and then parts of it are remapped. 1456 * The parts that aren't remapped form holes that 1457 * remain read-only and are unused by the kernel. 1458 * The base memory area is below the physical end of 1459 * the kernel and right now forms a read-only hole. 1460 * The part of it from PAGE_SIZE to 1461 * (trunc_page(biosbasemem * 1024) - 1) will be 1462 * remapped and used by the kernel later.) 1463 * 1464 * This code is similar to the code used in 1465 * pmap_mapdev, but since no memory needs to be 1466 * allocated we simply change the mapping. 1467 */ 1468 for (pa = trunc_page(basemem * 1024); 1469 pa < ISA_HOLE_START; pa += PAGE_SIZE) { 1470 pte = (pt_entry_t)vtopte(pa + KERNBASE); 1471 *pte = pa | PG_RW | PG_V; 1472 } 1473 1474 /* 1475 * if basemem != 640, map pages r/w into vm86 page table so 1476 * that the bios can scribble on it. 1477 */ 1478 pte = (pt_entry_t)vm86paddr; 1479 for (i = basemem / 4; i < 160; i++) 1480 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U; 1481 1482 /* 1483 * map page 1 R/W into the kernel page table so we can use it 1484 * as a buffer. The kernel will unmap this page later. 
	 */
	pte = (pt_entry_t)vtopte(KERNBASE + (1 << PAGE_SHIFT));
	*pte = (1 << PAGE_SHIFT) | PG_RW | PG_V;

	/*
	 * get memory map with INT 15:E820
	 */
#define SMAPSIZ		sizeof(*smap)
#define SMAP_SIG	0x534D4150			/* 'SMAP' */

	vmc.npages = 0;
	smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
	vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);

	physmap_idx = 0;
	vmf.vmf_ebx = 0;
	do {
		vmf.vmf_eax = 0xE820;
		vmf.vmf_edx = SMAP_SIG;
		vmf.vmf_ecx = SMAPSIZ;
		i = vm86_datacall(0x15, &vmf, &vmc);
		if (i || vmf.vmf_eax != SMAP_SIG)
			break;
		if (boothowto & RB_VERBOSE)
			printf("SMAP type=%02x base=%08x %08x len=%08x %08x\n",
			    smap->type,
			    *(u_int32_t *)((char *)&smap->base + 4),
			    (u_int32_t)smap->base,
			    *(u_int32_t *)((char *)&smap->length + 4),
			    (u_int32_t)smap->length);

		if (smap->type != 0x01)
			goto next_run;

		if (smap->length == 0)
			goto next_run;

		if (smap->base >= 0xffffffff) {
			printf("%uK of memory above 4GB ignored\n",
			    (u_int)(smap->length / 1024));
			goto next_run;
		}

		for (i = 0; i <= physmap_idx; i += 2) {
			if (smap->base < physmap[i + 1]) {
				if (boothowto & RB_VERBOSE)
					printf(
	"Overlapping or non-monotonic memory region, ignoring second region\n");
				goto next_run;
			}
		}

		if (smap->base == physmap[physmap_idx + 1]) {
			physmap[physmap_idx + 1] += smap->length;
			goto next_run;
		}

		physmap_idx += 2;
		if (physmap_idx == PHYSMAP_SIZE) {
			printf(
		"Too many segments in the physical address map, giving up\n");
			break;
		}
		physmap[physmap_idx] = smap->base;
		physmap[physmap_idx + 1] = smap->base + smap->length;
next_run:
	} while (vmf.vmf_ebx != 0);

	if (physmap[1] != 0)
		goto physmap_done;

	/*
	 * If we failed above, try memory map with INT 15:E801
	 */
	vmf.vmf_ax = 0xE801;
	if (vm86_intcall(0x15, &vmf) == 0) {
		extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
	} else {
#if 0
		vmf.vmf_ah = 0x88;
		vm86_intcall(0x15, &vmf);
		extmem = vmf.vmf_ax;
#else
		/*
		 * Prefer the RTC value for extended memory.
		 */
		extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
#endif
	}

	/*
	 * Special hack for chipsets that still remap the 384k hole when
	 * there's 16MB of memory - this really confuses people that
	 * are trying to use bus mastering ISA controllers with the
	 * "16MB limit"; they only have 16MB, but the remapping puts
	 * them beyond the limit.
	 *
	 * If extended memory is between 15-16MB (16-17MB phys address range),
	 * chop it to 15MB.
	 */
	if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
		extmem = 15 * 1024;

	physmap[0] = 0;
	physmap[1] = basemem * 1024;
	physmap_idx = 2;
	physmap[physmap_idx] = 0x100000;
	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;

physmap_done:
	/*
	 * Now, physmap contains a map of physical memory.
	 */

#ifdef SMP
	/* make hole for AP bootstrap code */
	physmap[1] = mp_bootaddress(physmap[1] / 1024);

	/* look for the MP hardware - needed for apic addresses */
	mp_probe();
#endif

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.
It should be 1610 * called something like "Maxphyspage". We may adjust this 1611 * based on ``hw.physmem'' and the results of the memory test. 1612 */ 1613 Maxmem = atop(physmap[physmap_idx + 1]); 1614 1615#ifdef MAXMEM 1616 Maxmem = MAXMEM / 4; 1617#endif 1618 1619 /* 1620 * hw.maxmem is a size in bytes; we also allow k, m, and g suffixes 1621 * for the appropriate modifiers. This overrides MAXMEM. 1622 */ 1623 if ((cp = getenv("hw.physmem")) != NULL) { 1624 u_int64_t AllowMem, sanity; 1625 char *ep; 1626 1627 sanity = AllowMem = strtouq(cp, &ep, 0); 1628 if ((ep != cp) && (*ep != 0)) { 1629 switch(*ep) { 1630 case 'g': 1631 case 'G': 1632 AllowMem <<= 10; 1633 case 'm': 1634 case 'M': 1635 AllowMem <<= 10; 1636 case 'k': 1637 case 'K': 1638 AllowMem <<= 10; 1639 break; 1640 default: 1641 AllowMem = sanity = 0; 1642 } 1643 if (AllowMem < sanity) 1644 AllowMem = 0; 1645 } 1646 if (AllowMem == 0) 1647 printf("Ignoring invalid memory size of '%s'\n", cp); 1648 else 1649 Maxmem = atop(AllowMem); 1650 } 1651 1652 if (atop(physmap[physmap_idx + 1]) != Maxmem && 1653 (boothowto & RB_VERBOSE)) 1654 printf("Physical memory use set to %uK\n", Maxmem * 4); 1655 1656 /* 1657 * If Maxmem has been increased beyond what the system has detected, 1658 * extend the last memory segment to the new limit. 1659 */ 1660 if (atop(physmap[physmap_idx + 1]) < Maxmem) 1661 physmap[physmap_idx + 1] = ptoa(Maxmem); 1662 1663 /* call pmap initialization to make new kernel address space */ 1664 pmap_bootstrap(first, 0); 1665 1666 /* 1667 * Size up each available chunk of physical memory. 1668 */ 1669 physmap[0] = PAGE_SIZE; /* mask off page 0 */ 1670 pa_indx = 0; 1671 phys_avail[pa_indx++] = physmap[0]; 1672 phys_avail[pa_indx] = physmap[0]; 1673#if 0 1674 pte = (pt_entry_t)vtopte(KERNBASE); 1675#else 1676 pte = (pt_entry_t)CMAP1; 1677#endif 1678 1679 /* 1680 * physmap is in bytes, so when converting to page boundaries, 1681 * round up the start address and round down the end address. 1682 */ 1683 for (i = 0; i <= physmap_idx; i += 2) { 1684 vm_offset_t end; 1685 1686 end = ptoa(Maxmem); 1687 if (physmap[i + 1] < end) 1688 end = trunc_page(physmap[i + 1]); 1689 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) { 1690 int tmp, page_bad; 1691#if 0 1692 int *ptr = 0; 1693#else 1694 int *ptr = (int *)CADDR1; 1695#endif 1696 1697 /* 1698 * block out kernel memory as not available. 1699 */ 1700 if (pa >= 0x100000 && pa < first) 1701 continue; 1702 1703 page_bad = FALSE; 1704 1705 /* 1706 * map page into kernel: valid, read/write,non-cacheable 1707 */ 1708 *pte = pa | PG_V | PG_RW | PG_N; 1709 invltlb(); 1710 1711 tmp = *(int *)ptr; 1712 /* 1713 * Test for alternating 1's and 0's 1714 */ 1715 *(volatile int *)ptr = 0xaaaaaaaa; 1716 if (*(volatile int *)ptr != 0xaaaaaaaa) { 1717 page_bad = TRUE; 1718 } 1719 /* 1720 * Test for alternating 0's and 1's 1721 */ 1722 *(volatile int *)ptr = 0x55555555; 1723 if (*(volatile int *)ptr != 0x55555555) { 1724 page_bad = TRUE; 1725 } 1726 /* 1727 * Test for all 1's 1728 */ 1729 *(volatile int *)ptr = 0xffffffff; 1730 if (*(volatile int *)ptr != 0xffffffff) { 1731 page_bad = TRUE; 1732 } 1733 /* 1734 * Test for all 0's 1735 */ 1736 *(volatile int *)ptr = 0x0; 1737 if (*(volatile int *)ptr != 0x0) { 1738 page_bad = TRUE; 1739 } 1740 /* 1741 * Restore original value. 1742 */ 1743 *(int *)ptr = tmp; 1744 1745 /* 1746 * Adjust array of valid/good pages. 
1747 */ 1748 if (page_bad == TRUE) { 1749 continue; 1750 } 1751 /* 1752 * If this good page is a continuation of the 1753 * previous set of good pages, then just increase 1754 * the end pointer. Otherwise start a new chunk. 1755 * Note that "end" points one higher than end, 1756 * making the range >= start and < end. 1757 * If we're also doing a speculative memory 1758 * test and we at or past the end, bump up Maxmem 1759 * so that we keep going. The first bad page 1760 * will terminate the loop. 1761 */ 1762 if (phys_avail[pa_indx] == pa) { 1763 phys_avail[pa_indx] += PAGE_SIZE; 1764 } else { 1765 pa_indx++; 1766 if (pa_indx == PHYS_AVAIL_ARRAY_END) { 1767 printf("Too many holes in the physical address space, giving up\n"); 1768 pa_indx--; 1769 break; 1770 } 1771 phys_avail[pa_indx++] = pa; /* start */ 1772 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */ 1773 } 1774 physmem++; 1775 } 1776 } 1777 *pte = 0; 1778 invltlb(); 1779 1780 /* 1781 * XXX 1782 * The last chunk must contain at least one page plus the message 1783 * buffer to avoid complicating other code (message buffer address 1784 * calculation, etc.). 1785 */ 1786 while (phys_avail[pa_indx - 1] + PAGE_SIZE + 1787 round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) { 1788 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]); 1789 phys_avail[pa_indx--] = 0; 1790 phys_avail[pa_indx--] = 0; 1791 } 1792 1793 Maxmem = atop(phys_avail[pa_indx]); 1794 1795 /* Trim off space for the message buffer. */ 1796 phys_avail[pa_indx] -= round_page(MSGBUF_SIZE); 1797 1798 avail_end = phys_avail[pa_indx]; 1799} 1800 1801void 1802init386(first) 1803 int first; 1804{ 1805 int x; 1806 struct gate_descriptor *gdp; 1807 int gsel_tss; 1808#ifndef SMP 1809 /* table descriptors - used to load tables by microp */ 1810 struct region_descriptor r_gdt, r_idt; 1811#endif 1812 int off; 1813 1814 /* 1815 * Prevent lowering of the ipl if we call tsleep() early. 1816 */ 1817 safepri = cpl; 1818 1819 proc0.p_addr = proc0paddr; 1820 1821 atdevbase = ISA_HOLE_START + KERNBASE; 1822 1823 if (bootinfo.bi_modulep) { 1824 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE; 1825 preload_bootstrap_relocate(KERNBASE); 1826 } 1827 if (bootinfo.bi_envp) 1828 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE; 1829 1830 /* 1831 * make gdt memory segments, the code segment goes up to end of the 1832 * page with etext in it, the data segment goes to the end of 1833 * the address space 1834 */ 1835 /* 1836 * XXX text protection is temporarily (?) disabled. The limit was 1837 * i386_btop(round_page(etext)) - 1. 
1838 */ 1839 gdt_segs[GCODE_SEL].ssd_limit = i386_btop(0) - 1; 1840 gdt_segs[GDATA_SEL].ssd_limit = i386_btop(0) - 1; 1841#ifdef SMP 1842 gdt_segs[GPRIV_SEL].ssd_limit = 1843 i386_btop(sizeof(struct privatespace)) - 1; 1844 gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[0]; 1845 gdt_segs[GPROC0_SEL].ssd_base = 1846 (int) &SMP_prvspace[0].globaldata.gd_common_tss; 1847 SMP_prvspace[0].globaldata.gd_prvspace = &SMP_prvspace[0]; 1848#else 1849 gdt_segs[GPRIV_SEL].ssd_limit = i386_btop(0) - 1; 1850 gdt_segs[GPROC0_SEL].ssd_base = (int) &common_tss; 1851#endif 1852 1853 for (x = 0; x < NGDT; x++) { 1854#ifdef BDE_DEBUGGER 1855 /* avoid overwriting db entries with APM ones */ 1856 if (x >= GAPMCODE32_SEL && x <= GAPMDATA_SEL) 1857 continue; 1858#endif 1859 ssdtosd(&gdt_segs[x], &gdt[x].sd); 1860 } 1861 1862 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 1863 r_gdt.rd_base = (int) gdt; 1864 lgdt(&r_gdt); 1865 1866 /* make ldt memory segments */ 1867 /* 1868 * The data segment limit must not cover the user area because we 1869 * don't want the user area to be writable in copyout() etc. (page 1870 * level protection is lost in kernel mode on 386's). Also, we 1871 * don't want the user area to be writable directly (page level 1872 * protection of the user area is not available on 486's with 1873 * CR0_WP set, because there is no user-read/kernel-write mode). 1874 * 1875 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max. And it 1876 * should be spelled ...MAX_USER... 1877 */ 1878#define VM_END_USER_RW_ADDRESS VM_MAXUSER_ADDRESS 1879 /* 1880 * The code segment limit has to cover the user area until we move 1881 * the signal trampoline out of the user area. This is safe because 1882 * the code segment cannot be written to directly. 1883 */ 1884#define VM_END_USER_R_ADDRESS (VM_END_USER_RW_ADDRESS + UPAGES * PAGE_SIZE) 1885 ldt_segs[LUCODE_SEL].ssd_limit = i386_btop(VM_END_USER_R_ADDRESS) - 1; 1886 ldt_segs[LUDATA_SEL].ssd_limit = i386_btop(VM_END_USER_RW_ADDRESS) - 1; 1887 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++) 1888 ssdtosd(&ldt_segs[x], &ldt[x].sd); 1889 1890 _default_ldt = GSEL(GLDT_SEL, SEL_KPL); 1891 lldt(_default_ldt); 1892#ifdef USER_LDT 1893 currentldt = _default_ldt; 1894#endif 1895 1896 /* exceptions */ 1897 for (x = 0; x < NIDT; x++) 1898 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1899 setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1900 setidt(1, &IDTVEC(dbg), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1901 setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1902 setidt(3, &IDTVEC(bpt), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL)); 1903 setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL)); 1904 setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1905 setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1906 setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1907 setidt(8, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL)); 1908 setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1909 setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1910 setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1911 setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1912 setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1913 setidt(14, &IDTVEC(page), SDT_SYS386IGT, 
	    SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(15, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(17, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(18, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(0x80, &IDTVEC(int0x80_syscall),
	    SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));

	r_idt.rd_limit = sizeof(idt0) - 1;
	r_idt.rd_base = (int) idt;
	lidt(&r_idt);

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

#include "isa.h"
#if NISA > 0
	isa_defaultirq();
#endif
	rand_initialize();

#ifdef DDB
	kdb_init();
	if (boothowto & RB_KDB)
		Debugger("Boot flags requested debugger");
#endif

	finishidentcpu();	/* Final stage of CPU initialization */
	setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	initializecpu();	/* Initialize CPU registers */

	/* make an initial tss so cpu can get interrupt stack on syscall! */
	common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16;
	common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	private_tss = 0;
	tss_gdt = &gdt[GPROC0_SEL].sd;
	common_tssd = *tss_gdt;
	common_tss.tss_ioopt = (sizeof common_tss) << 16;
	ltr(gsel_tss);

	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
	    dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)];
	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
	    dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_cr3 = (int)IdlePTD;
	dblfault_tss.tss_eip = (int) dblfault_handler;
	dblfault_tss.tss_eflags = PSL_KERNEL;
	dblfault_tss.tss_ds = dblfault_tss.tss_es =
	    dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
	dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);

	vm86_initialize();
	getmemsize(first);

	/* now running on new page tables, configured, and u/iom is accessible */

	/* Map the message buffer. */
	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);

	msgbufinit(msgbufp, MSGBUF_SIZE);

	/* make a call gate to reenter kernel with */
	gdp = &ldt[LSYS5CALLS_SEL].gd;

	x = (int) &IDTVEC(syscall);
	gdp->gd_looffset = x++;
	gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
	gdp->gd_stkcpy = 1;
	gdp->gd_type = SDT_SYS386CGT;
	gdp->gd_dpl = SEL_UPL;
	gdp->gd_p = 1;
	gdp->gd_hioffset = ((int) &IDTVEC(syscall)) >> 16;

	/* XXX does this work?
	 */
	ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
	ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];

	/* transfer to user mode */

	_ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
	_udatasel = LSEL(LUDATA_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	proc0.p_addr->u_pcb.pcb_flags = 0;
	proc0.p_addr->u_pcb.pcb_cr3 = (int)IdlePTD;
#ifdef SMP
	proc0.p_addr->u_pcb.pcb_mpnest = 1;
#endif
	proc0.p_addr->u_pcb.pcb_ext = 0;
}

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
static void f00f_hack(void *unused);
SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);

static void
f00f_hack(void *unused) {
	struct gate_descriptor *new_idt;
#ifndef SMP
	struct region_descriptor r_idt;
#endif
	vm_offset_t tmp;

	if (!has_f00f_bug)
		return;

	printf("Intel Pentium detected, installing workaround for F00F bug\n");

	r_idt.rd_limit = sizeof(idt0) - 1;

	tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
	if (tmp == 0)
		panic("kmem_alloc returned 0");
	if (((unsigned int)tmp & (PAGE_SIZE-1)) != 0)
		panic("kmem_alloc returned non-page-aligned memory");
	/* Put the first seven entries in the lower page */
	new_idt = (struct gate_descriptor*)(tmp + PAGE_SIZE - (7*8));
	bcopy(idt, new_idt, sizeof(idt0));
	r_idt.rd_base = (int)new_idt;
	lidt(&r_idt);
	idt = new_idt;
	if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
	    VM_PROT_READ, FALSE) != KERN_SUCCESS)
		panic("vm_map_protect failed");
	return;
}
#endif /* defined(I586_CPU) && !NO_F00F_HACK */

int
ptrace_set_pc(p, addr)
	struct proc *p;
	unsigned long addr;
{
	p->p_md.md_regs->tf_eip = addr;
	return (0);
}

int
ptrace_single_step(p)
	struct proc *p;
{
	p->p_md.md_regs->tf_eflags |= PSL_T;
	return (0);
}

int ptrace_read_u_check(p, addr, len)
	struct proc *p;
	vm_offset_t addr;
	size_t len;
{
	vm_offset_t gap;

	if ((vm_offset_t) (addr + len) < addr)
		return EPERM;
	if ((vm_offset_t) (addr + len) <= sizeof(struct user))
		return 0;

	gap = (char *) p->p_md.md_regs - (char *) p->p_addr;

	if ((vm_offset_t) addr < gap)
		return EPERM;
	if ((vm_offset_t) (addr + len) <=
	    (vm_offset_t) (gap + sizeof(struct trapframe)))
		return 0;
	return EPERM;
}

int ptrace_write_u(p, off, data)
	struct proc *p;
	vm_offset_t off;
	long data;
{
	struct trapframe frame_copy;
	vm_offset_t min;
	struct trapframe *tp;

	/*
	 * Privileged kernel state is scattered all over the user area.
	 * Only allow write access to parts of regs and to fpregs.
	 */
	min = (char *)p->p_md.md_regs - (char *)p->p_addr;
	if (off >= min && off <= min + sizeof(struct trapframe) - sizeof(int)) {
		tp = p->p_md.md_regs;
		frame_copy = *tp;
		*(int *)((char *)&frame_copy + (off - min)) = data;
		if (!EFL_SECURE(frame_copy.tf_eflags, tp->tf_eflags) ||
		    !CS_SECURE(frame_copy.tf_cs))
			return (EINVAL);
		*(int*)((char *)p->p_addr + off) = data;
		return (0);
	}
	min = offsetof(struct user, u_pcb) + offsetof(struct pcb, pcb_savefpu);
	if (off >= min && off <= min + sizeof(struct save87) - sizeof(int)) {
		*(int*)((char *)p->p_addr + off) = data;
		return (0);
	}
	return (EFAULT);
}

int
fill_regs(p, regs)
	struct proc *p;
	struct reg *regs;
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = p->p_md.md_regs;
	regs->r_fs = tp->tf_fs;
	regs->r_es = tp->tf_es;
	regs->r_ds = tp->tf_ds;
	regs->r_edi = tp->tf_edi;
	regs->r_esi = tp->tf_esi;
	regs->r_ebp = tp->tf_ebp;
	regs->r_ebx = tp->tf_ebx;
	regs->r_edx = tp->tf_edx;
	regs->r_ecx = tp->tf_ecx;
	regs->r_eax = tp->tf_eax;
	regs->r_eip = tp->tf_eip;
	regs->r_cs = tp->tf_cs;
	regs->r_eflags = tp->tf_eflags;
	regs->r_esp = tp->tf_esp;
	regs->r_ss = tp->tf_ss;
	pcb = &p->p_addr->u_pcb;
	regs->r_gs = pcb->pcb_gs;
	return (0);
}

int
set_regs(p, regs)
	struct proc *p;
	struct reg *regs;
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = p->p_md.md_regs;
	if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
	    !CS_SECURE(regs->r_cs))
		return (EINVAL);
	tp->tf_fs = regs->r_fs;
	tp->tf_es = regs->r_es;
	tp->tf_ds = regs->r_ds;
	tp->tf_edi = regs->r_edi;
	tp->tf_esi = regs->r_esi;
	tp->tf_ebp = regs->r_ebp;
	tp->tf_ebx = regs->r_ebx;
	tp->tf_edx = regs->r_edx;
	tp->tf_ecx = regs->r_ecx;
	tp->tf_eax = regs->r_eax;
	tp->tf_eip = regs->r_eip;
	tp->tf_cs = regs->r_cs;
	tp->tf_eflags = regs->r_eflags;
	tp->tf_esp = regs->r_esp;
	tp->tf_ss = regs->r_ss;
	pcb = &p->p_addr->u_pcb;
	pcb->pcb_gs = regs->r_gs;
	return (0);
}

int
fill_fpregs(p, fpregs)
	struct proc *p;
	struct fpreg *fpregs;
{
	bcopy(&p->p_addr->u_pcb.pcb_savefpu, fpregs, sizeof *fpregs);
	return (0);
}

int
set_fpregs(p, fpregs)
	struct proc *p;
	struct fpreg *fpregs;
{
	bcopy(fpregs, &p->p_addr->u_pcb.pcb_savefpu, sizeof *fpregs);
	return (0);
}

int
fill_dbregs(p, dbregs)
	struct proc *p;
	struct dbreg *dbregs;
{
	struct pcb *pcb;

	pcb = &p->p_addr->u_pcb;
	dbregs->dr0 = pcb->pcb_dr0;
	dbregs->dr1 = pcb->pcb_dr1;
	dbregs->dr2 = pcb->pcb_dr2;
	dbregs->dr3 = pcb->pcb_dr3;
	dbregs->dr4 = 0;
	dbregs->dr5 = 0;
	dbregs->dr6 = pcb->pcb_dr6;
	dbregs->dr7 = pcb->pcb_dr7;
	return (0);
}

int
set_dbregs(p, dbregs)
	struct proc *p;
	struct dbreg *dbregs;
{
	struct pcb *pcb;

	pcb = &p->p_addr->u_pcb;

	/*
	 * Don't let a process set a breakpoint that is not within the
	 * process's address space.  If a process could do this, it
	 * could halt the system by setting a breakpoint in the kernel
	 * (if ddb was enabled).
	 * Thus, we need to check to make sure
	 * that no breakpoints are being enabled for addresses outside
	 * the process's address space, unless, perhaps, we were called
	 * by uid 0.
	 *
	 * XXX - what about when the watched area of the user's
	 * address space is written into from within the kernel
	 * ... wouldn't that still cause a breakpoint to be generated
	 * from within kernel mode?
	 */

	if (p->p_ucred->cr_uid != 0) {
		if (dbregs->dr7 & 0x3) {
			/* dr0 is enabled */
			if (dbregs->dr0 >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		if (dbregs->dr7 & (0x3<<2)) {
			/* dr1 is enabled */
			if (dbregs->dr1 >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		if (dbregs->dr7 & (0x3<<4)) {
			/* dr2 is enabled */
			if (dbregs->dr2 >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}

		if (dbregs->dr7 & (0x3<<6)) {
			/* dr3 is enabled */
			if (dbregs->dr3 >= VM_MAXUSER_ADDRESS)
				return (EINVAL);
		}
	}

	pcb->pcb_dr0 = dbregs->dr0;
	pcb->pcb_dr1 = dbregs->dr1;
	pcb->pcb_dr2 = dbregs->dr2;
	pcb->pcb_dr3 = dbregs->dr3;
	pcb->pcb_dr6 = dbregs->dr6;
	pcb->pcb_dr7 = dbregs->dr7;

	pcb->pcb_flags |= PCB_DBREGS;

	return (0);
}

/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0, otherwise.
 */
int
user_dbreg_trap(void)
{
	u_int32_t dr7, dr6;	/* debug registers dr6 and dr7 */
	u_int32_t bp;		/* breakpoint bits extracted from dr6 */
	int nbp;		/* number of breakpoints that triggered */
	caddr_t addr[4];	/* breakpoint addresses */
	int i;

	dr7 = rdr7();
	if ((dr7 & 0x000000ff) == 0) {
		/*
		 * all global and local enable bits in the dr7 register
		 * are zero, thus the trap couldn't have been caused by
		 * the hardware debug registers
		 */
		return 0;
	}

	nbp = 0;
	dr6 = rdr6();
	bp = dr6 & 0x0000000f;

	if (!bp) {
		/*
		 * None of the breakpoint bits are set, meaning this
		 * trap was not caused by any of the debug registers
		 */
		return 0;
	}

	/*
	 * at least one of the breakpoints was hit; check to see
	 * which ones, and whether any of them are user space addresses
	 */

	if (bp & 0x01) {
		addr[nbp++] = (caddr_t)rdr0();
	}
	if (bp & 0x02) {
		addr[nbp++] = (caddr_t)rdr1();
	}
	if (bp & 0x04) {
		addr[nbp++] = (caddr_t)rdr2();
	}
	if (bp & 0x08) {
		addr[nbp++] = (caddr_t)rdr3();
	}

	for (i = 0; i < nbp; i++) {
		if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
			/*
			 * addr[i] is in user space
			 */
			return nbp;
		}
	}

	/*
	 * None of the breakpoints are in user space.
	 */
	return 0;
}


#ifndef DDB
void
Debugger(const char *msg)
{
	printf("Debugger(\"%s\") called.\n", msg);
}
#endif /* no DDB */

#include <sys/disklabel.h>

/*
 * Determine the size of the transfer, and make sure it is
 * within the boundaries of the partition.  Adjust transfer
 * if needed, and signal errors or early completion.
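 *
 * For example (illustrative numbers only): with DEV_BSIZE = 512 and
 * DEV_BSHIFT = 9, a 1536-byte request starting at bio_blkno 98 of a
 * 100-sector partition computes sz = 3 sectors; since 98 + 3 > 100 the
 * request is truncated to sz = 2 (bio_bcount = 1024) so that it ends at
 * the partition boundary, while a request starting exactly at sector
 * 100 completes early with bio_resid = bio_bcount.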
 */
int
bounds_check_with_label(struct bio *bp, struct disklabel *lp, int wlabel)
{
	struct partition *p = lp->d_partitions + dkpart(bp->bio_dev);
	int labelsect = lp->d_partitions[0].p_offset;
	int maxsz = p->p_size,
	    sz = (bp->bio_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT;

	/* overwriting disk label? */
	/* XXX should also protect bootstrap in first 8K */
	if (bp->bio_blkno + p->p_offset <= LABELSECTOR + labelsect &&
#if LABELSECTOR != 0
	    bp->bio_blkno + p->p_offset + sz > LABELSECTOR + labelsect &&
#endif
	    (bp->bio_cmd == BIO_WRITE) && wlabel == 0) {
		bp->bio_error = EROFS;
		goto bad;
	}

#if defined(DOSBBSECTOR) && defined(notyet)
	/* overwriting master boot record? */
	if (bp->bio_blkno + p->p_offset <= DOSBBSECTOR &&
	    (bp->bio_cmd == BIO_WRITE) && wlabel == 0) {
		bp->bio_error = EROFS;
		goto bad;
	}
#endif

	/* beyond partition? */
	if (bp->bio_blkno < 0 || bp->bio_blkno + sz > maxsz) {
		/* if exactly at end of disk, return an EOF */
		if (bp->bio_blkno == maxsz) {
			bp->bio_resid = bp->bio_bcount;
			return(0);
		}
		/* or truncate if part of it fits */
		sz = maxsz - bp->bio_blkno;
		if (sz <= 0) {
			bp->bio_error = EINVAL;
			goto bad;
		}
		bp->bio_bcount = sz << DEV_BSHIFT;
	}

	bp->bio_pblkno = bp->bio_blkno + p->p_offset;
	return(1);

bad:
	bp->bio_flags |= BIO_ERROR;
	return(-1);
}

#ifdef DDB

/*
 * Provide inb() and outb() as functions.  They are normally only
 * available as macros calling inlined functions, thus cannot be
 * called inside DDB.
 *
 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined.
 */

#undef inb
#undef outb

/* silence compiler warnings */
u_char inb(u_int);
void outb(u_int, u_char);

u_char
inb(u_int port)
{
	u_char data;
	/*
	 * We use %%dx and not %1 here because i/o is done at %dx and not at
	 * %edx, while gcc generates inferior code (movw instead of movl)
	 * if we tell it to load (u_short) port.
	 */
	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

void
outb(u_int port, u_char data)
{
	u_char al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

#endif /* DDB */
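
/*
 * Illustrative sketch, not part of the kernel build: one way a user-level
 * debugger might drive the fill_dbregs()/set_dbregs() paths above, assuming
 * the PT_GETDBREGS/PT_SETDBREGS ptrace(2) requests that these routines back
 * are available and that struct dbreg exposes the dr0..dr7 members used
 * above.  The helper name set_write_watchpoint() is hypothetical and error
 * handling is minimal.
 *
 *	#include <sys/types.h>
 *	#include <sys/ptrace.h>
 *	#include <machine/reg.h>
 *
 *	static int
 *	set_write_watchpoint(pid_t pid, u_int32_t addr)
 *	{
 *		struct dbreg d;
 *
 *		if (ptrace(PT_GETDBREGS, pid, (caddr_t)&d, 0) < 0)
 *			return (-1);
 *		d.dr0 = addr;
 *		d.dr7 &= ~((0x3 << 16) | (0x3 << 18));
 *		d.dr7 |= (0x1 << 16) | (0x3 << 18) | 0x3;
 *		return (ptrace(PT_SETDBREGS, pid, (caddr_t)&d, 0));
 *	}
 *
 * DR0 holds the watched address; in DR7, R/W0 = 01 (bits 16-17) selects a
 * break on data writes, LEN0 = 11 (bits 18-19) selects a 4-byte range, and
 * bits 0-1 (L0/G0) enable the breakpoint, the same bits that set_dbregs()
 * above tests with (dbregs->dr7 & 0x3).  set_dbregs() rejects the request
 * with EINVAL if the address lies at or above VM_MAXUSER_ADDRESS and the
 * tracing process is not running as uid 0.
 */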