machdep.c revision 62454
1/*- 2 * Copyright (c) 1992 Terrence R. Lambert. 3 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to Berkeley by 7 * William Jolitz. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by the University of 20 * California, Berkeley and its contributors. 21 * 4. Neither the name of the University nor the names of its contributors 22 * may be used to endorse or promote products derived from this software 23 * without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 
36 * 37 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 38 * $FreeBSD: head/sys/amd64/amd64/machdep.c 62454 2000-07-03 09:35:31Z phk $ 39 */ 40 41#include "apm.h" 42#include "npx.h" 43#include "opt_atalk.h" 44#include "opt_compat.h" 45#include "opt_cpu.h" 46#include "opt_ddb.h" 47#include "opt_inet.h" 48#include "opt_ipx.h" 49#include "opt_maxmem.h" 50#include "opt_msgbuf.h" 51#include "opt_perfmon.h" 52#include "opt_smp.h" 53#include "opt_user_ldt.h" 54#include "opt_userconfig.h" 55 56#include <sys/param.h> 57#include <sys/systm.h> 58#include <sys/sysproto.h> 59#include <sys/signalvar.h> 60#include <sys/kernel.h> 61#include <sys/linker.h> 62#include <sys/malloc.h> 63#include <sys/proc.h> 64#include <sys/bio.h> 65#include <sys/buf.h> 66#include <sys/reboot.h> 67#include <sys/callout.h> 68#include <sys/mbuf.h> 69#include <sys/msgbuf.h> 70#include <sys/sysent.h> 71#include <sys/sysctl.h> 72#include <sys/vmmeter.h> 73#include <sys/bus.h> 74 75#include <vm/vm.h> 76#include <vm/vm_param.h> 77#include <sys/lock.h> 78#include <vm/vm_kern.h> 79#include <vm/vm_object.h> 80#include <vm/vm_page.h> 81#include <vm/vm_map.h> 82#include <vm/vm_pager.h> 83#include <vm/vm_extern.h> 84 85#include <sys/user.h> 86#include <sys/exec.h> 87#include <sys/cons.h> 88 89#include <ddb/ddb.h> 90 91#include <net/netisr.h> 92 93#include <machine/cpu.h> 94#include <machine/reg.h> 95#include <machine/clock.h> 96#include <machine/specialreg.h> 97#include <machine/bootinfo.h> 98#include <machine/ipl.h> 99#include <machine/md_var.h> 100#include <machine/pcb_ext.h> /* pcb.h included via sys/user.h */ 101#ifdef SMP 102#include <machine/smp.h> 103#include <machine/globaldata.h> 104#endif 105#ifdef PERFMON 106#include <machine/perfmon.h> 107#endif 108 109#ifdef OLD_BUS_ARCH 110#include <i386/isa/isa_device.h> 111#endif 112#include <i386/isa/intr_machdep.h> 113#include <isa/rtc.h> 114#include <machine/vm86.h> 115#include <sys/ptrace.h> 116#include <machine/sigframe.h> 117 118extern void init386 __P((int first)); 119extern void dblfault_handler __P((void)); 120 121extern void printcpuinfo(void); /* XXX header file */ 122extern void earlysetcpuclass(void); /* same header file */ 123extern void finishidentcpu(void); 124extern void panicifcpuunsupported(void); 125extern void initializecpu(void); 126 127#define CS_SECURE(cs) (ISPL(cs) == SEL_UPL) 128#define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0) 129 130static void cpu_startup __P((void *)); 131SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL) 132 133static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf"); 134 135int _udatasel, _ucodesel; 136u_int atdevbase; 137 138#if defined(SWTCH_OPTIM_STATS) 139extern int swtch_optim_stats; 140SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats, 141 CTLFLAG_RD, &swtch_optim_stats, 0, ""); 142SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count, 143 CTLFLAG_RD, &tlb_flush_count, 0, ""); 144#endif 145 146#ifdef PC98 147static int ispc98 = 1; 148#else 149static int ispc98 = 0; 150#endif 151SYSCTL_INT(_machdep, OID_AUTO, ispc98, CTLFLAG_RD, &ispc98, 0, ""); 152 153int physmem = 0; 154int cold = 1; 155 156static void osendsig __P((sig_t catcher, int sig, sigset_t *mask, u_long code)); 157 158static int 159sysctl_hw_physmem (SYSCTL_HANDLER_ARGS) 160{ 161 int error = sysctl_handle_int(oidp, 0, ctob(physmem), req); 162 return (error); 163} 164 165SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_INT|CTLFLAG_RD, 166 0, 0, sysctl_hw_physmem, "I", ""); 167 168static int 169sysctl_hw_usermem (SYSCTL_HANDLER_ARGS) 170{ 171 int error = sysctl_handle_int(oidp, 
0, 172 ctob(physmem - cnt.v_wire_count), req); 173 return (error); 174} 175 176SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD, 177 0, 0, sysctl_hw_usermem, "I", ""); 178 179static int 180sysctl_hw_availpages (SYSCTL_HANDLER_ARGS) 181{ 182 int error = sysctl_handle_int(oidp, 0, 183 i386_btop(avail_end - avail_start), req); 184 return (error); 185} 186 187SYSCTL_PROC(_hw, OID_AUTO, availpages, CTLTYPE_INT|CTLFLAG_RD, 188 0, 0, sysctl_hw_availpages, "I", ""); 189 190static int 191sysctl_machdep_msgbuf (SYSCTL_HANDLER_ARGS) 192{ 193 int error; 194 195 /* Unwind the buffer, so that it's linear (possibly starting with 196 * some initial nulls). 197 */ 198 error=sysctl_handle_opaque(oidp,msgbufp->msg_ptr+msgbufp->msg_bufr, 199 msgbufp->msg_size-msgbufp->msg_bufr,req); 200 if(error) return(error); 201 if(msgbufp->msg_bufr>0) { 202 error=sysctl_handle_opaque(oidp,msgbufp->msg_ptr, 203 msgbufp->msg_bufr,req); 204 } 205 return(error); 206} 207 208SYSCTL_PROC(_machdep, OID_AUTO, msgbuf, CTLTYPE_STRING|CTLFLAG_RD, 209 0, 0, sysctl_machdep_msgbuf, "A","Contents of kernel message buffer"); 210 211static int msgbuf_clear; 212 213static int 214sysctl_machdep_msgbuf_clear (SYSCTL_HANDLER_ARGS) 215{ 216 int error; 217 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, 218 req); 219 if (!error && req->newptr) { 220 /* Clear the buffer and reset write pointer */ 221 bzero(msgbufp->msg_ptr,msgbufp->msg_size); 222 msgbufp->msg_bufr=msgbufp->msg_bufx=0; 223 msgbuf_clear=0; 224 } 225 return (error); 226} 227 228SYSCTL_PROC(_machdep, OID_AUTO, msgbuf_clear, CTLTYPE_INT|CTLFLAG_RW, 229 &msgbuf_clear, 0, sysctl_machdep_msgbuf_clear, "I", 230 "Clear kernel message buffer"); 231 232int bootverbose = 0, Maxmem = 0; 233long dumplo; 234 235vm_offset_t phys_avail[10]; 236 237/* must be 2 less so 0 0 can signal end of chunks */ 238#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2) 239 240static vm_offset_t buffer_sva, buffer_eva; 241vm_offset_t clean_sva, clean_eva; 242static vm_offset_t pager_sva, pager_eva; 243 244#define offsetof(type, member) ((size_t)(&((type *)0)->member)) 245 246static void 247cpu_startup(dummy) 248 void *dummy; 249{ 250 register unsigned i; 251 register caddr_t v; 252 vm_offset_t maxaddr; 253 vm_size_t size = 0; 254 int firstaddr; 255 vm_offset_t minaddr; 256 257 if (boothowto & RB_VERBOSE) 258 bootverbose++; 259 260 /* 261 * Good {morning,afternoon,evening,night}. 262 */ 263 printf(version); 264 earlysetcpuclass(); 265 startrtclock(); 266 printcpuinfo(); 267 panicifcpuunsupported(); 268#ifdef PERFMON 269 perfmon_init(); 270#endif 271 printf("real memory = %u (%uK bytes)\n", ptoa(Maxmem), ptoa(Maxmem) / 1024); 272 /* 273 * Display any holes after the first chunk of extended memory. 274 */ 275 if (bootverbose) { 276 int indx; 277 278 printf("Physical memory chunk(s):\n"); 279 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) { 280 int size1 = phys_avail[indx + 1] - phys_avail[indx]; 281 282 printf("0x%08x - 0x%08x, %u bytes (%u pages)\n", 283 phys_avail[indx], phys_avail[indx + 1] - 1, size1, 284 size1 / PAGE_SIZE); 285 } 286 } 287 288 /* 289 * Calculate callout wheel size 290 */ 291 for (callwheelsize = 1, callwheelbits = 0; 292 callwheelsize < ncallout; 293 callwheelsize <<= 1, ++callwheelbits) 294 ; 295 callwheelmask = callwheelsize - 1; 296 297 /* 298 * Allocate space for system data structures. 299 * The first available kernel virtual address is in "v". 300 * As pages of kernel virtual memory are allocated, "v" is incremented. 
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */

	/*
	 * Make two passes. The first pass calculates how much memory is
	 * needed and allocates it. The second pass assigns virtual
	 * addresses to the various data structures.
	 */
	firstaddr = 0;
again:
	v = (caddr_t)firstaddr;

#define	valloc(name, type, num) \
	(name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	(name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))

	valloc(callout, struct callout, ncallout);
	valloc(callwheel, struct callout_tailq, callwheelsize);

	/*
	 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE.
	 * For the first 64MB of ram nominally allocate sufficient buffers to
	 * cover 1/4 of our ram. Beyond the first 64MB allocate additional
	 * buffers to cover 1/20 of our ram over 64MB.
	 *
	 * factor represents the 1/4 x ram conversion.
	 */
	if (nbuf == 0) {
		int factor = 4 * BKVASIZE / PAGE_SIZE;

		nbuf = 50;
		if (physmem > 1024)
			nbuf += min((physmem - 1024) / factor, 16384 / factor);
		if (physmem > 16384)
			nbuf += (physmem - 16384) * 2 / (factor * 5);
	}

	/*
	 * Do not allow the buffer_map to be more than 1/2 the size of the
	 * kernel_map.
	 */
	if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) /
	    (BKVASIZE * 2)) {
		nbuf = (kernel_map->max_offset - kernel_map->min_offset) /
		    (BKVASIZE * 2);
		printf("Warning: nbufs capped at %d\n", nbuf);
	}

	nswbuf = max(min(nbuf/4, 256), 16);

	valloc(swbuf, struct buf, nswbuf);
	valloc(buf, struct buf, nbuf);
	v = bufhashinit(v);

	/*
	 * End of first pass, size has been calculated so allocate memory
	 */
	if (firstaddr == 0) {
		size = (vm_size_t)(v - firstaddr);
		firstaddr = (int)kmem_alloc(kernel_map, round_page(size));
		if (firstaddr == 0)
			panic("startup: no room for tables");
		goto again;
	}

	/*
	 * End of second pass, addresses have been assigned
	 */
	if ((vm_size_t)(v - firstaddr) != size)
		panic("startup: table size inconsistency");

	clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
			(nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
	buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
				(nbuf*BKVASIZE));
	pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
				(nswbuf*MAXPHYS) + pager_map_size);
	pager_map->system_map = 1;
	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
				(16*(ARG_MAX+(PAGE_SIZE*3))));

	/*
	 * Finally, allocate mbuf pool. Since mclrefcnt is an off-size
	 * we use the more space efficient malloc in place of kmem_alloc.
389 */ 390 { 391 vm_offset_t mb_map_size; 392 393 mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES; 394 mb_map_size = roundup2(mb_map_size, max(MCLBYTES, PAGE_SIZE)); 395 mclrefcnt = malloc(mb_map_size / MCLBYTES, M_MBUF, M_NOWAIT); 396 bzero(mclrefcnt, mb_map_size / MCLBYTES); 397 mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr, 398 mb_map_size); 399 mb_map->system_map = 1; 400 } 401 402 /* 403 * Initialize callouts 404 */ 405 SLIST_INIT(&callfree); 406 for (i = 0; i < ncallout; i++) { 407 callout_init(&callout[i]); 408 callout[i].c_flags = CALLOUT_LOCAL_ALLOC; 409 SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle); 410 } 411 412 for (i = 0; i < callwheelsize; i++) { 413 TAILQ_INIT(&callwheel[i]); 414 } 415 416#if defined(USERCONFIG) 417 userconfig(); 418 cninit(); /* the preferred console may have changed */ 419#endif 420 421 printf("avail memory = %u (%uK bytes)\n", ptoa(cnt.v_free_count), 422 ptoa(cnt.v_free_count) / 1024); 423 424 /* 425 * Set up buffers, so they can be used to read disk labels. 426 */ 427 bufinit(); 428 vm_pager_bufferinit(); 429 430#ifdef SMP 431 /* 432 * OK, enough kmem_alloc/malloc state should be up, lets get on with it! 433 */ 434 mp_start(); /* fire up the APs and APICs */ 435 mp_announce(); 436#endif /* SMP */ 437} 438 439int 440register_netisr(num, handler) 441 int num; 442 netisr_t *handler; 443{ 444 445 if (num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs)) ) { 446 printf("register_netisr: bad isr number: %d\n", num); 447 return (EINVAL); 448 } 449 netisrs[num] = handler; 450 return (0); 451} 452 453int 454unregister_netisr(num) 455 int num; 456{ 457 458 if (num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs)) ) { 459 printf("unregister_netisr: bad isr number: %d\n", num); 460 return (EINVAL); 461 } 462 netisrs[num] = NULL; 463 return (0); 464} 465 466/* 467 * Send an interrupt to process. 468 * 469 * Stack is set up to allow sigcode stored 470 * at top to call routine, followed by kcall 471 * to sigreturn routine below. After sigreturn 472 * resets the signal mask, the stack, and the 473 * frame pointer, it returns to the user 474 * specified pc, psl. 475 */ 476static void 477osendsig(catcher, sig, mask, code) 478 sig_t catcher; 479 int sig; 480 sigset_t *mask; 481 u_long code; 482{ 483 struct osigframe sf; 484 struct osigframe *fp; 485 struct proc *p; 486 struct sigacts *psp; 487 struct trapframe *regs; 488 int oonstack; 489 490 p = curproc; 491 psp = p->p_sigacts; 492 regs = p->p_md.md_regs; 493 oonstack = p->p_sigstk.ss_flags & SS_ONSTACK; 494 495 /* Allocate and validate space for the signal handler context. */ 496 if ((p->p_flag & P_ALTSTACK) && !oonstack && 497 SIGISMEMBER(psp->ps_sigonstack, sig)) { 498 fp = (struct osigframe *)(p->p_sigstk.ss_sp + 499 p->p_sigstk.ss_size - sizeof(struct osigframe)); 500 p->p_sigstk.ss_flags |= SS_ONSTACK; 501 } else 502 fp = (struct osigframe *)regs->tf_esp - 1; 503 504 /* 505 * grow_stack() will return 0 if *fp does not fit inside the stack 506 * and the stack can not be grown. 507 * useracc() will return FALSE if access is denied. 508 */ 509 if (grow_stack(p, (int)fp) == 0 || 510 !useracc((caddr_t)fp, sizeof(*fp), VM_PROT_WRITE)) { 511 /* 512 * Process has trashed its stack; give it an illegal 513 * instruction to halt it in its tracks. 514 */ 515 SIGACTION(p, SIGILL) = SIG_DFL; 516 SIGDELSET(p->p_sigignore, SIGILL); 517 SIGDELSET(p->p_sigcatch, SIGILL); 518 SIGDELSET(p->p_sigmask, SIGILL); 519 psignal(p, SIGILL); 520 return; 521 } 522 523 /* Translate the signal if appropriate. 
*/ 524 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize) 525 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; 526 527 /* Build the argument list for the signal handler. */ 528 sf.sf_signum = sig; 529 sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc; 530 if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) { 531 /* Signal handler installed with SA_SIGINFO. */ 532 sf.sf_arg2 = (register_t)&fp->sf_siginfo; 533 sf.sf_siginfo.si_signo = sig; 534 sf.sf_siginfo.si_code = code; 535 sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher; 536 } else { 537 /* Old FreeBSD-style arguments. */ 538 sf.sf_arg2 = code; 539 sf.sf_addr = regs->tf_err; 540 sf.sf_ahu.sf_handler = catcher; 541 } 542 543 /* Save most if not all of trap frame. */ 544 sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax; 545 sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx; 546 sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx; 547 sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx; 548 sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi; 549 sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi; 550 sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs; 551 sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds; 552 sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss; 553 sf.sf_siginfo.si_sc.sc_es = regs->tf_es; 554 sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs; 555 sf.sf_siginfo.si_sc.sc_gs = rgs(); 556 sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp; 557 558 /* Build the signal context to be used by osigreturn(). */ 559 sf.sf_siginfo.si_sc.sc_onstack = oonstack; 560 SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask); 561 sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp; 562 sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp; 563 sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip; 564 sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags; 565 sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno; 566 sf.sf_siginfo.si_sc.sc_err = regs->tf_err; 567 568 /* 569 * If we're a vm86 process, we want to save the segment registers. 570 * We also change eflags to be our emulated eflags, not the actual 571 * eflags. 572 */ 573 if (regs->tf_eflags & PSL_VM) { 574 /* XXX confusing names: `tf' isn't a trapframe; `regs' is. */ 575 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 576 struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 577 578 sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs; 579 sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs; 580 sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es; 581 sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds; 582 583 if (vm86->vm86_has_vme == 0) 584 sf.sf_siginfo.si_sc.sc_ps = 585 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) | 586 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); 587 588 /* See sendsig() for comments. */ 589 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP); 590 } 591 592 /* Copy the sigframe out to the user's stack. */ 593 if (copyout(&sf, fp, sizeof(*fp)) != 0) { 594 /* 595 * Something is wrong with the stack pointer. 596 * ...Kill the process. 
597 */ 598 sigexit(p, SIGILL); 599 } 600 601 regs->tf_esp = (int)fp; 602 regs->tf_eip = PS_STRINGS - szosigcode; 603 regs->tf_cs = _ucodesel; 604 regs->tf_ds = _udatasel; 605 regs->tf_es = _udatasel; 606 regs->tf_fs = _udatasel; 607 load_gs(_udatasel); 608 regs->tf_ss = _udatasel; 609} 610 611void 612sendsig(catcher, sig, mask, code) 613 sig_t catcher; 614 int sig; 615 sigset_t *mask; 616 u_long code; 617{ 618 struct sigframe sf; 619 struct proc *p; 620 struct sigacts *psp; 621 struct trapframe *regs; 622 struct sigframe *sfp; 623 int oonstack; 624 625 p = curproc; 626 psp = p->p_sigacts; 627 if (SIGISMEMBER(psp->ps_osigset, sig)) { 628 osendsig(catcher, sig, mask, code); 629 return; 630 } 631 regs = p->p_md.md_regs; 632 oonstack = p->p_sigstk.ss_flags & SS_ONSTACK; 633 634 /* Save user context. */ 635 bzero(&sf, sizeof(sf)); 636 sf.sf_uc.uc_sigmask = *mask; 637 sf.sf_uc.uc_stack = p->p_sigstk; 638 sf.sf_uc.uc_mcontext.mc_onstack = oonstack; 639 sf.sf_uc.uc_mcontext.mc_gs = rgs(); 640 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs)); 641 642 /* Allocate and validate space for the signal handler context. */ 643 if ((p->p_flag & P_ALTSTACK) != 0 && !oonstack && 644 SIGISMEMBER(psp->ps_sigonstack, sig)) { 645 sfp = (struct sigframe *)(p->p_sigstk.ss_sp + 646 p->p_sigstk.ss_size - sizeof(struct sigframe)); 647 p->p_sigstk.ss_flags |= SS_ONSTACK; 648 } else 649 sfp = (struct sigframe *)regs->tf_esp - 1; 650 651 /* 652 * grow_stack() will return 0 if *sfp does not fit inside the stack 653 * and the stack can not be grown. 654 * useracc() will return FALSE if access is denied. 655 */ 656 if (grow_stack(p, (int)sfp) == 0 || 657 !useracc((caddr_t)sfp, sizeof(*sfp), VM_PROT_WRITE)) { 658 /* 659 * Process has trashed its stack; give it an illegal 660 * instruction to halt it in its tracks. 661 */ 662#ifdef DEBUG 663 printf("process %d has trashed its stack\n", p->p_pid); 664#endif 665 SIGACTION(p, SIGILL) = SIG_DFL; 666 SIGDELSET(p->p_sigignore, SIGILL); 667 SIGDELSET(p->p_sigcatch, SIGILL); 668 SIGDELSET(p->p_sigmask, SIGILL); 669 psignal(p, SIGILL); 670 return; 671 } 672 673 /* Translate the signal if appropriate. */ 674 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize) 675 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; 676 677 /* Build the argument list for the signal handler. */ 678 sf.sf_signum = sig; 679 sf.sf_ucontext = (register_t)&sfp->sf_uc; 680 if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) { 681 /* Signal handler installed with SA_SIGINFO. */ 682 sf.sf_siginfo = (register_t)&sfp->sf_si; 683 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; 684 685 /* Fill siginfo structure. */ 686 sf.sf_si.si_signo = sig; 687 sf.sf_si.si_code = code; 688 sf.sf_si.si_addr = (void *)regs->tf_err; 689 } else { 690 /* Old FreeBSD-style arguments. */ 691 sf.sf_siginfo = code; 692 sf.sf_addr = regs->tf_err; 693 sf.sf_ahu.sf_handler = catcher; 694 } 695 696 /* 697 * If we're a vm86 process, we want to save the segment registers. 698 * We also change eflags to be our emulated eflags, not the actual 699 * eflags. 
700 */ 701 if (regs->tf_eflags & PSL_VM) { 702 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 703 struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 704 705 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs; 706 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs; 707 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es; 708 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds; 709 710 if (vm86->vm86_has_vme == 0) 711 sf.sf_uc.uc_mcontext.mc_eflags = 712 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) | 713 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); 714 715 /* 716 * We should never have PSL_T set when returning from vm86 717 * mode. It may be set here if we deliver a signal before 718 * getting to vm86 mode, so turn it off. 719 * 720 * Clear PSL_NT to inhibit T_TSSFLT faults on return from 721 * syscalls made by the signal handler. This just avoids 722 * wasting time for our lazy fixup of such faults. PSL_NT 723 * does nothing in vm86 mode, but vm86 programs can set it 724 * almost legitimately in probes for old cpu types. 725 */ 726 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP); 727 } 728 729 /* Copy the sigframe out to the user's stack. */ 730 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) { 731 /* 732 * Something is wrong with the stack pointer. 733 * ...Kill the process. 734 */ 735 sigexit(p, SIGILL); 736 } 737 738 regs->tf_esp = (int)sfp; 739 regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode); 740 regs->tf_cs = _ucodesel; 741 regs->tf_ds = _udatasel; 742 regs->tf_es = _udatasel; 743 regs->tf_fs = _udatasel; 744 load_gs(_udatasel); 745 regs->tf_ss = _udatasel; 746} 747 748/* 749 * System call to cleanup state after a signal 750 * has been taken. Reset signal mask and 751 * stack state from context left by sendsig (above). 752 * Return to previous pc and psl as specified by 753 * context left by sendsig. Check carefully to 754 * make sure that the user has not modified the 755 * state to gain improper privileges. 756 */ 757int 758osigreturn(p, uap) 759 struct proc *p; 760 struct osigreturn_args /* { 761 struct osigcontext *sigcntxp; 762 } */ *uap; 763{ 764 struct trapframe *regs; 765 struct osigcontext *scp; 766 int eflags; 767 768 regs = p->p_md.md_regs; 769 scp = uap->sigcntxp; 770 if (!useracc((caddr_t)scp, sizeof(*scp), VM_PROT_READ)) 771 return (EFAULT); 772 eflags = scp->sc_ps; 773 if (eflags & PSL_VM) { 774 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 775 struct vm86_kernel *vm86; 776 777 /* 778 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't 779 * set up the vm86 area, and we can't enter vm86 mode. 780 */ 781 if (p->p_addr->u_pcb.pcb_ext == 0) 782 return (EINVAL); 783 vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 784 if (vm86->vm86_inited == 0) 785 return (EINVAL); 786 787 /* Go back to user mode if both flags are set. */ 788 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) 789 trapsignal(p, SIGBUS, 0); 790 791 if (vm86->vm86_has_vme) { 792 eflags = (tf->tf_eflags & ~VME_USERCHANGE) | 793 (eflags & VME_USERCHANGE) | PSL_VM; 794 } else { 795 vm86->vm86_eflags = eflags; /* save VIF, VIP */ 796 eflags = (tf->tf_eflags & ~VM_USERCHANGE) | (eflags & VM_USERCHANGE) | PSL_VM; 797 } 798 tf->tf_vm86_ds = scp->sc_ds; 799 tf->tf_vm86_es = scp->sc_es; 800 tf->tf_vm86_fs = scp->sc_fs; 801 tf->tf_vm86_gs = scp->sc_gs; 802 tf->tf_ds = _udatasel; 803 tf->tf_es = _udatasel; 804 tf->tf_fs = _udatasel; 805 } else { 806 /* 807 * Don't allow users to change privileged or reserved flags. 808 */ 809 /* 810 * XXX do allow users to change the privileged flag PSL_RF. 
811 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers 812 * should sometimes set it there too. tf_eflags is kept in 813 * the signal context during signal handling and there is no 814 * other place to remember it, so the PSL_RF bit may be 815 * corrupted by the signal handler without us knowing. 816 * Corruption of the PSL_RF bit at worst causes one more or 817 * one less debugger trap, so allowing it is fairly harmless. 818 */ 819 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) { 820 return (EINVAL); 821 } 822 823 /* 824 * Don't allow users to load a valid privileged %cs. Let the 825 * hardware check for invalid selectors, excess privilege in 826 * other selectors, invalid %eip's and invalid %esp's. 827 */ 828 if (!CS_SECURE(scp->sc_cs)) { 829 trapsignal(p, SIGBUS, T_PROTFLT); 830 return (EINVAL); 831 } 832 regs->tf_ds = scp->sc_ds; 833 regs->tf_es = scp->sc_es; 834 regs->tf_fs = scp->sc_fs; 835 } 836 837 /* Restore remaining registers. */ 838 regs->tf_eax = scp->sc_eax; 839 regs->tf_ebx = scp->sc_ebx; 840 regs->tf_ecx = scp->sc_ecx; 841 regs->tf_edx = scp->sc_edx; 842 regs->tf_esi = scp->sc_esi; 843 regs->tf_edi = scp->sc_edi; 844 regs->tf_cs = scp->sc_cs; 845 regs->tf_ss = scp->sc_ss; 846 regs->tf_isp = scp->sc_isp; 847 848 if (scp->sc_onstack & 01) 849 p->p_sigstk.ss_flags |= SS_ONSTACK; 850 else 851 p->p_sigstk.ss_flags &= ~SS_ONSTACK; 852 SIGSETOLD(p->p_sigmask, scp->sc_mask); 853 SIG_CANTMASK(p->p_sigmask); 854 regs->tf_ebp = scp->sc_fp; 855 regs->tf_esp = scp->sc_sp; 856 regs->tf_eip = scp->sc_pc; 857 regs->tf_eflags = eflags; 858 return (EJUSTRETURN); 859} 860 861int 862sigreturn(p, uap) 863 struct proc *p; 864 struct sigreturn_args /* { 865 ucontext_t *sigcntxp; 866 } */ *uap; 867{ 868 struct trapframe *regs; 869 ucontext_t *ucp; 870 int cs, eflags; 871 872 ucp = uap->sigcntxp; 873 if (!useracc((caddr_t)ucp, sizeof(struct osigcontext), VM_PROT_READ)) 874 return (EFAULT); 875 if (((struct osigcontext *)ucp)->sc_trapno == 0x01d516) 876 return (osigreturn(p, (struct osigreturn_args *)uap)); 877 878 /* 879 * Since ucp is not an osigcontext but a ucontext_t, we have to 880 * check again if all of it is accessible. A ucontext_t is 881 * much larger, so instead of just checking for the pointer 882 * being valid for the size of an osigcontext, now check for 883 * it being valid for a whole, new-style ucontext_t. 884 */ 885 if (!useracc((caddr_t)ucp, sizeof(*ucp), VM_PROT_READ)) 886 return (EFAULT); 887 888 regs = p->p_md.md_regs; 889 eflags = ucp->uc_mcontext.mc_eflags; 890 if (eflags & PSL_VM) { 891 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 892 struct vm86_kernel *vm86; 893 894 /* 895 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't 896 * set up the vm86 area, and we can't enter vm86 mode. 897 */ 898 if (p->p_addr->u_pcb.pcb_ext == 0) 899 return (EINVAL); 900 vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 901 if (vm86->vm86_inited == 0) 902 return (EINVAL); 903 904 /* Go back to user mode if both flags are set. 
*/ 905 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) 906 trapsignal(p, SIGBUS, 0); 907 908 if (vm86->vm86_has_vme) { 909 eflags = (tf->tf_eflags & ~VME_USERCHANGE) | 910 (eflags & VME_USERCHANGE) | PSL_VM; 911 } else { 912 vm86->vm86_eflags = eflags; /* save VIF, VIP */ 913 eflags = (tf->tf_eflags & ~VM_USERCHANGE) | (eflags & VM_USERCHANGE) | PSL_VM; 914 } 915 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe)); 916 tf->tf_eflags = eflags; 917 tf->tf_vm86_ds = tf->tf_ds; 918 tf->tf_vm86_es = tf->tf_es; 919 tf->tf_vm86_fs = tf->tf_fs; 920 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs; 921 tf->tf_ds = _udatasel; 922 tf->tf_es = _udatasel; 923 tf->tf_fs = _udatasel; 924 } else { 925 /* 926 * Don't allow users to change privileged or reserved flags. 927 */ 928 /* 929 * XXX do allow users to change the privileged flag PSL_RF. 930 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers 931 * should sometimes set it there too. tf_eflags is kept in 932 * the signal context during signal handling and there is no 933 * other place to remember it, so the PSL_RF bit may be 934 * corrupted by the signal handler without us knowing. 935 * Corruption of the PSL_RF bit at worst causes one more or 936 * one less debugger trap, so allowing it is fairly harmless. 937 */ 938 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) { 939 printf("sigreturn: eflags = 0x%x\n", eflags); 940 return (EINVAL); 941 } 942 943 /* 944 * Don't allow users to load a valid privileged %cs. Let the 945 * hardware check for invalid selectors, excess privilege in 946 * other selectors, invalid %eip's and invalid %esp's. 947 */ 948 cs = ucp->uc_mcontext.mc_cs; 949 if (!CS_SECURE(cs)) { 950 printf("sigreturn: cs = 0x%x\n", cs); 951 trapsignal(p, SIGBUS, T_PROTFLT); 952 return (EINVAL); 953 } 954 955 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs)); 956 } 957 if (ucp->uc_mcontext.mc_onstack & 1) 958 p->p_sigstk.ss_flags |= SS_ONSTACK; 959 else 960 p->p_sigstk.ss_flags &= ~SS_ONSTACK; 961 962 p->p_sigmask = ucp->uc_sigmask; 963 SIG_CANTMASK(p->p_sigmask); 964 return (EJUSTRETURN); 965} 966 967/* 968 * Machine dependent boot() routine 969 * 970 * I haven't seen anything to put here yet 971 * Possibly some stuff might be grafted back here from boot() 972 */ 973void 974cpu_boot(int howto) 975{ 976} 977 978/* 979 * Shutdown the CPU as much as possible 980 */ 981void 982cpu_halt(void) 983{ 984 for (;;) 985 __asm__ ("hlt"); 986} 987 988/* 989 * Clear registers on exec 990 */ 991void 992setregs(p, entry, stack, ps_strings) 993 struct proc *p; 994 u_long entry; 995 u_long stack; 996 u_long ps_strings; 997{ 998 struct trapframe *regs = p->p_md.md_regs; 999 struct pcb *pcb = &p->p_addr->u_pcb; 1000 1001#ifdef USER_LDT 1002 /* was i386_user_cleanup() in NetBSD */ 1003 user_ldt_free(pcb); 1004#endif 1005 1006 bzero((char *)regs, sizeof(struct trapframe)); 1007 regs->tf_eip = entry; 1008 regs->tf_esp = stack; 1009 regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T); 1010 regs->tf_ss = _udatasel; 1011 regs->tf_ds = _udatasel; 1012 regs->tf_es = _udatasel; 1013 regs->tf_fs = _udatasel; 1014 regs->tf_cs = _ucodesel; 1015 1016 /* PS_STRINGS value for BSD/OS binaries. It is 0 for non-BSD/OS. */ 1017 regs->tf_ebx = ps_strings; 1018 1019 /* reset %gs as well */ 1020 if (pcb == curpcb) 1021 load_gs(_udatasel); 1022 else 1023 pcb->pcb_gs = _udatasel; 1024 1025 /* 1026 * Reset the hardware debug registers if they were in use. 1027 * They won't have any meaning for the newly exec'd process. 
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == curpcb) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized. Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	p->p_addr->u_pcb.pcb_flags &= ~FP_SOFTFP;

	/*
	 * Arrange to trap the next npx or `fwait' instruction (see npx.c
	 * for why fwait must be trapped at least if there is an npx or an
	 * emulator). This is mainly to handle the case where npx0 is not
	 * configured, since the npx routines normally set up the trap
	 * otherwise. It should be done only at boot time, but doing it
	 * here allows modifying `npx_exists' for testing the emulator on
	 * systems with an npx.
	 */
	load_cr0(rcr0() | CR0_MP | CR0_TS);

#if NNPX > 0
	/* Initialize the npx (if any) for the current process. */
	npxinit(__INITIAL_NPXCW__);
#endif

	/*
	 * XXX - Linux emulator
	 * Make sure edx is 0x0 on entry. Linux binaries depend
	 * on it.
	 */
	p->p_retval[1] = 0;
}

static int
sysctl_machdep_adjkerntz (SYSCTL_HANDLER_ARGS)
{
	int error;
	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
		req);
	if (!error && req->newptr)
		resettodr();
	return (error);
}

SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
	&adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");

SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set,
	CTLFLAG_RW, &disable_rtc_set, 0, "");

SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo,
	CTLFLAG_RD, &bootinfo, bootinfo, "");

SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock,
	CTLFLAG_RW, &wall_cmos_clock, 0, "");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int _default_ldt;
#ifdef SMP
union descriptor gdt[NGDT * NCPU];	/* global descriptor table */
#else
union descriptor gdt[NGDT];		/* global descriptor table */
#endif
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */
#ifdef SMP
/* table descriptors - used to load tables by microprocessor */
struct region_descriptor r_gdt, r_idt;
#endif

#ifndef SMP
extern struct segment_descriptor common_tssd, *tss_gdt;
#endif
int private_tss;			/* flag indicating private tss */

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];

extern struct user *proc0paddr;


/* software prototypes -- in more palatable form */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	0x0,			/* segment base address */
	0x0,			/* 
length */ 1145 0, /* segment type */ 1146 0, /* segment descriptor priority level */ 1147 0, /* segment descriptor present */ 1148 0, 0, 1149 0, /* default 32 vs 16 bit size */ 1150 0 /* limit granularity (byte/page units)*/ }, 1151/* GCODE_SEL 1 Code Descriptor for kernel */ 1152{ 0x0, /* segment base address */ 1153 0xfffff, /* length - all address space */ 1154 SDT_MEMERA, /* segment type */ 1155 0, /* segment descriptor priority level */ 1156 1, /* segment descriptor present */ 1157 0, 0, 1158 1, /* default 32 vs 16 bit size */ 1159 1 /* limit granularity (byte/page units)*/ }, 1160/* GDATA_SEL 2 Data Descriptor for kernel */ 1161{ 0x0, /* segment base address */ 1162 0xfffff, /* length - all address space */ 1163 SDT_MEMRWA, /* segment type */ 1164 0, /* segment descriptor priority level */ 1165 1, /* segment descriptor present */ 1166 0, 0, 1167 1, /* default 32 vs 16 bit size */ 1168 1 /* limit granularity (byte/page units)*/ }, 1169/* GPRIV_SEL 3 SMP Per-Processor Private Data Descriptor */ 1170{ 0x0, /* segment base address */ 1171 0xfffff, /* length - all address space */ 1172 SDT_MEMRWA, /* segment type */ 1173 0, /* segment descriptor priority level */ 1174 1, /* segment descriptor present */ 1175 0, 0, 1176 1, /* default 32 vs 16 bit size */ 1177 1 /* limit granularity (byte/page units)*/ }, 1178/* GPROC0_SEL 4 Proc 0 Tss Descriptor */ 1179{ 1180 0x0, /* segment base address */ 1181 sizeof(struct i386tss)-1,/* length - all address space */ 1182 SDT_SYS386TSS, /* segment type */ 1183 0, /* segment descriptor priority level */ 1184 1, /* segment descriptor present */ 1185 0, 0, 1186 0, /* unused - default 32 vs 16 bit size */ 1187 0 /* limit granularity (byte/page units)*/ }, 1188/* GLDT_SEL 5 LDT Descriptor */ 1189{ (int) ldt, /* segment base address */ 1190 sizeof(ldt)-1, /* length - all address space */ 1191 SDT_SYSLDT, /* segment type */ 1192 SEL_UPL, /* segment descriptor priority level */ 1193 1, /* segment descriptor present */ 1194 0, 0, 1195 0, /* unused - default 32 vs 16 bit size */ 1196 0 /* limit granularity (byte/page units)*/ }, 1197/* GUSERLDT_SEL 6 User LDT Descriptor per process */ 1198{ (int) ldt, /* segment base address */ 1199 (512 * sizeof(union descriptor)-1), /* length */ 1200 SDT_SYSLDT, /* segment type */ 1201 0, /* segment descriptor priority level */ 1202 1, /* segment descriptor present */ 1203 0, 0, 1204 0, /* unused - default 32 vs 16 bit size */ 1205 0 /* limit granularity (byte/page units)*/ }, 1206/* GTGATE_SEL 7 Null Descriptor - Placeholder */ 1207{ 0x0, /* segment base address */ 1208 0x0, /* length - all address space */ 1209 0, /* segment type */ 1210 0, /* segment descriptor priority level */ 1211 0, /* segment descriptor present */ 1212 0, 0, 1213 0, /* default 32 vs 16 bit size */ 1214 0 /* limit granularity (byte/page units)*/ }, 1215/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */ 1216{ 0x400, /* segment base address */ 1217 0xfffff, /* length */ 1218 SDT_MEMRWA, /* segment type */ 1219 0, /* segment descriptor priority level */ 1220 1, /* segment descriptor present */ 1221 0, 0, 1222 1, /* default 32 vs 16 bit size */ 1223 1 /* limit granularity (byte/page units)*/ }, 1224/* GPANIC_SEL 9 Panic Tss Descriptor */ 1225{ (int) &dblfault_tss, /* segment base address */ 1226 sizeof(struct i386tss)-1,/* length - all address space */ 1227 SDT_SYS386TSS, /* segment type */ 1228 0, /* segment descriptor priority level */ 1229 1, /* segment descriptor present */ 1230 0, 0, 1231 0, /* unused - default 32 vs 16 bit 
size */ 1232 0 /* limit granularity (byte/page units)*/ }, 1233/* GBIOSCODE32_SEL 10 BIOS 32-bit interface (32bit Code) */ 1234{ 0, /* segment base address (overwritten) */ 1235 0xfffff, /* length */ 1236 SDT_MEMERA, /* segment type */ 1237 0, /* segment descriptor priority level */ 1238 1, /* segment descriptor present */ 1239 0, 0, 1240 0, /* default 32 vs 16 bit size */ 1241 1 /* limit granularity (byte/page units)*/ }, 1242/* GBIOSCODE16_SEL 11 BIOS 32-bit interface (16bit Code) */ 1243{ 0, /* segment base address (overwritten) */ 1244 0xfffff, /* length */ 1245 SDT_MEMERA, /* segment type */ 1246 0, /* segment descriptor priority level */ 1247 1, /* segment descriptor present */ 1248 0, 0, 1249 0, /* default 32 vs 16 bit size */ 1250 1 /* limit granularity (byte/page units)*/ }, 1251/* GBIOSDATA_SEL 12 BIOS 32-bit interface (Data) */ 1252{ 0, /* segment base address (overwritten) */ 1253 0xfffff, /* length */ 1254 SDT_MEMRWA, /* segment type */ 1255 0, /* segment descriptor priority level */ 1256 1, /* segment descriptor present */ 1257 0, 0, 1258 1, /* default 32 vs 16 bit size */ 1259 1 /* limit granularity (byte/page units)*/ }, 1260/* GBIOSUTIL_SEL 13 BIOS 16-bit interface (Utility) */ 1261{ 0, /* segment base address (overwritten) */ 1262 0xfffff, /* length */ 1263 SDT_MEMRWA, /* segment type */ 1264 0, /* segment descriptor priority level */ 1265 1, /* segment descriptor present */ 1266 0, 0, 1267 0, /* default 32 vs 16 bit size */ 1268 1 /* limit granularity (byte/page units)*/ }, 1269/* GBIOSARGS_SEL 14 BIOS 16-bit interface (Arguments) */ 1270{ 0, /* segment base address (overwritten) */ 1271 0xfffff, /* length */ 1272 SDT_MEMRWA, /* segment type */ 1273 0, /* segment descriptor priority level */ 1274 1, /* segment descriptor present */ 1275 0, 0, 1276 0, /* default 32 vs 16 bit size */ 1277 1 /* limit granularity (byte/page units)*/ }, 1278}; 1279 1280static struct soft_segment_descriptor ldt_segs[] = { 1281 /* Null Descriptor - overwritten by call gate */ 1282{ 0x0, /* segment base address */ 1283 0x0, /* length - all address space */ 1284 0, /* segment type */ 1285 0, /* segment descriptor priority level */ 1286 0, /* segment descriptor present */ 1287 0, 0, 1288 0, /* default 32 vs 16 bit size */ 1289 0 /* limit granularity (byte/page units)*/ }, 1290 /* Null Descriptor - overwritten by call gate */ 1291{ 0x0, /* segment base address */ 1292 0x0, /* length - all address space */ 1293 0, /* segment type */ 1294 0, /* segment descriptor priority level */ 1295 0, /* segment descriptor present */ 1296 0, 0, 1297 0, /* default 32 vs 16 bit size */ 1298 0 /* limit granularity (byte/page units)*/ }, 1299 /* Null Descriptor - overwritten by call gate */ 1300{ 0x0, /* segment base address */ 1301 0x0, /* length - all address space */ 1302 0, /* segment type */ 1303 0, /* segment descriptor priority level */ 1304 0, /* segment descriptor present */ 1305 0, 0, 1306 0, /* default 32 vs 16 bit size */ 1307 0 /* limit granularity (byte/page units)*/ }, 1308 /* Code Descriptor for user */ 1309{ 0x0, /* segment base address */ 1310 0xfffff, /* length - all address space */ 1311 SDT_MEMERA, /* segment type */ 1312 SEL_UPL, /* segment descriptor priority level */ 1313 1, /* segment descriptor present */ 1314 0, 0, 1315 1, /* default 32 vs 16 bit size */ 1316 1 /* limit granularity (byte/page units)*/ }, 1317 /* Null Descriptor - overwritten by call gate */ 1318{ 0x0, /* segment base address */ 1319 0x0, /* length - all address space */ 1320 0, /* segment type */ 1321 0, /* segment 
descriptor priority level */ 1322 0, /* segment descriptor present */ 1323 0, 0, 1324 0, /* default 32 vs 16 bit size */ 1325 0 /* limit granularity (byte/page units)*/ }, 1326 /* Data Descriptor for user */ 1327{ 0x0, /* segment base address */ 1328 0xfffff, /* length - all address space */ 1329 SDT_MEMRWA, /* segment type */ 1330 SEL_UPL, /* segment descriptor priority level */ 1331 1, /* segment descriptor present */ 1332 0, 0, 1333 1, /* default 32 vs 16 bit size */ 1334 1 /* limit granularity (byte/page units)*/ }, 1335}; 1336 1337void 1338setidt(idx, func, typ, dpl, selec) 1339 int idx; 1340 inthand_t *func; 1341 int typ; 1342 int dpl; 1343 int selec; 1344{ 1345 struct gate_descriptor *ip; 1346 1347 ip = idt + idx; 1348 ip->gd_looffset = (int)func; 1349 ip->gd_selector = selec; 1350 ip->gd_stkcpy = 0; 1351 ip->gd_xx = 0; 1352 ip->gd_type = typ; 1353 ip->gd_dpl = dpl; 1354 ip->gd_p = 1; 1355 ip->gd_hioffset = ((int)func)>>16 ; 1356} 1357 1358#define IDTVEC(name) __CONCAT(X,name) 1359 1360extern inthand_t 1361 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl), 1362 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm), 1363 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot), 1364 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align), 1365 IDTVEC(syscall), IDTVEC(int0x80_syscall); 1366 1367void 1368sdtossd(sd, ssd) 1369 struct segment_descriptor *sd; 1370 struct soft_segment_descriptor *ssd; 1371{ 1372 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase; 1373 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit; 1374 ssd->ssd_type = sd->sd_type; 1375 ssd->ssd_dpl = sd->sd_dpl; 1376 ssd->ssd_p = sd->sd_p; 1377 ssd->ssd_def32 = sd->sd_def32; 1378 ssd->ssd_gran = sd->sd_gran; 1379} 1380 1381#define PHYSMAP_SIZE (2 * 8) 1382 1383/* 1384 * Populate the (physmap) array with base/bound pairs describing the 1385 * available physical memory in the system, then test this memory and 1386 * build the phys_avail array describing the actually-available memory. 1387 * 1388 * If we cannot accurately determine the physical memory map, then use 1389 * value from the 0xE801 call, and failing that, the RTC. 1390 * 1391 * Total memory size may be set by the kernel environment variable 1392 * hw.physmem or the compile-time define MAXMEM. 1393 */ 1394static void 1395getmemsize(int first) 1396{ 1397 int i, physmap_idx, pa_indx; 1398 u_int basemem, extmem; 1399 struct vm86frame vmf; 1400 struct vm86context vmc; 1401 vm_offset_t pa, physmap[PHYSMAP_SIZE]; 1402 pt_entry_t pte; 1403 const char *cp; 1404 struct { 1405 u_int64_t base; 1406 u_int64_t length; 1407 u_int32_t type; 1408 } *smap; 1409 1410 bzero(&vmf, sizeof(struct vm86frame)); 1411 bzero(physmap, sizeof(physmap)); 1412 1413 /* 1414 * Perform "base memory" related probes & setup 1415 */ 1416 vm86_intcall(0x12, &vmf); 1417 basemem = vmf.vmf_ax; 1418 if (basemem > 640) { 1419 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n", 1420 basemem); 1421 basemem = 640; 1422 } 1423 1424 /* 1425 * XXX if biosbasemem is now < 640, there is a `hole' 1426 * between the end of base memory and the start of 1427 * ISA memory. The hole may be empty or it may 1428 * contain BIOS code or data. Map it read/write so 1429 * that the BIOS can write to it. (Memory from 0 to 1430 * the physical end of the kernel is mapped read-only 1431 * to begin with and then parts of it are remapped. 1432 * The parts that aren't remapped form holes that 1433 * remain read-only and are unused by the kernel. 
	 * The base memory area is below the physical end of
	 * the kernel and right now forms a read-only hole.
	 * The part of it from PAGE_SIZE to
	 * (trunc_page(biosbasemem * 1024) - 1) will be
	 * remapped and used by the kernel later.)
	 *
	 * This code is similar to the code used in
	 * pmap_mapdev, but since no memory needs to be
	 * allocated we simply change the mapping.
	 */
	for (pa = trunc_page(basemem * 1024);
	     pa < ISA_HOLE_START; pa += PAGE_SIZE) {
		pte = (pt_entry_t)vtopte(pa + KERNBASE);
		*pte = pa | PG_RW | PG_V;
	}

	/*
	 * if basemem != 640, map pages r/w into vm86 page table so
	 * that the bios can scribble on it.
	 */
	pte = (pt_entry_t)vm86paddr;
	for (i = basemem / 4; i < 160; i++)
		pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;

	/*
	 * map page 1 R/W into the kernel page table so we can use it
	 * as a buffer. The kernel will unmap this page later.
	 */
	pte = (pt_entry_t)vtopte(KERNBASE + (1 << PAGE_SHIFT));
	*pte = (1 << PAGE_SHIFT) | PG_RW | PG_V;

	/*
	 * get memory map with INT 15:E820
	 */
#define	SMAPSIZ		sizeof(*smap)
#define	SMAP_SIG	0x534D4150	/* 'SMAP' */

	vmc.npages = 0;
	smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
	vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);

	physmap_idx = 0;
	vmf.vmf_ebx = 0;
	do {
		vmf.vmf_eax = 0xE820;
		vmf.vmf_edx = SMAP_SIG;
		vmf.vmf_ecx = SMAPSIZ;
		i = vm86_datacall(0x15, &vmf, &vmc);
		if (i || vmf.vmf_eax != SMAP_SIG)
			break;
		if (boothowto & RB_VERBOSE)
			printf("SMAP type=%02x base=%08x %08x len=%08x %08x\n",
			    smap->type,
			    *(u_int32_t *)((char *)&smap->base + 4),
			    (u_int32_t)smap->base,
			    *(u_int32_t *)((char *)&smap->length + 4),
			    (u_int32_t)smap->length);

		if (smap->type != 0x01)
			goto next_run;

		if (smap->length == 0)
			goto next_run;

		if (smap->base >= 0xffffffff) {
			printf("%uK of memory above 4GB ignored\n",
			    (u_int)(smap->length / 1024));
			goto next_run;
		}

		for (i = 0; i <= physmap_idx; i += 2) {
			if (smap->base < physmap[i + 1]) {
				if (boothowto & RB_VERBOSE)
					printf(
	"Overlapping or non-monotonic memory region, ignoring second region\n");
				goto next_run;
			}
		}

		if (smap->base == physmap[physmap_idx + 1]) {
			physmap[physmap_idx + 1] += smap->length;
			goto next_run;
		}

		physmap_idx += 2;
		if (physmap_idx == PHYSMAP_SIZE) {
			printf(
		"Too many segments in the physical address map, giving up\n");
			break;
		}
		physmap[physmap_idx] = smap->base;
		physmap[physmap_idx + 1] = smap->base + smap->length;
next_run:
	} while (vmf.vmf_ebx != 0);

	if (physmap[1] != 0)
		goto physmap_done;

	/*
	 * If we failed above, try memory map with INT 15:E801
	 */
	vmf.vmf_ax = 0xE801;
	if (vm86_intcall(0x15, &vmf) == 0) {
		extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
	} else {
#if 0
		vmf.vmf_ah = 0x88;
		vm86_intcall(0x15, &vmf);
		extmem = vmf.vmf_ax;
#else
		/*
		 * Prefer the RTC value for extended memory.
		 */
		extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
#endif
	}

	/*
	 * Special hack for chipsets that still remap the 384k hole when
	 * there's 16MB of memory - this really confuses people that
	 * are trying to use bus mastering ISA controllers with the
	 * "16MB limit"; they only have 16MB, but the remapping puts
	 * them beyond the limit.
	 *
	 * If extended memory is between 15-16MB (16-17MB phys address range),
	 * chop it to 15MB.
	 */
	if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
		extmem = 15 * 1024;

	physmap[0] = 0;
	physmap[1] = basemem * 1024;
	physmap_idx = 2;
	physmap[physmap_idx] = 0x100000;
	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;

physmap_done:
	/*
	 * Now, physmap contains a map of physical memory.
	 */

#ifdef SMP
	/* make hole for AP bootstrap code */
	physmap[1] = mp_bootaddress(physmap[1] / 1024);

	/* look for the MP hardware - needed for apic addresses */
	mp_probe();
#endif

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space. It should be
	 * called something like "Maxphyspage". We may adjust this
	 * based on ``hw.physmem'' and the results of the memory test.
	 */
	Maxmem = atop(physmap[physmap_idx + 1]);

#ifdef MAXMEM
	Maxmem = MAXMEM / 4;
#endif

	/*
	 * hw.physmem is a size in bytes; we also allow k, m, and g suffixes
	 * for the appropriate modifiers. This overrides MAXMEM.
	 */
	if ((cp = getenv("hw.physmem")) != NULL) {
		u_int64_t AllowMem, sanity;
		char *ep;

		sanity = AllowMem = strtouq(cp, &ep, 0);
		if ((ep != cp) && (*ep != 0)) {
			switch(*ep) {
			case 'g':
			case 'G':
				AllowMem <<= 10;
			case 'm':
			case 'M':
				AllowMem <<= 10;
			case 'k':
			case 'K':
				AllowMem <<= 10;
				break;
			default:
				AllowMem = sanity = 0;
			}
			if (AllowMem < sanity)
				AllowMem = 0;
		}
		if (AllowMem == 0)
			printf("Ignoring invalid memory size of '%s'\n", cp);
		else
			Maxmem = atop(AllowMem);
	}

	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
	    (boothowto & RB_VERBOSE))
		printf("Physical memory use set to %uK\n", Maxmem * 4);

	/*
	 * If Maxmem has been increased beyond what the system has detected,
	 * extend the last memory segment to the new limit.
	 */
	if (atop(physmap[physmap_idx + 1]) < Maxmem)
		physmap[physmap_idx + 1] = ptoa(Maxmem);

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(first, 0);

	/*
	 * Size up each available chunk of physical memory.
	 */
	physmap[0] = PAGE_SIZE;		/* mask off page 0 */
	pa_indx = 0;
	phys_avail[pa_indx++] = physmap[0];
	phys_avail[pa_indx] = physmap[0];
#if 0
	pte = (pt_entry_t)vtopte(KERNBASE);
#else
	pte = (pt_entry_t)CMAP1;
#endif

	/*
	 * physmap is in bytes, so when converting to page boundaries,
	 * round up the start address and round down the end address.
	 */
	for (i = 0; i <= physmap_idx; i += 2) {
		vm_offset_t end;

		end = ptoa(Maxmem);
		if (physmap[i + 1] < end)
			end = trunc_page(physmap[i + 1]);
		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
			int tmp, page_bad;
#if 0
			int *ptr = 0;
#else
			int *ptr = (int *)CADDR1;
#endif

			/*
			 * block out kernel memory as not available.
			 */
			if (pa >= 0x100000 && pa < first)
				continue;

			page_bad = FALSE;

			/*
			 * map page into kernel: valid, read/write, non-cacheable
			 */
			*pte = pa | PG_V | PG_RW | PG_N;
			invltlb();

			tmp = *(int *)ptr;
			/*
			 * Test for alternating 1's and 0's
			 */
			*(volatile int *)ptr = 0xaaaaaaaa;
			if (*(volatile int *)ptr != 0xaaaaaaaa) {
				page_bad = TRUE;
			}
			/*
			 * Test for alternating 0's and 1's
			 */
			*(volatile int *)ptr = 0x55555555;
			if (*(volatile int *)ptr != 0x55555555) {
				page_bad = TRUE;
			}
			/*
			 * Test for all 1's
			 */
			*(volatile int *)ptr = 0xffffffff;
			if (*(volatile int *)ptr != 0xffffffff) {
				page_bad = TRUE;
			}
			/*
			 * Test for all 0's
			 */
			*(volatile int *)ptr = 0x0;
			if (*(volatile int *)ptr != 0x0) {
				page_bad = TRUE;
			}
			/*
			 * Restore original value.
			 */
			*(int *)ptr = tmp;

			/*
			 * Adjust array of valid/good pages.
			 */
			if (page_bad == TRUE) {
				continue;
			}
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer. Otherwise start a new chunk.
			 * Note that "end" points one higher than end,
			 * making the range >= start and < end.
			 * If we're also doing a speculative memory
			 * test and we are at or past the end, bump up Maxmem
			 * so that we keep going. The first bad page
			 * will terminate the loop.
			 */
			if (phys_avail[pa_indx] == pa) {
				phys_avail[pa_indx] += PAGE_SIZE;
			} else {
				pa_indx++;
				if (pa_indx == PHYS_AVAIL_ARRAY_END) {
					printf("Too many holes in the physical address space, giving up\n");
					pa_indx--;
					break;
				}
				phys_avail[pa_indx++] = pa;	/* start */
				phys_avail[pa_indx] = pa + PAGE_SIZE;	/* end */
			}
			physmem++;
		}
	}
	*pte = 0;
	invltlb();

	/*
	 * XXX
	 * The last chunk must contain at least one page plus the message
	 * buffer to avoid complicating other code (message buffer address
	 * calculation, etc.).
	 */
	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
	    round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
		phys_avail[pa_indx--] = 0;
		phys_avail[pa_indx--] = 0;
	}

	Maxmem = atop(phys_avail[pa_indx]);

	/* Trim off space for the message buffer. */
	phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);

	avail_end = phys_avail[pa_indx];
}

void
init386(first)
	int first;
{
	int x;
	struct gate_descriptor *gdp;
	int gsel_tss;
#ifndef SMP
	/* table descriptors - used to load tables by microprocessor */
	struct region_descriptor r_gdt, r_idt;
#endif
	int off;

	/*
	 * Prevent lowering of the ipl if we call tsleep() early.
1792 */ 1793 safepri = cpl; 1794 1795 proc0.p_addr = proc0paddr; 1796 1797 atdevbase = ISA_HOLE_START + KERNBASE; 1798 1799 if (bootinfo.bi_modulep) { 1800 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE; 1801 preload_bootstrap_relocate(KERNBASE); 1802 } 1803 if (bootinfo.bi_envp) 1804 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE; 1805 1806 /* 1807 * make gdt memory segments, the code segment goes up to end of the 1808 * page with etext in it, the data segment goes to the end of 1809 * the address space 1810 */ 1811 /* 1812 * XXX text protection is temporarily (?) disabled. The limit was 1813 * i386_btop(round_page(etext)) - 1. 1814 */ 1815 gdt_segs[GCODE_SEL].ssd_limit = i386_btop(0) - 1; 1816 gdt_segs[GDATA_SEL].ssd_limit = i386_btop(0) - 1; 1817#ifdef SMP 1818 gdt_segs[GPRIV_SEL].ssd_limit = 1819 i386_btop(sizeof(struct privatespace)) - 1; 1820 gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[0]; 1821 gdt_segs[GPROC0_SEL].ssd_base = 1822 (int) &SMP_prvspace[0].globaldata.gd_common_tss; 1823 SMP_prvspace[0].globaldata.gd_prvspace = &SMP_prvspace[0]; 1824#else 1825 gdt_segs[GPRIV_SEL].ssd_limit = i386_btop(0) - 1; 1826 gdt_segs[GPROC0_SEL].ssd_base = (int) &common_tss; 1827#endif 1828 1829 for (x = 0; x < NGDT; x++) { 1830#ifdef BDE_DEBUGGER 1831 /* avoid overwriting db entries with APM ones */ 1832 if (x >= GAPMCODE32_SEL && x <= GAPMDATA_SEL) 1833 continue; 1834#endif 1835 ssdtosd(&gdt_segs[x], &gdt[x].sd); 1836 } 1837 1838 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 1839 r_gdt.rd_base = (int) gdt; 1840 lgdt(&r_gdt); 1841 1842 /* make ldt memory segments */ 1843 /* 1844 * The data segment limit must not cover the user area because we 1845 * don't want the user area to be writable in copyout() etc. (page 1846 * level protection is lost in kernel mode on 386's). Also, we 1847 * don't want the user area to be writable directly (page level 1848 * protection of the user area is not available on 486's with 1849 * CR0_WP set, because there is no user-read/kernel-write mode). 1850 * 1851 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max. And it 1852 * should be spelled ...MAX_USER... 1853 */ 1854#define VM_END_USER_RW_ADDRESS VM_MAXUSER_ADDRESS 1855 /* 1856 * The code segment limit has to cover the user area until we move 1857 * the signal trampoline out of the user area. This is safe because 1858 * the code segment cannot be written to directly. 
1859 */ 1860#define VM_END_USER_R_ADDRESS (VM_END_USER_RW_ADDRESS + UPAGES * PAGE_SIZE) 1861 ldt_segs[LUCODE_SEL].ssd_limit = i386_btop(VM_END_USER_R_ADDRESS) - 1; 1862 ldt_segs[LUDATA_SEL].ssd_limit = i386_btop(VM_END_USER_RW_ADDRESS) - 1; 1863 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++) 1864 ssdtosd(&ldt_segs[x], &ldt[x].sd); 1865 1866 _default_ldt = GSEL(GLDT_SEL, SEL_KPL); 1867 lldt(_default_ldt); 1868#ifdef USER_LDT 1869 currentldt = _default_ldt; 1870#endif 1871 1872 /* exceptions */ 1873 for (x = 0; x < NIDT; x++) 1874 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1875 setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1876 setidt(1, &IDTVEC(dbg), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1877 setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1878 setidt(3, &IDTVEC(bpt), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL)); 1879 setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL)); 1880 setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1881 setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1882 setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1883 setidt(8, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL)); 1884 setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1885 setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1886 setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1887 setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1888 setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1889 setidt(14, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1890 setidt(15, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1891 setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1892 setidt(17, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1893 setidt(18, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1894 setidt(0x80, &IDTVEC(int0x80_syscall), 1895 SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL)); 1896 1897 r_idt.rd_limit = sizeof(idt0) - 1; 1898 r_idt.rd_base = (int) idt; 1899 lidt(&r_idt); 1900 1901 /* 1902 * Initialize the console before we print anything out. 1903 */ 1904 cninit(); 1905 1906#include "isa.h" 1907#if NISA >0 1908 isa_defaultirq(); 1909#endif 1910 1911#ifdef DDB 1912 kdb_init(); 1913 if (boothowto & RB_KDB) 1914 Debugger("Boot flags requested debugger"); 1915#endif 1916 1917 finishidentcpu(); /* Final stage of CPU initialization */ 1918 setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1919 setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1920 initializecpu(); /* Initialize CPU registers */ 1921 1922 /* make an initial tss so cpu can get interrupt stack on syscall! 
*/ 1923 common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16; 1924 common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL) ; 1925 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); 1926 private_tss = 0; 1927 tss_gdt = &gdt[GPROC0_SEL].sd; 1928 common_tssd = *tss_gdt; 1929 common_tss.tss_ioopt = (sizeof common_tss) << 16; 1930 ltr(gsel_tss); 1931 1932 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 = 1933 dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)]; 1934 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 = 1935 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL); 1936 dblfault_tss.tss_cr3 = (int)IdlePTD; 1937 dblfault_tss.tss_eip = (int) dblfault_handler; 1938 dblfault_tss.tss_eflags = PSL_KERNEL; 1939 dblfault_tss.tss_ds = dblfault_tss.tss_es = 1940 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL); 1941 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL); 1942 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL); 1943 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL); 1944 1945 vm86_initialize(); 1946 getmemsize(first); 1947 1948 /* now running on new page tables, configured,and u/iom is accessible */ 1949 1950 /* Map the message buffer. */ 1951 for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE) 1952 pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off); 1953 1954 msgbufinit(msgbufp, MSGBUF_SIZE); 1955 1956 /* make a call gate to reenter kernel with */ 1957 gdp = &ldt[LSYS5CALLS_SEL].gd; 1958 1959 x = (int) &IDTVEC(syscall); 1960 gdp->gd_looffset = x++; 1961 gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL); 1962 gdp->gd_stkcpy = 1; 1963 gdp->gd_type = SDT_SYS386CGT; 1964 gdp->gd_dpl = SEL_UPL; 1965 gdp->gd_p = 1; 1966 gdp->gd_hioffset = ((int) &IDTVEC(syscall)) >>16; 1967 1968 /* XXX does this work? */ 1969 ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL]; 1970 ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL]; 1971 1972 /* transfer to user mode */ 1973 1974 _ucodesel = LSEL(LUCODE_SEL, SEL_UPL); 1975 _udatasel = LSEL(LUDATA_SEL, SEL_UPL); 1976 1977 /* setup proc 0's pcb */ 1978 proc0.p_addr->u_pcb.pcb_flags = 0; 1979 proc0.p_addr->u_pcb.pcb_cr3 = (int)IdlePTD; 1980#ifdef SMP 1981 proc0.p_addr->u_pcb.pcb_mpnest = 1; 1982#endif 1983 proc0.p_addr->u_pcb.pcb_ext = 0; 1984} 1985 1986#if defined(I586_CPU) && !defined(NO_F00F_HACK) 1987static void f00f_hack(void *unused); 1988SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL); 1989 1990static void 1991f00f_hack(void *unused) { 1992 struct gate_descriptor *new_idt; 1993#ifndef SMP 1994 struct region_descriptor r_idt; 1995#endif 1996 vm_offset_t tmp; 1997 1998 if (!has_f00f_bug) 1999 return; 2000 2001 printf("Intel Pentium detected, installing workaround for F00F bug\n"); 2002 2003 r_idt.rd_limit = sizeof(idt0) - 1; 2004 2005 tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2); 2006 if (tmp == 0) 2007 panic("kmem_alloc returned 0"); 2008 if (((unsigned int)tmp & (PAGE_SIZE-1)) != 0) 2009 panic("kmem_alloc returned non-page-aligned memory"); 2010 /* Put the first seven entries in the lower page */ 2011 new_idt = (struct gate_descriptor*)(tmp + PAGE_SIZE - (7*8)); 2012 bcopy(idt, new_idt, sizeof(idt0)); 2013 r_idt.rd_base = (int)new_idt; 2014 lidt(&r_idt); 2015 idt = new_idt; 2016 if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE, 2017 VM_PROT_READ, FALSE) != KERN_SUCCESS) 2018 panic("vm_map_protect failed"); 2019 return; 2020} 2021#endif /* defined(I586_CPU) && !NO_F00F_HACK */ 2022 2023int 2024ptrace_set_pc(p, addr) 2025 struct proc *p; 2026 unsigned long addr; 2027{ 2028 p->p_md.md_regs->tf_eip = addr; 2029 
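	/* The new pc is written into the saved trapframe; it takes effect
	 * when the process next returns to user mode. */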
return (0); 2030} 2031 2032int 2033ptrace_single_step(p) 2034 struct proc *p; 2035{ 2036 p->p_md.md_regs->tf_eflags |= PSL_T; 2037 return (0); 2038} 2039 2040int ptrace_read_u_check(p, addr, len) 2041 struct proc *p; 2042 vm_offset_t addr; 2043 size_t len; 2044{ 2045 vm_offset_t gap; 2046 2047 if ((vm_offset_t) (addr + len) < addr) 2048 return EPERM; 2049 if ((vm_offset_t) (addr + len) <= sizeof(struct user)) 2050 return 0; 2051 2052 gap = (char *) p->p_md.md_regs - (char *) p->p_addr; 2053 2054 if ((vm_offset_t) addr < gap) 2055 return EPERM; 2056 if ((vm_offset_t) (addr + len) <= 2057 (vm_offset_t) (gap + sizeof(struct trapframe))) 2058 return 0; 2059 return EPERM; 2060} 2061 2062int ptrace_write_u(p, off, data) 2063 struct proc *p; 2064 vm_offset_t off; 2065 long data; 2066{ 2067 struct trapframe frame_copy; 2068 vm_offset_t min; 2069 struct trapframe *tp; 2070 2071 /* 2072 * Privileged kernel state is scattered all over the user area. 2073 * Only allow write access to parts of regs and to fpregs. 2074 */ 2075 min = (char *)p->p_md.md_regs - (char *)p->p_addr; 2076 if (off >= min && off <= min + sizeof(struct trapframe) - sizeof(int)) { 2077 tp = p->p_md.md_regs; 2078 frame_copy = *tp; 2079 *(int *)((char *)&frame_copy + (off - min)) = data; 2080 if (!EFL_SECURE(frame_copy.tf_eflags, tp->tf_eflags) || 2081 !CS_SECURE(frame_copy.tf_cs)) 2082 return (EINVAL); 2083 *(int*)((char *)p->p_addr + off) = data; 2084 return (0); 2085 } 2086 min = offsetof(struct user, u_pcb) + offsetof(struct pcb, pcb_savefpu); 2087 if (off >= min && off <= min + sizeof(struct save87) - sizeof(int)) { 2088 *(int*)((char *)p->p_addr + off) = data; 2089 return (0); 2090 } 2091 return (EFAULT); 2092} 2093 2094int 2095fill_regs(p, regs) 2096 struct proc *p; 2097 struct reg *regs; 2098{ 2099 struct pcb *pcb; 2100 struct trapframe *tp; 2101 2102 tp = p->p_md.md_regs; 2103 regs->r_fs = tp->tf_fs; 2104 regs->r_es = tp->tf_es; 2105 regs->r_ds = tp->tf_ds; 2106 regs->r_edi = tp->tf_edi; 2107 regs->r_esi = tp->tf_esi; 2108 regs->r_ebp = tp->tf_ebp; 2109 regs->r_ebx = tp->tf_ebx; 2110 regs->r_edx = tp->tf_edx; 2111 regs->r_ecx = tp->tf_ecx; 2112 regs->r_eax = tp->tf_eax; 2113 regs->r_eip = tp->tf_eip; 2114 regs->r_cs = tp->tf_cs; 2115 regs->r_eflags = tp->tf_eflags; 2116 regs->r_esp = tp->tf_esp; 2117 regs->r_ss = tp->tf_ss; 2118 pcb = &p->p_addr->u_pcb; 2119 regs->r_gs = pcb->pcb_gs; 2120 return (0); 2121} 2122 2123int 2124set_regs(p, regs) 2125 struct proc *p; 2126 struct reg *regs; 2127{ 2128 struct pcb *pcb; 2129 struct trapframe *tp; 2130 2131 tp = p->p_md.md_regs; 2132 if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) || 2133 !CS_SECURE(regs->r_cs)) 2134 return (EINVAL); 2135 tp->tf_fs = regs->r_fs; 2136 tp->tf_es = regs->r_es; 2137 tp->tf_ds = regs->r_ds; 2138 tp->tf_edi = regs->r_edi; 2139 tp->tf_esi = regs->r_esi; 2140 tp->tf_ebp = regs->r_ebp; 2141 tp->tf_ebx = regs->r_ebx; 2142 tp->tf_edx = regs->r_edx; 2143 tp->tf_ecx = regs->r_ecx; 2144 tp->tf_eax = regs->r_eax; 2145 tp->tf_eip = regs->r_eip; 2146 tp->tf_cs = regs->r_cs; 2147 tp->tf_eflags = regs->r_eflags; 2148 tp->tf_esp = regs->r_esp; 2149 tp->tf_ss = regs->r_ss; 2150 pcb = &p->p_addr->u_pcb; 2151 pcb->pcb_gs = regs->r_gs; 2152 return (0); 2153} 2154 2155int 2156fill_fpregs(p, fpregs) 2157 struct proc *p; 2158 struct fpreg *fpregs; 2159{ 2160 bcopy(&p->p_addr->u_pcb.pcb_savefpu, fpregs, sizeof *fpregs); 2161 return (0); 2162} 2163 2164int 2165set_fpregs(p, fpregs) 2166 struct proc *p; 2167 struct fpreg *fpregs; 2168{ 2169 bcopy(fpregs, 
&p->p_addr->u_pcb.pcb_savefpu, sizeof *fpregs); 2170 return (0); 2171} 2172 2173int 2174fill_dbregs(p, dbregs) 2175 struct proc *p; 2176 struct dbreg *dbregs; 2177{ 2178 struct pcb *pcb; 2179 2180 pcb = &p->p_addr->u_pcb; 2181 dbregs->dr0 = pcb->pcb_dr0; 2182 dbregs->dr1 = pcb->pcb_dr1; 2183 dbregs->dr2 = pcb->pcb_dr2; 2184 dbregs->dr3 = pcb->pcb_dr3; 2185 dbregs->dr4 = 0; 2186 dbregs->dr5 = 0; 2187 dbregs->dr6 = pcb->pcb_dr6; 2188 dbregs->dr7 = pcb->pcb_dr7; 2189 return (0); 2190} 2191 2192int 2193set_dbregs(p, dbregs) 2194 struct proc *p; 2195 struct dbreg *dbregs; 2196{ 2197 struct pcb *pcb; 2198 2199 pcb = &p->p_addr->u_pcb; 2200 2201 /* 2202 * Don't let a process set a breakpoint that is not within the 2203 * process's address space. If a process could do this, it 2204 * could halt the system by setting a breakpoint in the kernel 2205 * (if ddb was enabled). Thus, we need to check to make sure 2206 * that no breakpoints are being enabled for addresses outside 2207 * process's address space, unless, perhaps, we were called by 2208 * uid 0. 2209 * 2210 * XXX - what about when the watched area of the user's 2211 * address space is written into from within the kernel 2212 * ... wouldn't that still cause a breakpoint to be generated 2213 * from within kernel mode? 2214 */ 2215 2216 if (p->p_ucred->cr_uid != 0) { 2217 if (dbregs->dr7 & 0x3) { 2218 /* dr0 is enabled */ 2219 if (dbregs->dr0 >= VM_MAXUSER_ADDRESS) 2220 return (EINVAL); 2221 } 2222 2223 if (dbregs->dr7 & (0x3<<2)) { 2224 /* dr1 is enabled */ 2225 if (dbregs->dr1 >= VM_MAXUSER_ADDRESS) 2226 return (EINVAL); 2227 } 2228 2229 if (dbregs->dr7 & (0x3<<4)) { 2230 /* dr2 is enabled */ 2231 if (dbregs->dr2 >= VM_MAXUSER_ADDRESS) 2232 return (EINVAL); 2233 } 2234 2235 if (dbregs->dr7 & (0x3<<6)) { 2236 /* dr3 is enabled */ 2237 if (dbregs->dr3 >= VM_MAXUSER_ADDRESS) 2238 return (EINVAL); 2239 } 2240 } 2241 2242 pcb->pcb_dr0 = dbregs->dr0; 2243 pcb->pcb_dr1 = dbregs->dr1; 2244 pcb->pcb_dr2 = dbregs->dr2; 2245 pcb->pcb_dr3 = dbregs->dr3; 2246 pcb->pcb_dr6 = dbregs->dr6; 2247 pcb->pcb_dr7 = dbregs->dr7; 2248 2249 pcb->pcb_flags |= PCB_DBREGS; 2250 2251 return (0); 2252} 2253 2254/* 2255 * Return > 0 if a hardware breakpoint has been hit, and the 2256 * breakpoint was in user space. Return 0, otherwise. 
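 * (The enable bits in %dr7 and the B0-B3 status bits in %dr6 are examined
 * below to determine which breakpoints, if any, fired and at what addresses.)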
2257 */ 2258int 2259user_dbreg_trap(void) 2260{ 2261 u_int32_t dr7, dr6; /* debug registers dr6 and dr7 */ 2262 u_int32_t bp; /* breakpoint bits extracted from dr6 */ 2263 int nbp; /* number of breakpoints that triggered */ 2264 caddr_t addr[4]; /* breakpoint addresses */ 2265 int i; 2266 2267 dr7 = rdr7(); 2268 if ((dr7 & 0x000000ff) == 0) { 2269 /* 2270 * none of the breakpoint enable bits (L0-L3, G0-G3) in 2271 * the dr7 register are set, thus the trap couldn't have 2272 * been caused by the hardware debug registers 2273 */ 2274 return 0; 2275 } 2276 2277 nbp = 0; 2278 dr6 = rdr6(); 2279 bp = dr6 & 0x0000000f; 2280 2281 if (!bp) { 2282 /* 2283 * None of the breakpoint status bits are set, meaning this 2284 * trap was not caused by any of the debug registers 2285 */ 2286 return 0; 2287 } 2288 2289 /* 2290 * At least one of the breakpoints was hit; check to see 2291 * which ones, and whether any of them are user space addresses 2292 */ 2293 2294 if (bp & 0x01) { 2295 addr[nbp++] = (caddr_t)rdr0(); 2296 } 2297 if (bp & 0x02) { 2298 addr[nbp++] = (caddr_t)rdr1(); 2299 } 2300 if (bp & 0x04) { 2301 addr[nbp++] = (caddr_t)rdr2(); 2302 } 2303 if (bp & 0x08) { 2304 addr[nbp++] = (caddr_t)rdr3(); 2305 } 2306 2307 for (i=0; i<nbp; i++) { 2308 if (addr[i] < 2309 (caddr_t)VM_MAXUSER_ADDRESS) { 2310 /* 2311 * addr[i] is in user space 2312 */ 2313 return nbp; 2314 } 2315 } 2316 2317 /* 2318 * None of the breakpoints are in user space. 2319 */ 2320 return 0; 2321} 2322 2323 2324#ifndef DDB 2325void 2326Debugger(const char *msg) 2327{ 2328 printf("Debugger(\"%s\") called.\n", msg); 2329} 2330#endif /* no DDB */ 2331 2332#include <sys/disklabel.h> 2333 2334/* 2335 * Determine the size of the transfer, and make sure it is 2336 * within the boundaries of the partition. Adjust transfer 2337 * if needed, and signal errors or early completion. 2338 */ 2339int 2340bounds_check_with_label(struct bio *bp, struct disklabel *lp, int wlabel) 2341{ 2342 struct partition *p = lp->d_partitions + dkpart(bp->bio_dev); 2343 int labelsect = lp->d_partitions[0].p_offset; 2344 int maxsz = p->p_size, 2345 sz = (bp->bio_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT; 2346 2347 /* overwriting disk label? */ 2348 /* XXX should also protect bootstrap in first 8K */ 2349 if (bp->bio_blkno + p->p_offset <= LABELSECTOR + labelsect && 2350#if LABELSECTOR != 0 2351 bp->bio_blkno + p->p_offset + sz > LABELSECTOR + labelsect && 2352#endif 2353 (bp->bio_cmd == BIO_WRITE) && wlabel == 0) { 2354 bp->bio_error = EROFS; 2355 goto bad; 2356 } 2357 2358#if defined(DOSBBSECTOR) && defined(notyet) 2359 /* overwriting master boot record? */ 2360 if (bp->bio_blkno + p->p_offset <= DOSBBSECTOR && 2361 (bp->bio_cmd == BIO_WRITE) && wlabel == 0) { 2362 bp->bio_error = EROFS; 2363 goto bad; 2364 } 2365#endif 2366 2367 /* beyond partition? */ 2368 if (bp->bio_blkno < 0 || bp->bio_blkno + sz > maxsz) { 2369 /* if exactly at end of disk, return an EOF */ 2370 if (bp->bio_blkno == maxsz) { 2371 bp->bio_resid = bp->bio_bcount; 2372 return(0); 2373 } 2374 /* or truncate if part of it fits */ 2375 sz = maxsz - bp->bio_blkno; 2376 if (sz <= 0) { 2377 bp->bio_error = EINVAL; 2378 goto bad; 2379 } 2380 bp->bio_bcount = sz << DEV_BSHIFT; 2381 } 2382 2383 bp->bio_pblkno = bp->bio_blkno + p->p_offset; 2384 return(1); 2385 2386bad: 2387 bp->bio_flags |= BIO_ERROR; 2388 return(-1); 2389} 2390 2391#ifdef DDB 2392 2393/* 2394 * Provide inb() and outb() as functions. They are normally only 2395 * available as macros calling inlined functions, thus cannot be 2396 * called inside DDB.
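 * (With these definitions present they can typically be run from the DDB
 * prompt, e.g. ``call inb(0x61)''.)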
2397 * 2398 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined. 2399 */ 2400 2401#undef inb 2402#undef outb 2403 2404/* silence compiler warnings */ 2405u_char inb(u_int); 2406void outb(u_int, u_char); 2407 2408u_char 2409inb(u_int port) 2410{ 2411 u_char data; 2412 /* 2413 * We use %%dx and not %1 here because i/o is done at %dx and not at 2414 * %edx, while gcc generates inferior code (movw instead of movl) 2415 * if we tell it to load (u_short) port. 2416 */ 2417 __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port)); 2418 return (data); 2419} 2420 2421void 2422outb(u_int port, u_char data) 2423{ 2424 u_char al; 2425 /* 2426 * Use an unnecessary assignment to help gcc's register allocator. 2427 * This make a large difference for gcc-1.40 and a tiny difference 2428 * for gcc-2.6.0. For gcc-1.40, al had to be ``asm("ax")'' for 2429 * best results. gcc-2.6.0 can't handle this. 2430 */ 2431 al = data; 2432 __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port)); 2433} 2434 2435#endif /* DDB */ 2436