machdep.c revision 33134
/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 *	$Id: machdep.c,v 1.287 1998/02/04 22:32:08 eivind Exp $
 */

#include "apm.h"
#include "npx.h"
#include "opt_bounce.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_maxmem.h"
#include "opt_perfmon.h"
#include "opt_smp.h"
#include "opt_sysvipc.h"
#include "opt_user_ldt.h"
#include "opt_userconfig.h"
#include "opt_vm86.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#ifdef SYSVMSG
#include <sys/msg.h>
#endif

#ifdef SYSVSEM
#include <sys/sem.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/user.h>
#include <sys/exec.h>

#include <ddb/ddb.h>

#include <net/netisr.h>

#if NAPM > 0
#include <machine/apm_bios.h>
#endif
#include <machine/cpu.h>
#include <machine/reg.h>
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/cons.h>
#include <machine/bootinfo.h>
#include <machine/ipl.h>
#include <machine/md_var.h>
#include <machine/pcb_ext.h>		/* pcb.h included via sys/user.h */
#ifdef SMP
#include <machine/smp.h>
#endif
#ifdef PERFMON
#include <machine/perfmon.h>
#endif

#include <i386/isa/isa_device.h>
#include <i386/isa/intr_machdep.h>
#include <i386/isa/rtc.h>
#include <machine/random.h>

extern void init386 __P((int first));
extern int ptrace_set_pc __P((struct proc *p, unsigned int addr));
extern int ptrace_single_step __P((struct proc *p));
extern int ptrace_write_u __P((struct proc *p, vm_offset_t off, int data));
extern void dblfault_handler __P((void));

extern void printcpuinfo(void);		/* XXX header file */
extern void earlysetcpuclass(void);	/* same header file */
extern void finishidentcpu(void);
extern void panicifcpuunsupported(void);
extern void initializecpu(void);

static void cpu_startup __P((void *));
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)

static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");

#ifdef BOUNCE_BUFFERS
#ifdef BOUNCEPAGES
int	bouncepages = BOUNCEPAGES;
#else
int	bouncepages = 0;
#endif
#endif	/* BOUNCE_BUFFERS */

int msgbufmapped = 0;		/* set when safe to use msgbuf */
int _udatasel, _ucodesel;
u_int atdevbase;

#if defined(SWTCH_OPTIM_STATS)
extern int swtch_optim_stats;
SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats,
	CTLFLAG_RD, &swtch_optim_stats, 0, "");
SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count,
	CTLFLAG_RD, &tlb_flush_count, 0, "");
#endif

int physmem = 0;
int cold = 1;

static int
sysctl_hw_physmem SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp, 0, ctob(physmem), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_physmem, "I", "");

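/*
 * These two handlers report memory sizes in bytes (ctob() converts a
 * page count to bytes).  From userland they read as the hw.physmem and
 * hw.usermem sysctl variables; usermem below is physical memory less
 * the pages the kernel currently has wired down.
 */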
static int
sysctl_hw_usermem SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp, 0,
		ctob(physmem - cnt.v_wire_count), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_usermem, "I", "");

int bootverbose = 0, Maxmem = 0;
long dumplo;

vm_offset_t phys_avail[10];

/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)

static void setup_netisrs __P((struct linker_set *)); /* XXX declare elsewhere */

static vm_offset_t buffer_sva, buffer_eva;
vm_offset_t clean_sva, clean_eva;
static vm_offset_t pager_sva, pager_eva;
extern struct linker_set netisr_set;

#define offsetof(type, member)	((size_t)(&((type *)0)->member))

static void
cpu_startup(dummy)
	void *dummy;
{
	register unsigned i;
	register caddr_t v;
	vm_offset_t maxaddr;
	vm_size_t size = 0;
	int firstaddr;
	vm_offset_t minaddr;

	if (boothowto & RB_VERBOSE)
		bootverbose++;

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	earlysetcpuclass();
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory = %d (%dK bytes)\n", ptoa(Maxmem), ptoa(Maxmem) / 1024);
	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			int size1 = phys_avail[indx + 1] - phys_avail[indx];

			printf("0x%08lx - 0x%08lx, %d bytes (%d pages)\n", phys_avail[indx],
			    phys_avail[indx + 1] - 1, size1, size1 / PAGE_SIZE);
		}
	}

	/*
	 * Quickly wire in netisrs.
	 */
	setup_netisrs(&netisr_set);

	/*
	 * Calculate callout wheel size
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	/*
	 * Allocate space for system data structures.
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */

	/*
	 * Make two passes.  The first pass calculates how much memory is
	 * needed and allocates it.  The second pass assigns virtual
	 * addresses to the various data structures.
	 */
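
	/*
	 * An illustration of the scheme: valloc(), defined just below,
	 * simply carves a chunk out of "v" and advances it, so that
	 *
	 *	valloc(callout, struct callout, ncallout);
	 *
	 * expands to roughly
	 *
	 *	callout = (struct callout *)v;
	 *	v = (caddr_t)(callout + ncallout);
	 *
	 * On the first pass v starts at 0, so at the end it equals the
	 * total size needed; kmem_alloc() then provides real memory and
	 * the identical second pass parcels it out.
	 */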
	firstaddr = 0;
again:
	v = (caddr_t)firstaddr;

#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))

#ifdef BOUNCE_BUFFERS
	/*
	 * If there is more than 16MB of memory, allocate some bounce buffers
	 */
	if (Maxmem > 4096) {
		if (bouncepages == 0) {
			bouncepages = 64;
		}
		v = (caddr_t)((vm_offset_t)round_page(v));
		valloc(bouncememory, char, bouncepages * PAGE_SIZE);
	}
#endif

	valloc(callout, struct callout, ncallout);
	valloc(callwheel, struct callout_tailq, callwheelsize);
#ifdef SYSVSHM
	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif
#ifdef SYSVSEM
	valloc(sema, struct semid_ds, seminfo.semmni);
	valloc(sem, struct sem, seminfo.semmns);
	/* This is pretty disgusting! */
	valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
#endif
#ifdef SYSVMSG
	valloc(msgpool, char, msginfo.msgmax);
	valloc(msgmaps, struct msgmap, msginfo.msgseg);
	valloc(msghdrs, struct msg, msginfo.msgtql);
	valloc(msqids, struct msqid_ds, msginfo.msgmni);
#endif

	if (nbuf == 0) {
		nbuf = 30;
		if (physmem > 1024)
			nbuf += min((physmem - 1024) / 8, 2048);
	}
	nswbuf = max(min(nbuf/4, 64), 16);

	valloc(swbuf, struct buf, nswbuf);
	valloc(buf, struct buf, nbuf);

	/*
	 * End of first pass, size has been calculated so allocate memory
	 */
	if (firstaddr == 0) {
		size = (vm_size_t)(v - firstaddr);
		firstaddr = (int)kmem_alloc(kernel_map, round_page(size));
		if (firstaddr == 0)
			panic("startup: no room for tables");
		goto again;
	}

	/*
	 * End of second pass, addresses have been assigned
	 */
	if ((vm_size_t)(v - firstaddr) != size)
		panic("startup: table size inconsistency");

#ifdef BOUNCE_BUFFERS
	clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
			(nbuf*BKVASIZE) + (nswbuf*MAXPHYS) +
			maxbkva + pager_map_size);
	io_map = kmem_suballoc(clean_map, &minaddr, &maxaddr, maxbkva);
#else
	clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
			(nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
#endif
	buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
				(nbuf*BKVASIZE));
	pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
				(nswbuf*MAXPHYS) + pager_map_size);
	pager_map->system_map = 1;
	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
				(16*(ARG_MAX+PAGE_SIZE)));

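	/*
	 * The resulting submap hierarchy looks roughly like this (sizes
	 * as computed above; io_map also lives under clean_map when
	 * BOUNCE_BUFFERS is configured):
	 *
	 *	kernel_map
	 *	    clean_map   (nbuf*BKVASIZE + nswbuf*MAXPHYS + pager_map_size)
	 *	        buffer_map  (nbuf*BKVASIZE)
	 *	        pager_map   (nswbuf*MAXPHYS + pager_map_size)
	 *	    exec_map    (16*(ARG_MAX+PAGE_SIZE))
	 */
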
362 */ 363 { 364 vm_offset_t mb_map_size; 365 366 mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES; 367 mb_map_size = roundup2(mb_map_size, max(MCLBYTES, PAGE_SIZE)); 368 mclrefcnt = malloc(mb_map_size / MCLBYTES, M_MBUF, M_NOWAIT); 369 bzero(mclrefcnt, mb_map_size / MCLBYTES); 370 mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr, 371 mb_map_size); 372 mb_map->system_map = 1; 373 } 374 375 /* 376 * Initialize callouts 377 */ 378 SLIST_INIT(&callfree); 379 for (i = 0; i < ncallout; i++) { 380 SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle); 381 } 382 383 for (i = 0; i < callwheelsize; i++) { 384 TAILQ_INIT(&callwheel[i]); 385 } 386 387#if defined(USERCONFIG) 388#if defined(USERCONFIG_BOOT) 389 if (1) { 390#else 391 if (boothowto & RB_CONFIG) { 392#endif 393 userconfig(); 394 cninit(); /* the preferred console may have changed */ 395 } 396#endif 397 398#ifdef BOUNCE_BUFFERS 399 /* 400 * init bounce buffers 401 */ 402 vm_bounce_init(); 403#endif 404 405 printf("avail memory = %d (%dK bytes)\n", ptoa(cnt.v_free_count), 406 ptoa(cnt.v_free_count) / 1024); 407 408 /* 409 * Set up buffers, so they can be used to read disk labels. 410 */ 411 bufinit(); 412 vm_pager_bufferinit(); 413 414#ifdef SMP 415 /* 416 * OK, enough kmem_alloc/malloc state should be up, lets get on with it! 417 */ 418 mp_start(); /* fire up the APs and APICs */ 419 mp_announce(); 420#endif /* SMP */ 421} 422 423int 424register_netisr(num, handler) 425 int num; 426 netisr_t *handler; 427{ 428 429 if (num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs)) ) { 430 printf("register_netisr: bad isr number: %d\n", num); 431 return (EINVAL); 432 } 433 netisrs[num] = handler; 434 return (0); 435} 436 437static void 438setup_netisrs(ls) 439 struct linker_set *ls; 440{ 441 int i; 442 const struct netisrtab *nit; 443 444 for(i = 0; ls->ls_items[i]; i++) { 445 nit = (const struct netisrtab *)ls->ls_items[i]; 446 register_netisr(nit->nit_num, nit->nit_isr); 447 } 448} 449 450/* 451 * Send an interrupt to process. 452 * 453 * Stack is set up to allow sigcode stored 454 * at top to call routine, followed by kcall 455 * to sigreturn routine below. After sigreturn 456 * resets the signal mask, the stack, and the 457 * frame pointer, it returns to the user 458 * specified pc, psl. 459 */ 460void 461sendsig(catcher, sig, mask, code) 462 sig_t catcher; 463 int sig, mask; 464 u_long code; 465{ 466 register struct proc *p = curproc; 467 register struct trapframe *regs; 468 register struct sigframe *fp; 469 struct sigframe sf; 470 struct sigacts *psp = p->p_sigacts; 471 int oonstack; 472 473 regs = p->p_md.md_regs; 474 oonstack = psp->ps_sigstk.ss_flags & SS_ONSTACK; 475 /* 476 * Allocate and validate space for the signal handler context. 477 */ 478 if ((psp->ps_flags & SAS_ALTSTACK) && !oonstack && 479 (psp->ps_sigonstack & sigmask(sig))) { 480 fp = (struct sigframe *)(psp->ps_sigstk.ss_sp + 481 psp->ps_sigstk.ss_size - sizeof(struct sigframe)); 482 psp->ps_sigstk.ss_flags |= SS_ONSTACK; 483 } else { 484 fp = (struct sigframe *)regs->tf_esp - 1; 485 } 486 487 /* 488 * grow() will return FALSE if the fp will not fit inside the stack 489 * and the stack can not be grown. useracc will return FALSE 490 * if access is denied. 491 */ 492 if ((grow(p, (int)fp) == FALSE) || 493 (useracc((caddr_t)fp, sizeof(struct sigframe), B_WRITE) == FALSE)) { 494 /* 495 * Process has trashed its stack; give it an illegal 496 * instruction to halt it in its tracks. 
497 */ 498 SIGACTION(p, SIGILL) = SIG_DFL; 499 sig = sigmask(SIGILL); 500 p->p_sigignore &= ~sig; 501 p->p_sigcatch &= ~sig; 502 p->p_sigmask &= ~sig; 503 psignal(p, SIGILL); 504 return; 505 } 506 507 /* 508 * Build the argument list for the signal handler. 509 */ 510 if (p->p_sysent->sv_sigtbl) { 511 if (sig < p->p_sysent->sv_sigsize) 512 sig = p->p_sysent->sv_sigtbl[sig]; 513 else 514 sig = p->p_sysent->sv_sigsize + 1; 515 } 516 sf.sf_signum = sig; 517 sf.sf_code = code; 518 sf.sf_scp = &fp->sf_sc; 519 sf.sf_addr = (char *) regs->tf_err; 520 sf.sf_handler = catcher; 521 522 /* save scratch registers */ 523 sf.sf_sc.sc_eax = regs->tf_eax; 524 sf.sf_sc.sc_ebx = regs->tf_ebx; 525 sf.sf_sc.sc_ecx = regs->tf_ecx; 526 sf.sf_sc.sc_edx = regs->tf_edx; 527 sf.sf_sc.sc_esi = regs->tf_esi; 528 sf.sf_sc.sc_edi = regs->tf_edi; 529 sf.sf_sc.sc_cs = regs->tf_cs; 530 sf.sf_sc.sc_ds = regs->tf_ds; 531 sf.sf_sc.sc_ss = regs->tf_ss; 532 sf.sf_sc.sc_es = regs->tf_es; 533 sf.sf_sc.sc_isp = regs->tf_isp; 534 535 /* 536 * Build the signal context to be used by sigreturn. 537 */ 538 sf.sf_sc.sc_onstack = oonstack; 539 sf.sf_sc.sc_mask = mask; 540 sf.sf_sc.sc_sp = regs->tf_esp; 541 sf.sf_sc.sc_fp = regs->tf_ebp; 542 sf.sf_sc.sc_pc = regs->tf_eip; 543 sf.sf_sc.sc_ps = regs->tf_eflags; 544 sf.sf_sc.sc_trapno = regs->tf_trapno; 545 sf.sf_sc.sc_err = regs->tf_err; 546 547#ifdef VM86 548 /* 549 * If we're a vm86 process, we want to save the segment registers. 550 * We also change eflags to be our emulated eflags, not the actual 551 * eflags. 552 */ 553 if (regs->tf_eflags & PSL_VM) { 554 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 555 struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 556 557 sf.sf_sc.sc_gs = tf->tf_vm86_gs; 558 sf.sf_sc.sc_fs = tf->tf_vm86_fs; 559 sf.sf_sc.sc_es = tf->tf_vm86_es; 560 sf.sf_sc.sc_ds = tf->tf_vm86_ds; 561 562 if (vm86->vm86_has_vme == 0) 563 sf.sf_sc.sc_ps = (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) 564 | (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); 565 566 /* 567 * We should never have PSL_T set when returning from vm86 568 * mode. It may be set here if we deliver a signal before 569 * getting to vm86 mode, so turn it off. 570 */ 571 tf->tf_eflags &= ~(PSL_VM | PSL_T | PSL_VIF | PSL_VIP); 572 } 573#endif /* VM86 */ 574 575 /* 576 * Copy the sigframe out to the user's stack. 577 */ 578 if (copyout(&sf, fp, sizeof(struct sigframe)) != 0) { 579 /* 580 * Something is wrong with the stack pointer. 581 * ...Kill the process. 582 */ 583 sigexit(p, SIGILL); 584 } 585 586 regs->tf_esp = (int)fp; 587 regs->tf_eip = (int)(((char *)PS_STRINGS) - *(p->p_sysent->sv_szsigcode)); 588 regs->tf_cs = _ucodesel; 589 regs->tf_ds = _udatasel; 590 regs->tf_es = _udatasel; 591 regs->tf_ss = _udatasel; 592} 593 594/* 595 * System call to cleanup state after a signal 596 * has been taken. Reset signal mask and 597 * stack state from context left by sendsig (above). 598 * Return to previous pc and psl as specified by 599 * context left by sendsig. Check carefully to 600 * make sure that the user has not modified the 601 * state to gain improper privileges. 602 */ 603int 604sigreturn(p, uap) 605 struct proc *p; 606 struct sigreturn_args /* { 607 struct sigcontext *sigcntxp; 608 } */ *uap; 609{ 610 register struct sigcontext *scp; 611 register struct sigframe *fp; 612 register struct trapframe *regs = p->p_md.md_regs; 613 int eflags; 614 615 /* 616 * (XXX old comment) regs->tf_esp points to the return address. 617 * The user scp pointer is above that. 
/*
 * System call to clean up state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 */
int
sigreturn(p, uap)
	struct proc *p;
	struct sigreturn_args /* {
		struct sigcontext *sigcntxp;
	} */ *uap;
{
	register struct sigcontext *scp;
	register struct sigframe *fp;
	register struct trapframe *regs = p->p_md.md_regs;
	int eflags;

	/*
	 * (XXX old comment) regs->tf_esp points to the return address.
	 * The user scp pointer is above that.
	 * The return address is faked in the signal trampoline code
	 * for consistency.
	 */
	scp = uap->sigcntxp;
	fp = (struct sigframe *)
	    ((caddr_t)scp - offsetof(struct sigframe, sf_sc));

	if (useracc((caddr_t)fp, sizeof (*fp), B_WRITE) == 0)
		return(EFAULT);

	eflags = scp->sc_ps;
#ifdef VM86
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (p->p_addr->u_pcb.pcb_ext == 0)
			return (EINVAL);
		vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* go back to user mode if both flags are set */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(p, SIGBUS, 0);

#define	VM_USERCHANGE	(PSL_USERCHANGE | PSL_RF)
#define	VME_USERCHANGE	(VM_USERCHANGE | PSL_VIP | PSL_VIF)
		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		tf->tf_vm86_ds = scp->sc_ds;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
	} else {
#endif /* VM86 */
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
#define	EFLAGS_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFLAGS_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
#ifdef DEBUG
			printf("sigreturn: eflags = 0x%x\n", eflags);
#endif
			return(EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
#define	CS_SECURE(cs)	(ISPL(cs) == SEL_UPL)
		if (!CS_SECURE(scp->sc_cs)) {
#ifdef DEBUG
			printf("sigreturn: cs = 0x%x\n", scp->sc_cs);
#endif
			trapsignal(p, SIGBUS, T_PROTFLT);
			return(EINVAL);
		}
		regs->tf_ds = scp->sc_ds;
		regs->tf_es = scp->sc_es;
#ifdef VM86
	}
#endif

	/* restore scratch registers */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;

	if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0)
		return(EINVAL);

	if (scp->sc_onstack & 01)
		p->p_sigacts->ps_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SS_ONSTACK;
	p->p_sigmask = scp->sc_mask & ~sigcantmask;
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;
	return(EJUSTRETURN);
}

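/*
 * EFLAGS_SECURE() in words: the proposed eflags may differ from the old
 * value only in the PSL_USERCHANGE bits (the user-modifiable arithmetic
 * and control flags, plus PSL_RF per the XXX note above), and
 * CS_SECURE() simply insists the requested %cs selector has user
 * privilege.  The same two checks guard the ptrace paths in
 * ptrace_write_u() and set_regs() below.  EJUSTRETURN tells the syscall
 * layer to leave the just-restored registers untouched instead of
 * writing a normal return value into them.
 */
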
/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		__asm__ ("hlt");
}

/*
 * Turn the power off.
 */
void
cpu_power_down(void)
{
#if NAPM > 0
	apm_power_off();
#endif
}

/*
 * Clear registers on exec
 */
void
setregs(p, entry, stack)
	struct proc *p;
	u_long entry;
	u_long stack;
{
	struct trapframe *regs = p->p_md.md_regs;

#ifdef USER_LDT
	struct pcb *pcb = &p->p_addr->u_pcb;

	/* was i386_user_cleanup() in NetBSD */
	if (pcb->pcb_ldt) {
		if (pcb == curpcb)
			lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
		kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ldt,
			pcb->pcb_ldt_len * sizeof(union descriptor));
		pcb->pcb_ldt_len = (int)pcb->pcb_ldt = 0;
	}
#endif

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = entry;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_cs = _ucodesel;

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	p->p_addr->u_pcb.pcb_flags &= ~FP_SOFTFP;

	/*
	 * Arrange to trap the next npx or `fwait' instruction (see npx.c
	 * for why fwait must be trapped at least if there is an npx or an
	 * emulator).  This is mainly to handle the case where npx0 is not
	 * configured, since the npx routines normally set up the trap
	 * otherwise.  It should be done only at boot time, but doing it
	 * here allows modifying `npx_exists' for testing the emulator on
	 * systems with an npx.
	 */
	load_cr0(rcr0() | CR0_MP | CR0_TS);

#if NNPX > 0
	/* Initialize the npx (if any) for the current process. */
	npxinit(__INITIAL_NPXCW__);
#endif
}

static int
sysctl_machdep_adjkerntz SYSCTL_HANDLER_ARGS
{
	int error;
	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
		req);
	if (!error && req->newptr)
		resettodr();
	return (error);
}

SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
	&adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");

SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set,
	CTLFLAG_RW, &disable_rtc_set, 0, "");

SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo,
	CTLFLAG_RD, &bootinfo, bootinfo, "");

SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock,
	CTLFLAG_RW, &wall_cmos_clock, 0, "");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int currentldt;
int _default_ldt;
#ifdef SMP
union descriptor gdt[NGDT + NCPU];	/* global descriptor table */
#else
union descriptor gdt[NGDT];		/* global descriptor table */
#endif
struct gate_descriptor idt[NIDT];	/* interrupt descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */
#ifdef SMP
/* table descriptors - used to load tables by microprocessor */
struct region_descriptor r_gdt, r_idt;
#endif

#ifdef SMP
extern struct i386tss common_tss;	/* One tss per cpu */
#ifdef VM86
extern struct segment_descriptor common_tssd;
extern int private_tss;
extern u_int my_tr;
#endif /* VM86 */
#else
struct i386tss common_tss;
#ifdef VM86
struct segment_descriptor common_tssd;
u_int private_tss;			/* flag indicating private tss */
u_int my_tr;				/* which task register setting */
#endif /* VM86 */
#endif

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
struct gate_descriptor *t_idt;
extern int has_f00f_bug;
#endif

static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];

extern struct user *proc0paddr;

/* software prototypes -- in more palatable form */
struct soft_segment_descriptor gdt_segs[
#ifdef SMP
			NGDT + NCPU
#endif
						] = {
/* GNULL_SEL	0 Null Descriptor */
{	0x0,			/* segment base address  */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GCODE_SEL	1 Code Descriptor for kernel */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GDATA_SEL	2 Data Descriptor for kernel */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GLDT_SEL	3 LDT Descriptor */
{	(int) ldt,		/* segment base address  */
	sizeof(ldt)-1,		/* length - all address space */
	SDT_SYSLDT,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GTGATE_SEL	4 Null Descriptor - Placeholder */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GPANIC_SEL	5 Panic Tss Descriptor */
{	(int) &dblfault_tss,	/* segment base address  */
	sizeof(struct i386tss)-1,/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GPROC0_SEL	6 Proc 0 Tss Descriptor */
{
	(int) &common_tss,	/* segment base address */
	sizeof(struct i386tss)-1,/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GUSERLDT_SEL	7 User LDT Descriptor per process */
{	(int) ldt,		/* segment base address  */
	(512 * sizeof(union descriptor)-1),	/* length */
	SDT_SYSLDT,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GAPMCODE32_SEL 8 APM BIOS 32-bit interface (32bit Code) */
{	0,			/* segment base address (overwritten by APM) */
	0xfffff,		/* length */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GAPMCODE16_SEL 9 APM BIOS 32-bit interface (16bit Code) */
{	0,			/* segment base address (overwritten by APM) */
	0xfffff,		/* length */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GAPMDATA_SEL	10 APM BIOS 32-bit interface (Data) */
{	0,			/* segment base address (overwritten by APM) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
};

static struct soft_segment_descriptor ldt_segs[] = {
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Code Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
	/* Data Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
};

void
setidt(idx, func, typ, dpl, selec)
	int idx;
	inthand_t *func;
	int typ;
	int dpl;
	int selec;
{
	struct gate_descriptor *ip = idt + idx;

	ip->gd_looffset = (int)func;
	ip->gd_selector = selec;
	ip->gd_stkcpy = 0;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((int)func)>>16;
}

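/*
 * The dpl argument is what makes a vector reachable from user mode.
 * In the exception table below, for instance,
 *
 *	setidt(3, &IDTVEC(bpt), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
 *
 * installs the breakpoint gate with DPL 3 so a user-mode "int $3" is
 * allowed, while most fault vectors use SEL_KPL and can only be raised
 * by the hardware itself.
 */
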
#define	IDTVEC(name)	__CONCAT(X,name)

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(syscall), IDTVEC(int0x80_syscall);

void
sdtossd(sd, ssd)
	struct segment_descriptor *sd;
	struct soft_segment_descriptor *ssd;
{
	ssd->ssd_base  = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type  = sd->sd_type;
	ssd->ssd_dpl   = sd->sd_dpl;
	ssd->ssd_p     = sd->sd_p;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran  = sd->sd_gran;
}

void
init386(first)
	int first;
{
	int x;
	unsigned biosbasemem, biosextmem;
	struct gate_descriptor *gdp;
	int gsel_tss;

	struct isa_device *idp;
#ifndef SMP
	/* table descriptors - used to load tables by microprocessor */
	struct region_descriptor r_gdt, r_idt;
#endif
	int pagesinbase, pagesinext;
	int target_page, pa_indx;
	int off;
	int speculative_mprobe;

	/*
	 * Prevent lowering of the ipl if we call tsleep() early.
	 */
	safepri = cpl;

	proc0.p_addr = proc0paddr;

	atdevbase = ISA_HOLE_START + KERNBASE;

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

	/*
	 * make gdt memory segments, the code segment goes up to end of the
	 * page with etext in it, the data segment goes to the end of
	 * the address space
	 */
	/*
	 * XXX text protection is temporarily (?) disabled.  The limit was
	 * i386_btop(round_page(etext)) - 1.
	 */
	gdt_segs[GCODE_SEL].ssd_limit = i386_btop(0) - 1;
	gdt_segs[GDATA_SEL].ssd_limit = i386_btop(0) - 1;
#ifdef BDE_DEBUGGER
#define	NGDT1	8		/* avoid overwriting db entries with APM ones */
#else
#define	NGDT1	(sizeof gdt_segs / sizeof gdt_segs[0])
#endif
	for (x = 0; x < NGDT1; x++)
		ssdtosd(&gdt_segs[x], &gdt[x].sd);
#ifdef VM86
	common_tssd = gdt[GPROC0_SEL].sd;
#endif /* VM86 */

#ifdef SMP
	/*
	 * Spin these up now.  init_secondary() grabs them.  We could use
	 * #for(x,y,z) / #endfor cpp directives if they existed.
	 */
	for (x = 0; x < NCPU; x++) {
		gdt_segs[NGDT + x] = gdt_segs[GPROC0_SEL];
		ssdtosd(&gdt_segs[NGDT + x], &gdt[NGDT + x].sd);
	}
#endif

	/* make ldt memory segments */
	/*
	 * The data segment limit must not cover the user area because we
	 * don't want the user area to be writable in copyout() etc. (page
	 * level protection is lost in kernel mode on 386's).  Also, we
	 * don't want the user area to be writable directly (page level
	 * protection of the user area is not available on 486's with
	 * CR0_WP set, because there is no user-read/kernel-write mode).
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  And it
	 * should be spelled ...MAX_USER...
	 */
#define VM_END_USER_RW_ADDRESS	VM_MAXUSER_ADDRESS
	/*
	 * The code segment limit has to cover the user area until we move
	 * the signal trampoline out of the user area.  This is safe because
	 * the code segment cannot be written to directly.
	 */
#define VM_END_USER_R_ADDRESS	(VM_END_USER_RW_ADDRESS + UPAGES * PAGE_SIZE)
	ldt_segs[LUCODE_SEL].ssd_limit = i386_btop(VM_END_USER_R_ADDRESS) - 1;
	ldt_segs[LUDATA_SEL].ssd_limit = i386_btop(VM_END_USER_RW_ADDRESS) - 1;
	for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
		ssdtosd(&ldt_segs[x], &ldt[x].sd);

	/* exceptions */
	for (x = 0; x < NIDT; x++)
		setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(0, &IDTVEC(div),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(1, &IDTVEC(dbg),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(2, &IDTVEC(nmi),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(3, &IDTVEC(bpt),  SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(4, &IDTVEC(ofl),  SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(5, &IDTVEC(bnd),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(6, &IDTVEC(ill),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(7, &IDTVEC(dna),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(8, 0,  SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
	setidt(9, &IDTVEC(fpusegm),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(10, &IDTVEC(tss),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(11, &IDTVEC(missing),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(12, &IDTVEC(stk),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(13, &IDTVEC(prot),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(14, &IDTVEC(page),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(15, &IDTVEC(rsvd),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(16, &IDTVEC(fpu),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(17, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(18, &IDTVEC(mchk),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(0x80, &IDTVEC(int0x80_syscall),
			SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));

#include	"isa.h"
#if	NISA > 0
	isa_defaultirq();
#endif
	rand_initialize();

	r_gdt.rd_limit = sizeof(gdt) - 1;
	r_gdt.rd_base = (int) gdt;
	lgdt(&r_gdt);

	r_idt.rd_limit = sizeof(idt) - 1;
	r_idt.rd_base = (int) idt;
	lidt(&r_idt);

	_default_ldt = GSEL(GLDT_SEL, SEL_KPL);
	lldt(_default_ldt);
	currentldt = _default_ldt;

#ifdef DDB
	kdb_init();
	if (boothowto & RB_KDB)
		Debugger("Boot flags requested debugger");
#endif

	finishidentcpu();	/* Final stage of CPU initialization */
	setidt(6, &IDTVEC(ill),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(13, &IDTVEC(prot),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	initializecpu();	/* Initialize CPU registers */

	/* Use BIOS values stored in RTC CMOS RAM, since probing
	 * breaks certain 386 AT relics.
	 */
	biosbasemem = rtcin(RTC_BASELO) + (rtcin(RTC_BASEHI)<<8);
	biosextmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI)<<8);

	/*
	 * If BIOS tells us that it has more than 640k in the basemem,
	 *	don't believe it - set it to 640k.
	 */
	if (biosbasemem > 640) {
		printf("Preposterous RTC basemem of %dK, truncating to 640K\n",
			biosbasemem);
		biosbasemem = 640;
	}
	if (bootinfo.bi_memsizes_valid && bootinfo.bi_basemem > 640) {
		printf("Preposterous BIOS basemem of %dK, truncating to 640K\n",
			bootinfo.bi_basemem);
		bootinfo.bi_basemem = 640;
	}

	/*
	 * Warn if the official BIOS interface disagrees with the RTC
	 * interface used above about the amount of base memory or the
	 * amount of extended memory.  Prefer the BIOS value for the base
	 * memory.  This is necessary for machines that `steal' base
	 * memory for use as BIOS memory, at least if we are going to use
	 * the BIOS for apm.  Prefer the RTC value for extended memory.
	 * Eventually the hackish interface shouldn't even be looked at.
	 */
	if (bootinfo.bi_memsizes_valid) {
		if (bootinfo.bi_basemem != biosbasemem) {
			vm_offset_t pa;

			printf(
	"BIOS basemem (%ldK) != RTC basemem (%dK), setting to BIOS value\n",
			    bootinfo.bi_basemem, biosbasemem);
			biosbasemem = bootinfo.bi_basemem;

			/*
			 * XXX if biosbasemem is now < 640, there is a `hole'
			 * between the end of base memory and the start of
			 * ISA memory.  The hole may be empty or it may
			 * contain BIOS code or data.  Map it read/write so
			 * that the BIOS can write to it.  (Memory from 0 to
			 * the physical end of the kernel is mapped read-only
			 * to begin with and then parts of it are remapped.
			 * The parts that aren't remapped form holes that
			 * remain read-only and are unused by the kernel.
			 * The base memory area is below the physical end of
			 * the kernel and right now forms a read-only hole.
			 * The part of it from 0 to
			 * (trunc_page(biosbasemem * 1024) - 1) will be
			 * remapped and used by the kernel later.)
			 *
			 * This code is similar to the code used in
			 * pmap_mapdev, but since no memory needs to be
			 * allocated we simply change the mapping.
			 */
			for (pa = trunc_page(biosbasemem * 1024);
			     pa < ISA_HOLE_START; pa += PAGE_SIZE) {
				unsigned *pte;

				pte = (unsigned *)vtopte(pa + KERNBASE);
				*pte = pa | PG_RW | PG_V;
			}
		}
		if (bootinfo.bi_extmem != biosextmem)
			printf("BIOS extmem (%ldK) != RTC extmem (%dK)\n",
			    bootinfo.bi_extmem, biosextmem);
	}

#ifdef SMP
	/* make hole for AP bootstrap code */
	pagesinbase = mp_bootaddress(biosbasemem) / PAGE_SIZE;
#else
	pagesinbase = biosbasemem * 1024 / PAGE_SIZE;
#endif

	pagesinext = biosextmem * 1024 / PAGE_SIZE;

	/*
	 * Special hack for chipsets that still remap the 384k hole when
	 * there's 16MB of memory - this really confuses people that
	 * are trying to use bus mastering ISA controllers with the
	 * "16MB limit"; they only have 16MB, but the remapping puts
	 * them beyond the limit.
	 */
	/*
	 * If extended memory is between 15-16MB (16-17MB phys address range),
	 *	chop it to 15MB.
	 */
	if ((pagesinext > 3840) && (pagesinext < 4096))
		pagesinext = 3840;

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".
	 */
	Maxmem = pagesinext + 0x100000/PAGE_SIZE;
	/*
	 * Indicate that we wish to do a speculative search for memory beyond
	 * the end of the reported size if the indicated amount is 64MB (0x4000
	 * pages) - which is the largest amount that the BIOS/bootblocks can
	 * currently report.  If a specific amount of memory is indicated via
	 * the MAXMEM option or the npx0 "msize", then don't do the speculative
	 * memory probe.
	 */
	if (Maxmem >= 0x4000)
		speculative_mprobe = TRUE;
	else
		speculative_mprobe = FALSE;

#ifdef MAXMEM
	Maxmem = MAXMEM/4;
	speculative_mprobe = FALSE;
#endif

#if NNPX > 0
	idp = find_isadev(isa_devtab_null, &npxdriver, 0);
	if (idp != NULL && idp->id_msize != 0) {
		Maxmem = idp->id_msize / 4;
		speculative_mprobe = FALSE;
	}
#endif

#ifdef SMP
	/* look for the MP hardware - needed for apic addresses */
	mp_probe();
#endif

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(first, 0);

	/*
	 * Size up each available chunk of physical memory.
	 */

	/*
	 * We currently don't bother testing base memory.
	 * XXX  ...but we probably should.
	 */
1379 */ 1380 pa_indx = 0; 1381 if (pagesinbase > 1) { 1382 phys_avail[pa_indx++] = PAGE_SIZE; /* skip first page of memory */ 1383 phys_avail[pa_indx] = ptoa(pagesinbase);/* memory up to the ISA hole */ 1384 physmem = pagesinbase - 1; 1385 } else { 1386 /* point at first chunk end */ 1387 pa_indx++; 1388 } 1389 1390 for (target_page = avail_start; target_page < ptoa(Maxmem); target_page += PAGE_SIZE) { 1391 int tmp, page_bad; 1392 1393 page_bad = FALSE; 1394 1395 /* 1396 * map page into kernel: valid, read/write, non-cacheable 1397 */ 1398 *(int *)CMAP1 = PG_V | PG_RW | PG_N | target_page; 1399 invltlb(); 1400 1401 tmp = *(int *)CADDR1; 1402 /* 1403 * Test for alternating 1's and 0's 1404 */ 1405 *(volatile int *)CADDR1 = 0xaaaaaaaa; 1406 if (*(volatile int *)CADDR1 != 0xaaaaaaaa) { 1407 page_bad = TRUE; 1408 } 1409 /* 1410 * Test for alternating 0's and 1's 1411 */ 1412 *(volatile int *)CADDR1 = 0x55555555; 1413 if (*(volatile int *)CADDR1 != 0x55555555) { 1414 page_bad = TRUE; 1415 } 1416 /* 1417 * Test for all 1's 1418 */ 1419 *(volatile int *)CADDR1 = 0xffffffff; 1420 if (*(volatile int *)CADDR1 != 0xffffffff) { 1421 page_bad = TRUE; 1422 } 1423 /* 1424 * Test for all 0's 1425 */ 1426 *(volatile int *)CADDR1 = 0x0; 1427 if (*(volatile int *)CADDR1 != 0x0) { 1428 /* 1429 * test of page failed 1430 */ 1431 page_bad = TRUE; 1432 } 1433 /* 1434 * Restore original value. 1435 */ 1436 *(int *)CADDR1 = tmp; 1437 1438 /* 1439 * Adjust array of valid/good pages. 1440 */ 1441 if (page_bad == FALSE) { 1442 /* 1443 * If this good page is a continuation of the 1444 * previous set of good pages, then just increase 1445 * the end pointer. Otherwise start a new chunk. 1446 * Note that "end" points one higher than end, 1447 * making the range >= start and < end. 1448 * If we're also doing a speculative memory 1449 * test and we at or past the end, bump up Maxmem 1450 * so that we keep going. The first bad page 1451 * will terminate the loop. 1452 */ 1453 if (phys_avail[pa_indx] == target_page) { 1454 phys_avail[pa_indx] += PAGE_SIZE; 1455 if (speculative_mprobe == TRUE && 1456 phys_avail[pa_indx] >= (64*1024*1024)) 1457 Maxmem++; 1458 } else { 1459 pa_indx++; 1460 if (pa_indx == PHYS_AVAIL_ARRAY_END) { 1461 printf("Too many holes in the physical address space, giving up\n"); 1462 pa_indx--; 1463 break; 1464 } 1465 phys_avail[pa_indx++] = target_page; /* start */ 1466 phys_avail[pa_indx] = target_page + PAGE_SIZE; /* end */ 1467 } 1468 physmem++; 1469 } 1470 } 1471 1472 *(int *)CMAP1 = 0; 1473 invltlb(); 1474 1475 /* 1476 * XXX 1477 * The last chunk must contain at least one page plus the message 1478 * buffer to avoid complicating other code (message buffer address 1479 * calculation, etc.). 1480 */ 1481 while (phys_avail[pa_indx - 1] + PAGE_SIZE + 1482 round_page(sizeof(struct msgbuf)) >= phys_avail[pa_indx]) { 1483 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]); 1484 phys_avail[pa_indx--] = 0; 1485 phys_avail[pa_indx--] = 0; 1486 } 1487 1488 Maxmem = atop(phys_avail[pa_indx]); 1489 1490 /* Trim off space for the message buffer. */ 1491 phys_avail[pa_indx] -= round_page(sizeof(struct msgbuf)); 1492 1493 avail_end = phys_avail[pa_indx]; 1494 1495 /* now running on new page tables, configured,and u/iom is accessible */ 1496 1497 /* Map the message buffer. 
	/* now running on new page tables, configured, and u/iom is accessible */

	/* Map the message buffer. */
	for (off = 0; off < round_page(sizeof(struct msgbuf)); off += PAGE_SIZE)
		pmap_enter(kernel_pmap, (vm_offset_t)msgbufp + off,
			   avail_end + off, VM_PROT_ALL, TRUE);
	msgbufmapped = 1;

	/* make an initial tss so cpu can get interrupt stack on syscall! */
#ifdef VM86
	common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16;
#else
	common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE;
#endif /* VM86 */
	common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	common_tss.tss_ioopt = (sizeof common_tss) << 16;
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);
#ifdef VM86
	private_tss = 0;
	my_tr = GPROC0_SEL;
#endif

	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
	    dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)];
	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
	    dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_cr3 = (int)IdlePTD;
	dblfault_tss.tss_eip = (int) dblfault_handler;
	dblfault_tss.tss_eflags = PSL_KERNEL;
	dblfault_tss.tss_ds = dblfault_tss.tss_es = dblfault_tss.tss_fs =
	    dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);

	/* make a call gate to reenter kernel with */
	gdp = &ldt[LSYS5CALLS_SEL].gd;

	x = (int) &IDTVEC(syscall);
	gdp->gd_looffset = x++;
	gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
	gdp->gd_stkcpy = 1;
	gdp->gd_type = SDT_SYS386CGT;
	gdp->gd_dpl = SEL_UPL;
	gdp->gd_p = 1;
	gdp->gd_hioffset = ((int) &IDTVEC(syscall)) >>16;

	/* XXX does this work? */
	ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];

	/* transfer to user mode */

	_ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
	_udatasel = LSEL(LUDATA_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	proc0.p_addr->u_pcb.pcb_flags = 0;
	proc0.p_addr->u_pcb.pcb_cr3 = (int)IdlePTD;
#ifdef SMP
	proc0.p_addr->u_pcb.pcb_mpnest = 1;
#endif
#ifdef VM86
	proc0.p_addr->u_pcb.pcb_ext = 0;
#endif
}

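/*
 * The F00F workaround below swaps in a relocated IDT laid out so that
 * the descriptors for vectors 0-6 occupy the tail of a page which is
 * then write-protected.  The idea (per the published workaround for the
 * Pentium "invalid operand with locked cmpxchg8b" erratum) is that the
 * processor's locked IDT access during the lockup condition raises a
 * page fault instead of wedging the machine; the page-fault vector
 * itself lives in the unprotected second page, so trap handling can
 * then deliver the expected illegal-instruction signal.
 */
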
(0); 1610} 1611 1612int ptrace_write_u(p, off, data) 1613 struct proc *p; 1614 vm_offset_t off; 1615 int data; 1616{ 1617 struct trapframe frame_copy; 1618 vm_offset_t min; 1619 struct trapframe *tp; 1620 1621 /* 1622 * Privileged kernel state is scattered all over the user area. 1623 * Only allow write access to parts of regs and to fpregs. 1624 */ 1625 min = (char *)p->p_md.md_regs - (char *)p->p_addr; 1626 if (off >= min && off <= min + sizeof(struct trapframe) - sizeof(int)) { 1627 tp = p->p_md.md_regs; 1628 frame_copy = *tp; 1629 *(int *)((char *)&frame_copy + (off - min)) = data; 1630 if (!EFLAGS_SECURE(frame_copy.tf_eflags, tp->tf_eflags) || 1631 !CS_SECURE(frame_copy.tf_cs)) 1632 return (EINVAL); 1633 *(int*)((char *)p->p_addr + off) = data; 1634 return (0); 1635 } 1636 min = offsetof(struct user, u_pcb) + offsetof(struct pcb, pcb_savefpu); 1637 if (off >= min && off <= min + sizeof(struct save87) - sizeof(int)) { 1638 *(int*)((char *)p->p_addr + off) = data; 1639 return (0); 1640 } 1641 return (EFAULT); 1642} 1643 1644int 1645fill_regs(p, regs) 1646 struct proc *p; 1647 struct reg *regs; 1648{ 1649 struct pcb *pcb; 1650 struct trapframe *tp; 1651 1652 tp = p->p_md.md_regs; 1653 regs->r_es = tp->tf_es; 1654 regs->r_ds = tp->tf_ds; 1655 regs->r_edi = tp->tf_edi; 1656 regs->r_esi = tp->tf_esi; 1657 regs->r_ebp = tp->tf_ebp; 1658 regs->r_ebx = tp->tf_ebx; 1659 regs->r_edx = tp->tf_edx; 1660 regs->r_ecx = tp->tf_ecx; 1661 regs->r_eax = tp->tf_eax; 1662 regs->r_eip = tp->tf_eip; 1663 regs->r_cs = tp->tf_cs; 1664 regs->r_eflags = tp->tf_eflags; 1665 regs->r_esp = tp->tf_esp; 1666 regs->r_ss = tp->tf_ss; 1667 pcb = &p->p_addr->u_pcb; 1668 regs->r_fs = pcb->pcb_fs; 1669 regs->r_gs = pcb->pcb_gs; 1670 return (0); 1671} 1672 1673int 1674set_regs(p, regs) 1675 struct proc *p; 1676 struct reg *regs; 1677{ 1678 struct pcb *pcb; 1679 struct trapframe *tp; 1680 1681 tp = p->p_md.md_regs; 1682 if (!EFLAGS_SECURE(regs->r_eflags, tp->tf_eflags) || 1683 !CS_SECURE(regs->r_cs)) 1684 return (EINVAL); 1685 tp->tf_es = regs->r_es; 1686 tp->tf_ds = regs->r_ds; 1687 tp->tf_edi = regs->r_edi; 1688 tp->tf_esi = regs->r_esi; 1689 tp->tf_ebp = regs->r_ebp; 1690 tp->tf_ebx = regs->r_ebx; 1691 tp->tf_edx = regs->r_edx; 1692 tp->tf_ecx = regs->r_ecx; 1693 tp->tf_eax = regs->r_eax; 1694 tp->tf_eip = regs->r_eip; 1695 tp->tf_cs = regs->r_cs; 1696 tp->tf_eflags = regs->r_eflags; 1697 tp->tf_esp = regs->r_esp; 1698 tp->tf_ss = regs->r_ss; 1699 pcb = &p->p_addr->u_pcb; 1700 pcb->pcb_fs = regs->r_fs; 1701 pcb->pcb_gs = regs->r_gs; 1702 return (0); 1703} 1704 1705#ifndef DDB 1706void 1707Debugger(const char *msg) 1708{ 1709 printf("Debugger(\"%s\") called.\n", msg); 1710} 1711#endif /* no DDB */ 1712 1713#include <sys/disklabel.h> 1714 1715/* 1716 * Determine the size of the transfer, and make sure it is 1717 * within the boundaries of the partition. Adjust transfer 1718 * if needed, and signal errors or early completion. 1719 */ 1720int 1721bounds_check_with_label(struct buf *bp, struct disklabel *lp, int wlabel) 1722{ 1723 struct partition *p = lp->d_partitions + dkpart(bp->b_dev); 1724 int labelsect = lp->d_partitions[0].p_offset; 1725 int maxsz = p->p_size, 1726 sz = (bp->b_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT; 1727 1728 /* overwriting disk label ? 
#include <sys/disklabel.h>

/*
 * Determine the size of the transfer, and make sure it is
 * within the boundaries of the partition.  Adjust transfer
 * if needed, and signal errors or early completion.
 */
int
bounds_check_with_label(struct buf *bp, struct disklabel *lp, int wlabel)
{
	struct partition *p = lp->d_partitions + dkpart(bp->b_dev);
	int labelsect = lp->d_partitions[0].p_offset;
	int maxsz = p->p_size,
		sz = (bp->b_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT;

	/* overwriting disk label? */
	/* XXX should also protect bootstrap in first 8K */
	if (bp->b_blkno + p->p_offset <= LABELSECTOR + labelsect &&
#if LABELSECTOR != 0
	    bp->b_blkno + p->p_offset + sz > LABELSECTOR + labelsect &&
#endif
	    (bp->b_flags & B_READ) == 0 && wlabel == 0) {
		bp->b_error = EROFS;
		goto bad;
	}

#if	defined(DOSBBSECTOR) && defined(notyet)
	/* overwriting master boot record? */
	if (bp->b_blkno + p->p_offset <= DOSBBSECTOR &&
	    (bp->b_flags & B_READ) == 0 && wlabel == 0) {
		bp->b_error = EROFS;
		goto bad;
	}
#endif

	/* beyond partition? */
	if (bp->b_blkno < 0 || bp->b_blkno + sz > maxsz) {
		/* if exactly at end of disk, return an EOF */
		if (bp->b_blkno == maxsz) {
			bp->b_resid = bp->b_bcount;
			return(0);
		}
		/* or truncate if part of it fits */
		sz = maxsz - bp->b_blkno;
		if (sz <= 0) {
			bp->b_error = EINVAL;
			goto bad;
		}
		bp->b_bcount = sz << DEV_BSHIFT;
	}

	bp->b_pblkno = bp->b_blkno + p->p_offset;
	return(1);

bad:
	bp->b_flags |= B_ERROR;
	return(-1);
}

#ifdef DDB

/*
 * Provide inb() and outb() as functions.  They are normally only
 * available as macros calling inlined functions, thus cannot be
 * called inside DDB.
 *
 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined.
 */

#undef inb
#undef outb

/* silence compiler warnings */
u_char inb(u_int);
void outb(u_int, u_char);

u_char
inb(u_int port)
{
	u_char	data;
	/*
	 * We use %%dx and not %1 here because i/o is done at %dx and not at
	 * %edx, while gcc generates inferior code (movw instead of movl)
	 * if we tell it to load (u_short) port.
	 */
	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

void
outb(u_int port, u_char data)
{
	u_char	al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

#endif /* DDB */