machdep.c revision 32464
/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 *	$Id: machdep.c,v 1.280 1997/12/27 02:28:27 peter Exp $
 */

#include "apm.h"
#include "npx.h"
#include "opt_bounce.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_maxmem.h"
#include "opt_perfmon.h"
#include "opt_smp.h"
#include "opt_sysvipc.h"
#include "opt_user_ldt.h"
#include "opt_userconfig.h"
#include "opt_vm86.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#ifdef SYSVMSG
#include <sys/msg.h>
#endif

#ifdef SYSVSEM
#include <sys/sem.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/user.h>
#include <sys/exec.h>

#include <ddb/ddb.h>

#include <net/netisr.h>

#if NAPM > 0
#include <machine/apm_bios.h>
#endif
#include <machine/cpu.h>
#include <machine/reg.h>
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/cons.h>
#include <machine/bootinfo.h>
#include <machine/ipl.h>
#include <machine/md_var.h>
#include <machine/pcb_ext.h>		/* pcb.h included via sys/user.h */
#ifdef SMP
#include <machine/smp.h>
#endif
#ifdef PERFMON
#include <machine/perfmon.h>
#endif

#include <i386/isa/isa_device.h>
#include <i386/isa/intr_machdep.h>
#include <i386/isa/rtc.h>
#include <machine/random.h>

extern void init386 __P((int first));
extern int ptrace_set_pc __P((struct proc *p, unsigned int addr));
extern int ptrace_single_step __P((struct proc *p));
extern int ptrace_write_u __P((struct proc *p, vm_offset_t off, int data));
extern void dblfault_handler __P((void));

extern void printcpuinfo(void);		/* XXX header file */
extern void earlysetcpuclass(void);	/* same header file */
extern void finishidentcpu(void);
extern void panicifcpuunsupported(void);
extern void initializecpu(void);

static void cpu_startup __P((void *));
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)

static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");

#ifdef BOUNCE_BUFFERS
#ifdef BOUNCEPAGES
int	bouncepages = BOUNCEPAGES;
#else
int	bouncepages = 0;
#endif
#endif	/* BOUNCE_BUFFERS */

int msgbufmapped = 0;		/* set when safe to use msgbuf */
int _udatasel, _ucodesel;
u_int	atdevbase;

#if defined(SWTCH_OPTIM_STATS)
extern int swtch_optim_stats;
SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats,
	CTLFLAG_RD, &swtch_optim_stats, 0, "");
SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count,
	CTLFLAG_RD, &tlb_flush_count, 0, "");
#endif

int physmem = 0;
int cold = 1;

static int
sysctl_hw_physmem SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp, 0, ctob(physmem), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_physmem, "I", "");
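/*
 * hw.physmem reports bytes, not pages: ctob() shifts the page count by
 * PAGE_SHIFT.  A userland reader might look roughly like the sketch
 * below (ordinary sysctl(3) usage; error handling trimmed to a minimum):
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		int mib[2] = { CTL_HW, HW_PHYSMEM };
 *		int mem;
 *		size_t len = sizeof(mem);
 *
 *		if (sysctl(mib, 2, &mem, &len, NULL, 0) == -1)
 *			return (1);
 *		printf("hw.physmem: %d bytes\n", mem);
 *		return (0);
 *	}
 */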
static int
sysctl_hw_usermem SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp, 0,
		ctob(physmem - cnt.v_wire_count), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_usermem, "I", "");

int bootverbose = 0, Maxmem = 0;
long dumplo;

vm_offset_t phys_avail[10];

/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)

static void setup_netisrs __P((struct linker_set *)); /* XXX declare elsewhere */

static vm_offset_t buffer_sva, buffer_eva;
vm_offset_t clean_sva, clean_eva;
static vm_offset_t pager_sva, pager_eva;
extern struct linker_set netisr_set;

#define offsetof(type, member)	((size_t)(&((type *)0)->member))

static void
cpu_startup(dummy)
	void *dummy;
{
	register unsigned i;
	register caddr_t v;
	vm_offset_t maxaddr;
	vm_size_t size = 0;
	int firstaddr;
	vm_offset_t minaddr;

	if (boothowto & RB_VERBOSE)
		bootverbose++;

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	earlysetcpuclass();
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory = %d (%dK bytes)\n", ptoa(Maxmem), ptoa(Maxmem) / 1024);
	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			int size1 = phys_avail[indx + 1] - phys_avail[indx];

			printf("0x%08lx - 0x%08lx, %d bytes (%d pages)\n", phys_avail[indx],
			    phys_avail[indx + 1] - 1, size1, size1 / PAGE_SIZE);
		}
	}

	/*
	 * Quickly wire in netisrs.
	 */
	setup_netisrs(&netisr_set);

	/*
	 * Calculate callout wheel size
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;
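	/*
	 * The loop above picks the smallest power of two >= ncallout, so
	 * the wheel can be indexed with a cheap AND of callwheelmask
	 * rather than a modulo.  E.g. ncallout = 300 yields
	 * callwheelsize = 512, callwheelbits = 9 and callwheelmask = 0x1ff,
	 * and a timeout hashes to callwheel[ticks & callwheelmask].
	 */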
	/*
	 * Allocate space for system data structures.
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */

	/*
	 * Make two passes.  The first pass calculates how much memory is
	 * needed and allocates it.  The second pass assigns virtual
	 * addresses to the various data structures.
	 */
	firstaddr = 0;
again:
	v = (caddr_t)firstaddr;

#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
	valloc(callout, struct callout, ncallout);
	valloc(callwheel, struct callout_tailq, callwheelsize);
#ifdef SYSVSHM
	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif
#ifdef SYSVSEM
	valloc(sema, struct semid_ds, seminfo.semmni);
	valloc(sem, struct sem, seminfo.semmns);
	/* This is pretty disgusting! */
	valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
#endif
#ifdef SYSVMSG
	valloc(msgpool, char, msginfo.msgmax);
	valloc(msgmaps, struct msgmap, msginfo.msgseg);
	valloc(msghdrs, struct msg, msginfo.msgtql);
	valloc(msqids, struct msqid_ds, msginfo.msgmni);
#endif

	if (nbuf == 0) {
		nbuf = 30;
		if (physmem > 1024)
			nbuf += min((physmem - 1024) / 8, 2048);
	}
	nswbuf = max(min(nbuf/4, 128), 16);

	valloc(swbuf, struct buf, nswbuf);
	valloc(buf, struct buf, nbuf);

#ifdef BOUNCE_BUFFERS
	/*
	 * If there is more than 16MB of memory, allocate some bounce buffers
	 */
	if (Maxmem > 4096) {
		if (bouncepages == 0) {
			bouncepages = 64;
			bouncepages += ((Maxmem - 4096) / 2048) * 32;
			if (bouncepages > 128)
				bouncepages = 128;
		}
		v = (caddr_t)((vm_offset_t)round_page(v));
		valloc(bouncememory, char, bouncepages * PAGE_SIZE);
	}
#endif

	/*
	 * End of first pass, size has been calculated so allocate memory
	 */
	if (firstaddr == 0) {
		size = (vm_size_t)(v - firstaddr);
		firstaddr = (int)kmem_alloc(kernel_map, round_page(size));
		if (firstaddr == 0)
			panic("startup: no room for tables");
		goto again;
	}

	/*
	 * End of second pass, addresses have been assigned
	 */
	if ((vm_size_t)(v - firstaddr) != size)
		panic("startup: table size inconsistency");

#ifdef BOUNCE_BUFFERS
	clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
			(nbuf*BKVASIZE) + (nswbuf*MAXPHYS) +
			maxbkva + pager_map_size, TRUE);
	io_map = kmem_suballoc(clean_map, &minaddr, &maxaddr, maxbkva, FALSE);
#else
	clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
			(nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size, TRUE);
#endif
	buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
				(nbuf*BKVASIZE), TRUE);
	pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
				(nswbuf*MAXPHYS) + pager_map_size, TRUE);
	pager_map->system_map = 1;
	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
				(16*(ARG_MAX+PAGE_SIZE)), TRUE);
	u_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
				(maxproc*UPAGES*PAGE_SIZE), FALSE);

	/*
	 * Finally, allocate mbuf pool.  Since mclrefcnt is an off-size
	 * we use the more space efficient malloc in place of kmem_alloc.
	 */
	{
		vm_offset_t mb_map_size;

		mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES;
		mb_map_size = roundup2(mb_map_size, max(MCLBYTES, PAGE_SIZE));
		mclrefcnt = malloc(mb_map_size / MCLBYTES, M_MBUF, M_NOWAIT);
		bzero(mclrefcnt, mb_map_size / MCLBYTES);
		mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr,
			mb_map_size, FALSE);
		mb_map->system_map = 1;
	}

	/*
	 * Initialize callouts
	 */
	SLIST_INIT(&callfree);
	for (i = 0; i < ncallout; i++) {
		SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
	}

	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&callwheel[i]);
	}

#if defined(USERCONFIG)
#if defined(USERCONFIG_BOOT)
	if (1) {
#else
	if (boothowto & RB_CONFIG) {
#endif
		userconfig();
		cninit();	/* the preferred console may have changed */
	}
#endif

#ifdef BOUNCE_BUFFERS
	/*
	 * init bounce buffers
	 */
	vm_bounce_init();
#endif

	printf("avail memory = %d (%dK bytes)\n", ptoa(cnt.v_free_count),
	    ptoa(cnt.v_free_count) / 1024);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();

#ifdef SMP
	/*
	 * OK, enough kmem_alloc/malloc state should be up, lets get on with it!
	 */
	mp_start();			/* fire up the APs and APICs */
	mp_announce();
#endif  /* SMP */
}

int
register_netisr(num, handler)
	int num;
	netisr_t *handler;
{

	if (num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs)) ) {
		printf("register_netisr: bad isr number: %d\n", num);
		return (EINVAL);
	}
	netisrs[num] = handler;
	return (0);
}

static void
setup_netisrs(ls)
	struct linker_set *ls;
{
	int i;
	const struct netisrtab *nit;

	for(i = 0; ls->ls_items[i]; i++) {
		nit = (const struct netisrtab *)ls->ls_items[i];
		register_netisr(nit->nit_num, nit->nit_isr);
	}
}

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig, mask;
	u_long code;
{
	register struct proc *p = curproc;
	register struct trapframe *regs;
	register struct sigframe *fp;
	struct sigframe sf;
	struct sigacts *psp = p->p_sigacts;
	int oonstack;

	regs = p->p_md.md_regs;
	oonstack = psp->ps_sigstk.ss_flags & SS_ONSTACK;
	/*
	 * Allocate and validate space for the signal handler context.
	 */
	if ((psp->ps_flags & SAS_ALTSTACK) && !oonstack &&
	    (psp->ps_sigonstack & sigmask(sig))) {
		fp = (struct sigframe *)(psp->ps_sigstk.ss_sp +
		    psp->ps_sigstk.ss_size - sizeof(struct sigframe));
		psp->ps_sigstk.ss_flags |= SS_ONSTACK;
	} else {
		fp = (struct sigframe *)regs->tf_esp - 1;
	}

	/*
	 * grow() will return FALSE if the fp will not fit inside the stack
	 * and the stack can not be grown.  useracc will return FALSE
	 * if access is denied.
	 */
	if ((grow(p, (int)fp) == FALSE) ||
	    (useracc((caddr_t)fp, sizeof(struct sigframe), B_WRITE) == FALSE)) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		SIGACTION(p, SIGILL) = SIG_DFL;
		sig = sigmask(SIGILL);
		p->p_sigignore &= ~sig;
		p->p_sigcatch &= ~sig;
		p->p_sigmask &= ~sig;
		psignal(p, SIGILL);
		return;
	}

	/*
	 * Build the argument list for the signal handler.
	 */
	if (p->p_sysent->sv_sigtbl) {
		if (sig < p->p_sysent->sv_sigsize)
			sig = p->p_sysent->sv_sigtbl[sig];
		else
			sig = p->p_sysent->sv_sigsize + 1;
	}
	sf.sf_signum = sig;
	sf.sf_code = code;
	sf.sf_scp = &fp->sf_sc;
	sf.sf_addr = (char *) regs->tf_err;
	sf.sf_handler = catcher;

	/* save scratch registers */
	sf.sf_sc.sc_eax = regs->tf_eax;
	sf.sf_sc.sc_ebx = regs->tf_ebx;
	sf.sf_sc.sc_ecx = regs->tf_ecx;
	sf.sf_sc.sc_edx = regs->tf_edx;
	sf.sf_sc.sc_esi = regs->tf_esi;
	sf.sf_sc.sc_edi = regs->tf_edi;
	sf.sf_sc.sc_cs = regs->tf_cs;
	sf.sf_sc.sc_ds = regs->tf_ds;
	sf.sf_sc.sc_ss = regs->tf_ss;
	sf.sf_sc.sc_es = regs->tf_es;
	sf.sf_sc.sc_isp = regs->tf_isp;

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	sf.sf_sc.sc_onstack = oonstack;
	sf.sf_sc.sc_mask = mask;
	sf.sf_sc.sc_sp = regs->tf_esp;
	sf.sf_sc.sc_fp = regs->tf_ebp;
	sf.sf_sc.sc_pc = regs->tf_eip;
	sf.sf_sc.sc_ps = regs->tf_eflags;
	sf.sf_sc.sc_trapno = regs->tf_trapno;
	sf.sf_sc.sc_err = regs->tf_err;

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86;

		sf.sf_sc.sc_gs = tf->tf_vm86_gs;
		sf.sf_sc.sc_fs = tf->tf_vm86_fs;
		sf.sf_sc.sc_es = tf->tf_vm86_es;
		sf.sf_sc.sc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_sc.sc_ps = (tf->tf_eflags & ~(PSL_VIF | PSL_VIP))
			    | (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * We should never have PSL_T set when returning from vm86
		 * mode.  It may be set here if we deliver a signal before
		 * getting to vm86 mode, so turn it off.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_T | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, fp, sizeof(struct sigframe)) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		sigexit(p, SIGILL);
	}

	regs->tf_esp = (int)fp;
	regs->tf_eip = (int)(((char *)PS_STRINGS) - *(p->p_sysent->sv_szsigcode));
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_ss = _udatasel;
}
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 */
int
sigreturn(p, uap)
	struct proc *p;
	struct sigreturn_args /* {
		struct sigcontext *sigcntxp;
	} */ *uap;
{
	register struct sigcontext *scp;
	register struct sigframe *fp;
	register struct trapframe *regs = p->p_md.md_regs;
	int eflags;

	/*
	 * (XXX old comment) regs->tf_esp points to the return address.
	 * The user scp pointer is above that.
	 * The return address is faked in the signal trampoline code
	 * for consistency.
	 */
	scp = uap->sigcntxp;
	fp = (struct sigframe *)
	     ((caddr_t)scp - offsetof(struct sigframe, sf_sc));

	if (useracc((caddr_t)fp, sizeof (*fp), B_WRITE) == 0)
		return(EFAULT);

	eflags = scp->sc_ps;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (p->p_addr->u_pcb.pcb_ext == 0)
			return (EINVAL);
		vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* go back to user mode if both flags are set */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(p, SIGBUS, 0);

#define VM_USERCHANGE	(PSL_USERCHANGE | PSL_RF)
#define VME_USERCHANGE	(VM_USERCHANGE | PSL_VIP | PSL_VIF)
		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) | (eflags & VM_USERCHANGE) | PSL_VM;
		}
		tf->tf_vm86_ds = scp->sc_ds;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
#define	EFLAGS_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
		/*
		 * EFLAGS_SECURE() XORs the proposed eflags with the current
		 * ones and masks off the user-changeable bits; any remaining
		 * set bit means a privileged or reserved flag (e.g. the
		 * PSL_IOPL field) would change, and the request is refused.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFLAGS_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
#ifdef DEBUG
			printf("sigreturn: eflags = 0x%x\n", eflags);
#endif
			return(EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
#define	CS_SECURE(cs)	(ISPL(cs) == SEL_UPL)
		if (!CS_SECURE(scp->sc_cs)) {
#ifdef DEBUG
			printf("sigreturn: cs = 0x%x\n", scp->sc_cs);
#endif
			trapsignal(p, SIGBUS, T_PROTFLT);
			return(EINVAL);
		}
		regs->tf_ds = scp->sc_ds;
		regs->tf_es = scp->sc_es;
	}
	/* restore scratch registers */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;

	if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0)
		return(EINVAL);

	if (scp->sc_onstack & 01)
		p->p_sigacts->ps_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SS_ONSTACK;
	p->p_sigmask = scp->sc_mask & ~sigcantmask;
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;
	return(EJUSTRETURN);
}

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		__asm__ ("hlt");
}

/*
 * Turn the power off.
 */
void
cpu_power_down(void)
{
#if NAPM > 0
	apm_power_off();
#endif
}

/*
 * Clear registers on exec
 */
void
setregs(p, entry, stack)
	struct proc *p;
	u_long entry;
	u_long stack;
{
	struct trapframe *regs = p->p_md.md_regs;

#ifdef USER_LDT
	struct pcb *pcb = &p->p_addr->u_pcb;

	/* was i386_user_cleanup() in NetBSD */
	if (pcb->pcb_ldt) {
		if (pcb == curpcb)
			lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
		kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ldt,
			pcb->pcb_ldt_len * sizeof(union descriptor));
		pcb->pcb_ldt_len = (int)pcb->pcb_ldt = 0;
	}
#endif

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = entry;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_cs = _ucodesel;

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	p->p_addr->u_pcb.pcb_flags &= ~FP_SOFTFP;

	/*
	 * Arrange to trap the next npx or `fwait' instruction (see npx.c
	 * for why fwait must be trapped at least if there is an npx or an
	 * emulator).  This is mainly to handle the case where npx0 is not
	 * configured, since the npx routines normally set up the trap
	 * otherwise.  It should be done only at boot time, but doing it
	 * here allows modifying `npx_exists' for testing the emulator on
	 * systems with an npx.
	 */
	load_cr0(rcr0() | CR0_MP | CR0_TS);

#if NNPX > 0
	/* Initialize the npx (if any) for the current process. */
	npxinit(__INITIAL_NPXCW__);
#endif
}

static int
sysctl_machdep_adjkerntz SYSCTL_HANDLER_ARGS
{
	int error;
	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
		req);
	if (!error && req->newptr)
		resettodr();
	return (error);
}

SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
	&adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");

SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set,
	CTLFLAG_RW, &disable_rtc_set, 0, "");

SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo,
	CTLFLAG_RD, &bootinfo, bootinfo, "");

SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock,
	CTLFLAG_RW, &wall_cmos_clock, 0, "");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int currentldt;
int _default_ldt;
#ifdef SMP
union descriptor gdt[NGDT + NCPU];	/* global descriptor table */
#else
union descriptor gdt[NGDT];		/* global descriptor table */
#endif
struct gate_descriptor idt[NIDT];	/* interrupt descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */
#ifdef SMP
/* table descriptors - used to load tables by microp */
struct region_descriptor r_gdt, r_idt;
#endif

#ifdef SMP
extern struct i386tss common_tss;	/* One tss per cpu */
#ifdef VM86
extern struct segment_descriptor common_tssd;
extern int private_tss;
extern u_int my_tr;
#endif /* VM86 */
#else
struct i386tss common_tss;
#ifdef VM86
struct segment_descriptor common_tssd;
u_int private_tss;			/* flag indicating private tss */
u_int my_tr;				/* which task register setting */
#endif /* VM86 */
#endif

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
struct gate_descriptor *t_idt;
extern int has_f00f_bug;
#endif

static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];

extern struct user *proc0paddr;


/* software prototypes -- in more palatable form */
struct soft_segment_descriptor gdt_segs[
#ifdef SMP
	NGDT + NCPU
#endif
	] = {
/* GNULL_SEL	0 Null Descriptor */
{	0x0,			/* segment base address  */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GCODE_SEL	1 Code Descriptor for kernel */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GDATA_SEL	2 Data Descriptor for kernel */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GLDT_SEL	3 LDT Descriptor */
{	(int) ldt,		/* segment base address  */
	sizeof(ldt)-1,		/* length - all address space */
	SDT_SYSLDT,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GTGATE_SEL	4 Null Descriptor - Placeholder */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GPANIC_SEL	5 Panic Tss Descriptor */
{	(int) &dblfault_tss,	/* segment base address  */
	sizeof(struct i386tss)-1,/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GPROC0_SEL	6 Proc 0 Tss Descriptor */
{
	(int) &common_tss,	/* segment base address */
	sizeof(struct i386tss)-1,/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GUSERLDT_SEL	7 User LDT Descriptor per process */
{	(int) ldt,		/* segment base address  */
	(512 * sizeof(union descriptor)-1),		/* length */
	SDT_SYSLDT,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GAPMCODE32_SEL 8 APM BIOS 32-bit interface (32bit Code) */
{	0,			/* segment base address (overwritten by APM)  */
	0xfffff,		/* length */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GAPMCODE16_SEL 9 APM BIOS 32-bit interface (16bit Code) */
{	0,			/* segment base address (overwritten by APM)  */
	0xfffff,		/* length */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GAPMDATA_SEL	10 APM BIOS 32-bit interface (Data) */
{	0,			/* segment base address (overwritten by APM) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
};

static struct soft_segment_descriptor ldt_segs[] = {
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Code Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
	/* Data Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
};

void
setidt(idx, func, typ, dpl, selec)
	int idx;
	inthand_t *func;
	int typ;
	int dpl;
	int selec;
{
	struct gate_descriptor *ip = idt + idx;

	ip->gd_looffset = (int)func;
	ip->gd_selector = selec;
	ip->gd_stkcpy = 0;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((int)func)>>16;
}

#define	IDTVEC(name)	__CONCAT(X,name)

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(syscall), IDTVEC(int0x80_syscall);

void
sdtossd(sd, ssd)
	struct segment_descriptor *sd;
	struct soft_segment_descriptor *ssd;
{
	ssd->ssd_base  = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type  = sd->sd_type;
	ssd->ssd_dpl   = sd->sd_dpl;
	ssd->ssd_p     = sd->sd_p;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran  = sd->sd_gran;
}

/*
 * Gate and segment descriptors keep their address fields split: setidt()
 * above stores the low 16 bits of the handler in gd_looffset and the high
 * 16 in gd_hioffset (a handler at 0xc0101234 ends up as 0x1234/0xc010),
 * while sdtossd() performs the reverse reassembly for segment descriptors,
 * gluing sd_hibase/sd_lobase and sd_hilimit/sd_lolimit back into flat
 * ssd_base and ssd_limit values.
 */

void
init386(first)
	int first;
{
	int x;
	unsigned biosbasemem, biosextmem;
	struct gate_descriptor *gdp;
	int gsel_tss;

	struct isa_device *idp;
#ifndef SMP
	/* table descriptors - used to load tables by microp */
	struct region_descriptor r_gdt, r_idt;
#endif
	int pagesinbase, pagesinext;
	int target_page, pa_indx;
	int off;
	int speculative_mprobe;

	/*
	 * Prevent lowering of the ipl if we call tsleep() early.
	 */
	safepri = cpl;

	proc0.p_addr = proc0paddr;

	atdevbase = ISA_HOLE_START + KERNBASE;

	/*
	 * make gdt memory segments, the code segment goes up to end of the
	 * page with etext in it, the data segment goes to the end of
	 * the address space
	 */
	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

	/*
	 * XXX text protection is temporarily (?) disabled.  The limit was
	 * i386_btop(round_page(etext)) - 1.
	 */
	gdt_segs[GCODE_SEL].ssd_limit = i386_btop(0) - 1;
	gdt_segs[GDATA_SEL].ssd_limit = i386_btop(0) - 1;
#ifdef BDE_DEBUGGER
#define NGDT1	8		/* avoid overwriting db entries with APM ones */
#else
#define	NGDT1	(sizeof gdt_segs / sizeof gdt_segs[0])
#endif
	for (x = 0; x < NGDT1; x++)
		ssdtosd(&gdt_segs[x], &gdt[x].sd);
#ifdef VM86
	common_tssd = gdt[GPROC0_SEL].sd;
#endif /* VM86 */

#ifdef SMP
	/*
	 * Spin these up now.  init_secondary() grabs them.  We could use
	 * #for(x,y,z) / #endfor cpp directives if they existed.
	 */
	for (x = 0; x < NCPU; x++) {
		gdt_segs[NGDT + x] = gdt_segs[GPROC0_SEL];
		ssdtosd(&gdt_segs[NGDT + x], &gdt[NGDT + x].sd);
	}
#endif

	/* make ldt memory segments */
	/*
	 * The data segment limit must not cover the user area because we
	 * don't want the user area to be writable in copyout() etc. (page
	 * level protection is lost in kernel mode on 386's).  Also, we
	 * don't want the user area to be writable directly (page level
	 * protection of the user area is not available on 486's with
	 * CR0_WP set, because there is no user-read/kernel-write mode).
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  And it
	 * should be spelled ...MAX_USER...
	 */
#define VM_END_USER_RW_ADDRESS	VM_MAXUSER_ADDRESS
	/*
	 * The code segment limit has to cover the user area until we move
	 * the signal trampoline out of the user area.  This is safe because
	 * the code segment cannot be written to directly.
	 */
#define VM_END_USER_R_ADDRESS	(VM_END_USER_RW_ADDRESS + UPAGES * PAGE_SIZE)
	ldt_segs[LUCODE_SEL].ssd_limit = i386_btop(VM_END_USER_R_ADDRESS) - 1;
	ldt_segs[LUDATA_SEL].ssd_limit = i386_btop(VM_END_USER_RW_ADDRESS) - 1;
	for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
		ssdtosd(&ldt_segs[x], &ldt[x].sd);

	/* exceptions */
	for (x = 0; x < NIDT; x++)
		setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(0, &IDTVEC(div),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(1, &IDTVEC(dbg),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(2, &IDTVEC(nmi),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(3, &IDTVEC(bpt),  SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(4, &IDTVEC(ofl),  SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(5, &IDTVEC(bnd),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(6, &IDTVEC(ill),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(7, &IDTVEC(dna),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(8, 0,  SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
	setidt(9, &IDTVEC(fpusegm),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(10, &IDTVEC(tss),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(11, &IDTVEC(missing),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(12, &IDTVEC(stk),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(13, &IDTVEC(prot),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(14, &IDTVEC(page),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(15, &IDTVEC(rsvd),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(16, &IDTVEC(fpu),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(17, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(18, &IDTVEC(mchk),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(0x80, &IDTVEC(int0x80_syscall),
			SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));

#include	"isa.h"
#if	NISA > 0
	isa_defaultirq();
#endif
	rand_initialize();

	r_gdt.rd_limit = sizeof(gdt) - 1;
	r_gdt.rd_base = (int) gdt;
	lgdt(&r_gdt);

	r_idt.rd_limit = sizeof(idt) - 1;
	r_idt.rd_base = (int) idt;
	lidt(&r_idt);

	_default_ldt = GSEL(GLDT_SEL, SEL_KPL);
	lldt(_default_ldt);
	currentldt = _default_ldt;

#ifdef DDB
	kdb_init();
	if (boothowto & RB_KDB)
		Debugger("Boot flags requested debugger");
#endif

	finishidentcpu();	/* Final stage of CPU initialization */
	setidt(6, &IDTVEC(ill),  SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	initializecpu();	/* Initialize CPU registers */

	/* Use BIOS values stored in RTC CMOS RAM, since probing
	 * breaks certain 386 AT relics.
	 */
	biosbasemem = rtcin(RTC_BASELO) + (rtcin(RTC_BASEHI)<<8);
	biosextmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI)<<8);

	/*
	 * If BIOS tells us that it has more than 640k in the basemem,
	 * don't believe it - set it to 640k.
	 */
	if (biosbasemem > 640) {
		printf("Preposterous RTC basemem of %dK, truncating to 640K\n",
			biosbasemem);
		biosbasemem = 640;
	}
	if (bootinfo.bi_memsizes_valid && bootinfo.bi_basemem > 640) {
		printf("Preposterous BIOS basemem of %dK, truncating to 640K\n",
			bootinfo.bi_basemem);
		bootinfo.bi_basemem = 640;
	}

	/*
	 * Warn if the official BIOS interface disagrees with the RTC
	 * interface used above about the amount of base memory or the
	 * amount of extended memory.  Prefer the BIOS value for the base
	 * memory.  This is necessary for machines that `steal' base
	 * memory for use as BIOS memory, at least if we are going to use
	 * the BIOS for apm.  Prefer the RTC value for extended memory.
	 * Eventually the hackish interface shouldn't even be looked at.
	 */
	if (bootinfo.bi_memsizes_valid) {
		if (bootinfo.bi_basemem != biosbasemem) {
			vm_offset_t pa;

			printf(
	"BIOS basemem (%ldK) != RTC basemem (%dK), setting to BIOS value\n",
			       bootinfo.bi_basemem, biosbasemem);
			biosbasemem = bootinfo.bi_basemem;

			/*
			 * XXX if biosbasemem is now < 640, there is a `hole'
			 * between the end of base memory and the start of
			 * ISA memory.  The hole may be empty or it may
			 * contain BIOS code or data.  Map it read/write so
			 * that the BIOS can write to it.  (Memory from 0 to
			 * the physical end of the kernel is mapped read-only
			 * to begin with and then parts of it are remapped.
			 * The parts that aren't remapped form holes that
			 * remain read-only and are unused by the kernel.
			 * The base memory area is below the physical end of
			 * the kernel and right now forms a read-only hole.
			 * The part of it from 0 to
			 * (trunc_page(biosbasemem * 1024) - 1) will be
			 * remapped and used by the kernel later.)
			 *
			 * This code is similar to the code used in
			 * pmap_mapdev, but since no memory needs to be
			 * allocated we simply change the mapping.
			 */
			for (pa = trunc_page(biosbasemem * 1024);
			     pa < ISA_HOLE_START; pa += PAGE_SIZE) {
				unsigned *pte;

				pte = (unsigned *)vtopte(pa + KERNBASE);
				*pte = pa | PG_RW | PG_V;
			}
		}
		if (bootinfo.bi_extmem != biosextmem)
			printf("BIOS extmem (%ldK) != RTC extmem (%dK)\n",
			       bootinfo.bi_extmem, biosextmem);
	}

#ifdef SMP
	/* make hole for AP bootstrap code */
	pagesinbase = mp_bootaddress(biosbasemem) / PAGE_SIZE;
#else
	pagesinbase = biosbasemem * 1024 / PAGE_SIZE;
#endif

	pagesinext = biosextmem * 1024 / PAGE_SIZE;

	/*
	 * Special hack for chipsets that still remap the 384k hole when
	 * there's 16MB of memory - this really confuses people that
	 * are trying to use bus mastering ISA controllers with the
	 * "16MB limit"; they only have 16MB, but the remapping puts
	 * them beyond the limit.
	 */
	/*
	 * If extended memory is between 15-16MB (16-17MB phys address range),
	 * chop it to 15MB.
	 */
	if ((pagesinext > 3840) && (pagesinext < 4096))
		pagesinext = 3840;

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".
	 */
	Maxmem = pagesinext + 0x100000/PAGE_SIZE;
	/*
	 * Indicate that we wish to do a speculative search for memory beyond
	 * the end of the reported size if the indicated amount is 64MB (0x4000
	 * pages) - which is the largest amount that the BIOS/bootblocks can
	 * currently report.  If a specific amount of memory is indicated via
	 * the MAXMEM option or the npx0 "msize", then don't do the speculative
	 * memory probe.
	 */
	if (Maxmem >= 0x4000)
		speculative_mprobe = TRUE;
	else
		speculative_mprobe = FALSE;

#ifdef MAXMEM
	Maxmem = MAXMEM/4;
	speculative_mprobe = FALSE;
#endif

#if NNPX > 0
	idp = find_isadev(isa_devtab_null, &npxdriver, 0);
	if (idp != NULL && idp->id_msize != 0) {
		Maxmem = idp->id_msize / 4;
		speculative_mprobe = FALSE;
	}
#endif

#ifdef SMP
	/* look for the MP hardware - needed for apic addresses */
	mp_probe();
#endif

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(first, 0);
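	/*
	 * The probe loop below maps each candidate page uncacheable via
	 * CMAP1/CADDR1 and writes four patterns: 0xaaaaaaaa and 0x55555555
	 * exercise every bit in both states, and 0xffffffff and 0x0 verify
	 * that each bit can hold all-ones and all-zeroes.  Pages that fail
	 * a readback are left out of phys_avail[].  When speculative_mprobe
	 * is set, this same loop discovers memory beyond the BIOS-reported
	 * size, because Maxmem is bumped as long as good pages keep turning
	 * up; the first bad page ends the search.
	 */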
	/*
	 * Size up each available chunk of physical memory.
	 */

	/*
	 * We currently don't bother testing base memory.
	 * XXX  ...but we probably should.
	 */
	pa_indx = 0;
	if (pagesinbase > 1) {
		phys_avail[pa_indx++] = PAGE_SIZE;	/* skip first page of memory */
		phys_avail[pa_indx] = ptoa(pagesinbase);/* memory up to the ISA hole */
		physmem = pagesinbase - 1;
	} else {
		/* point at first chunk end */
		pa_indx++;
	}

	for (target_page = avail_start; target_page < ptoa(Maxmem); target_page += PAGE_SIZE) {
		int tmp, page_bad;

		page_bad = FALSE;

		/*
		 * map page into kernel: valid, read/write, non-cacheable
		 */
		*(int *)CMAP1 = PG_V | PG_RW | PG_N | target_page;
		invltlb();

		tmp = *(int *)CADDR1;
		/*
		 * Test for alternating 1's and 0's
		 */
		*(volatile int *)CADDR1 = 0xaaaaaaaa;
		if (*(volatile int *)CADDR1 != 0xaaaaaaaa) {
			page_bad = TRUE;
		}
		/*
		 * Test for alternating 0's and 1's
		 */
		*(volatile int *)CADDR1 = 0x55555555;
		if (*(volatile int *)CADDR1 != 0x55555555) {
			page_bad = TRUE;
		}
		/*
		 * Test for all 1's
		 */
		*(volatile int *)CADDR1 = 0xffffffff;
		if (*(volatile int *)CADDR1 != 0xffffffff) {
			page_bad = TRUE;
		}
		/*
		 * Test for all 0's
		 */
		*(volatile int *)CADDR1 = 0x0;
		if (*(volatile int *)CADDR1 != 0x0) {
			/*
			 * test of page failed
			 */
			page_bad = TRUE;
		}
		/*
		 * Restore original value.
		 */
		*(int *)CADDR1 = tmp;

		/*
		 * Adjust array of valid/good pages.
		 */
		if (page_bad == FALSE) {
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer.  Otherwise start a new chunk.
			 * Note that "end" points one past the last byte,
			 * making the range >= start and < end.
			 * If we're also doing a speculative memory
			 * test and we are at or past the end, bump up Maxmem
			 * so that we keep going.  The first bad page
			 * will terminate the loop.
			 */
			if (phys_avail[pa_indx] == target_page) {
				phys_avail[pa_indx] += PAGE_SIZE;
				if (speculative_mprobe == TRUE &&
				    phys_avail[pa_indx] >= (64*1024*1024))
					Maxmem++;
			} else {
				pa_indx++;
				if (pa_indx == PHYS_AVAIL_ARRAY_END) {
					printf("Too many holes in the physical address space, giving up\n");
					pa_indx--;
					break;
				}
				phys_avail[pa_indx++] = target_page;	/* start */
				phys_avail[pa_indx] = target_page + PAGE_SIZE;	/* end */
			}
			physmem++;
		}
	}

	*(int *)CMAP1 = 0;
	invltlb();

	/*
	 * XXX
	 * The last chunk must contain at least one page plus the message
	 * buffer to avoid complicating other code (message buffer address
	 * calculation, etc.).
	 */
	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
	    round_page(sizeof(struct msgbuf)) >= phys_avail[pa_indx]) {
		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
		phys_avail[pa_indx--] = 0;
		phys_avail[pa_indx--] = 0;
	}

	Maxmem = atop(phys_avail[pa_indx]);

	/* Trim off space for the message buffer. */
	phys_avail[pa_indx] -= round_page(sizeof(struct msgbuf));

	avail_end = phys_avail[pa_indx];

	/* now running on new page tables, configured, and u/iom is accessible */

	/* Map the message buffer. */
	for (off = 0; off < round_page(sizeof(struct msgbuf)); off += PAGE_SIZE)
		pmap_enter(kernel_pmap, (vm_offset_t)msgbufp + off,
			   avail_end + off, VM_PROT_ALL, TRUE);
	msgbufmapped = 1;

	/* make an initial tss so cpu can get interrupt stack on syscall! */
#ifdef VM86
	common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16;
#else
	common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE;
#endif /* VM86 */
	common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	common_tss.tss_ioopt = (sizeof common_tss) << 16;
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);
#ifdef VM86
	private_tss = 0;
	my_tr = GPROC0_SEL;
#endif

	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
	    dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)];
	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
	    dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_cr3 = (int)IdlePTD;
	dblfault_tss.tss_eip = (int) dblfault_handler;
	dblfault_tss.tss_eflags = PSL_KERNEL;
	dblfault_tss.tss_ds = dblfault_tss.tss_es = dblfault_tss.tss_fs =
	    dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);

	/* make a call gate to reenter kernel with */
	gdp = &ldt[LSYS5CALLS_SEL].gd;

	x = (int) &IDTVEC(syscall);
	gdp->gd_looffset = x++;
	gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
	gdp->gd_stkcpy = 1;
	gdp->gd_type = SDT_SYS386CGT;
	gdp->gd_dpl = SEL_UPL;
	gdp->gd_p = 1;
	gdp->gd_hioffset = ((int) &IDTVEC(syscall)) >> 16;

	/* XXX does this work? */
	ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];

	/* transfer to user mode */

	_ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
	_udatasel = LSEL(LUDATA_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	proc0.p_addr->u_pcb.pcb_flags = 0;
	proc0.p_addr->u_pcb.pcb_cr3 = (int)IdlePTD;
	proc0.p_addr->u_pcb.pcb_mpnest = 1;
	proc0.p_addr->u_pcb.pcb_ext = 0;
}

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
void f00f_hack(void);
SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);

void
f00f_hack(void) {
	struct region_descriptor r_idt;
	unsigned char *tmp;
	int i;

	if (!has_f00f_bug)
		return;

	printf("Intel Pentium F00F detected, installing workaround\n");

	r_idt.rd_limit = sizeof(idt) - 1;

	tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
	if (tmp == 0)
		panic("kmem_alloc returned 0");
	if (((unsigned int)tmp & (PAGE_SIZE-1)) != 0)
		panic("kmem_alloc returned non-page-aligned memory");
	/* Put the first seven entries in the lower page */
	t_idt = (struct gate_descriptor*)(tmp + PAGE_SIZE - (7*8));
	bcopy(idt, t_idt, sizeof(idt));
	r_idt.rd_base = (int)t_idt;
	lidt(&r_idt);
	if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
			   VM_PROT_READ, FALSE) != KERN_SUCCESS)
		panic("vm_map_protect failed");
	return;
}
#endif /* defined(I586_CPU) && !defined(NO_F00F_HACK) */

int
ptrace_set_pc(p, addr)
	struct proc *p;
	unsigned int addr;
{
	p->p_md.md_regs->tf_eip = addr;
	return (0);
}

int
ptrace_single_step(p)
	struct proc *p;
{
	p->p_md.md_regs->tf_eflags |= PSL_T;
	return (0);
}
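/*
 * ptrace_write_u() below accepts a PT_WRITE_U offset only if it falls
 * within the trapframe or within pcb_savefpu.  Trapframe writes are
 * first applied to a local copy so EFLAGS_SECURE() and CS_SECURE() can
 * reject attempts to smuggle in privileged eflags bits or a kernel %cs;
 * everything else in the user area is privileged and draws EFAULT.
 */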
int
ptrace_write_u(p, off, data)
	struct proc *p;
	vm_offset_t off;
	int data;
{
	struct trapframe frame_copy;
	vm_offset_t min;
	struct trapframe *tp;

	/*
	 * Privileged kernel state is scattered all over the user area.
	 * Only allow write access to parts of regs and to fpregs.
	 */
	min = (char *)p->p_md.md_regs - (char *)p->p_addr;
	if (off >= min && off <= min + sizeof(struct trapframe) - sizeof(int)) {
		tp = p->p_md.md_regs;
		frame_copy = *tp;
		*(int *)((char *)&frame_copy + (off - min)) = data;
		if (!EFLAGS_SECURE(frame_copy.tf_eflags, tp->tf_eflags) ||
		    !CS_SECURE(frame_copy.tf_cs))
			return (EINVAL);
		*(int*)((char *)p->p_addr + off) = data;
		return (0);
	}
	min = offsetof(struct user, u_pcb) + offsetof(struct pcb, pcb_savefpu);
	if (off >= min && off <= min + sizeof(struct save87) - sizeof(int)) {
		*(int*)((char *)p->p_addr + off) = data;
		return (0);
	}
	return (EFAULT);
}

int
fill_regs(p, regs)
	struct proc *p;
	struct reg *regs;
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = p->p_md.md_regs;
	regs->r_es = tp->tf_es;
	regs->r_ds = tp->tf_ds;
	regs->r_edi = tp->tf_edi;
	regs->r_esi = tp->tf_esi;
	regs->r_ebp = tp->tf_ebp;
	regs->r_ebx = tp->tf_ebx;
	regs->r_edx = tp->tf_edx;
	regs->r_ecx = tp->tf_ecx;
	regs->r_eax = tp->tf_eax;
	regs->r_eip = tp->tf_eip;
	regs->r_cs = tp->tf_cs;
	regs->r_eflags = tp->tf_eflags;
	regs->r_esp = tp->tf_esp;
	regs->r_ss = tp->tf_ss;
	pcb = &p->p_addr->u_pcb;
	regs->r_fs = pcb->pcb_fs;
	regs->r_gs = pcb->pcb_gs;
	return (0);
}

int
set_regs(p, regs)
	struct proc *p;
	struct reg *regs;
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = p->p_md.md_regs;
	if (!EFLAGS_SECURE(regs->r_eflags, tp->tf_eflags) ||
	    !CS_SECURE(regs->r_cs))
		return (EINVAL);
	tp->tf_es = regs->r_es;
	tp->tf_ds = regs->r_ds;
	tp->tf_edi = regs->r_edi;
	tp->tf_esi = regs->r_esi;
	tp->tf_ebp = regs->r_ebp;
	tp->tf_ebx = regs->r_ebx;
	tp->tf_edx = regs->r_edx;
	tp->tf_ecx = regs->r_ecx;
	tp->tf_eax = regs->r_eax;
	tp->tf_eip = regs->r_eip;
	tp->tf_cs = regs->r_cs;
	tp->tf_eflags = regs->r_eflags;
	tp->tf_esp = regs->r_esp;
	tp->tf_ss = regs->r_ss;
	pcb = &p->p_addr->u_pcb;
	pcb->pcb_fs = regs->r_fs;
	pcb->pcb_gs = regs->r_gs;
	return (0);
}

#ifndef DDB
void
Debugger(const char *msg)
{
	printf("Debugger(\"%s\") called.\n", msg);
}
#endif /* no DDB */

#include <sys/disklabel.h>

/*
 * Determine the size of the transfer, and make sure it is
 * within the boundaries of the partition.  Adjust transfer
 * if needed, and signal errors or early completion.
 */
int
bounds_check_with_label(struct buf *bp, struct disklabel *lp, int wlabel)
{
	struct partition *p = lp->d_partitions + dkpart(bp->b_dev);
	int labelsect = lp->d_partitions[0].p_offset;
	int maxsz = p->p_size,
		sz = (bp->b_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT;

	/* overwriting disk label ? */
	/* XXX should also protect bootstrap in first 8K */
	if (bp->b_blkno + p->p_offset <= LABELSECTOR + labelsect &&
#if LABELSECTOR != 0
	    bp->b_blkno + p->p_offset + sz > LABELSECTOR + labelsect &&
#endif
	    (bp->b_flags & B_READ) == 0 && wlabel == 0) {
		bp->b_error = EROFS;
		goto bad;
	}

#if	defined(DOSBBSECTOR) && defined(notyet)
	/* overwriting master boot record? */
	if (bp->b_blkno + p->p_offset <= DOSBBSECTOR &&
	    (bp->b_flags & B_READ) == 0 && wlabel == 0) {
		bp->b_error = EROFS;
		goto bad;
	}
#endif

	/* beyond partition? */
	if (bp->b_blkno < 0 || bp->b_blkno + sz > maxsz) {
		/* if exactly at end of disk, return an EOF */
		if (bp->b_blkno == maxsz) {
			bp->b_resid = bp->b_bcount;
			return(0);
		}
		/* or truncate if part of it fits */
		sz = maxsz - bp->b_blkno;
		if (sz <= 0) {
			bp->b_error = EINVAL;
			goto bad;
		}
		bp->b_bcount = sz << DEV_BSHIFT;
	}

	bp->b_pblkno = bp->b_blkno + p->p_offset;
	return(1);

bad:
	bp->b_flags |= B_ERROR;
	return(-1);
}

#ifdef DDB

/*
 * Provide inb() and outb() as functions.  They are normally only
 * available as macros calling inlined functions, thus cannot be
 * called inside DDB.
 *
 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined.
 */

#undef inb
#undef outb

/* silence compiler warnings */
u_char inb(u_int);
void outb(u_int, u_char);

u_char
inb(u_int port)
{
	u_char	data;
	/*
	 * We use %%dx and not %1 here because i/o is done at %dx and not at
	 * %edx, while gcc generates inferior code (movw instead of movl)
	 * if we tell it to load (u_short) port.
	 */
	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

void
outb(u_int port, u_char data)
{
	u_char	al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

#endif /* DDB */