machdep.c revision 30994
/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 *	$Id: machdep.c,v 1.269 1997/10/12 20:23:17 phk Exp $
 */

#include "apm.h"
#include "npx.h"
#include "opt_sysvipc.h"
#include "opt_ddb.h"
#include "opt_bounce.h"
#include "opt_maxmem.h"
#include "opt_perfmon.h"
#include "opt_smp.h"
#include "opt_userconfig.h"
#include "opt_vm86.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/conf.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>

#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#ifdef SYSVMSG
#include <sys/msg.h>
#endif

#ifdef SYSVSEM
#include <sys/sem.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/user.h>
#include <sys/exec.h>

#include <ddb/ddb.h>

#include <net/netisr.h>

#if NAPM > 0
#include <machine/apm_bios.h>
#endif
#include <machine/cpu.h>
#include <machine/reg.h>
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/cons.h>
#include <machine/bootinfo.h>
#include <machine/md_var.h>
#include <machine/pcb_ext.h>	/* pcb.h included via sys/user.h */
#ifdef SMP
#include <machine/smp.h>
#endif
#ifdef PERFMON
#include <machine/perfmon.h>
#endif

#include <i386/isa/isa_device.h>
#include <i386/isa/intr_machdep.h>
#include <i386/isa/rtc.h>
#include <machine/random.h>

extern void init386 __P((int first));
extern int ptrace_set_pc __P((struct proc *p, unsigned int addr));
extern int ptrace_single_step __P((struct proc *p));
extern int ptrace_write_u __P((struct proc *p, vm_offset_t off, int data));
extern void dblfault_handler __P((void));

extern void printcpuinfo(void);		/* XXX header file */
extern void earlysetcpuclass(void);	/* same header file */
extern void finishidentcpu(void);
extern void panicifcpuunsupported(void);
extern void initializecpu(void);

static void cpu_startup __P((void *));
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)

static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");

#ifdef BOUNCE_BUFFERS
extern char *bouncememory;
extern int maxbkva;
#ifdef BOUNCEPAGES
int bouncepages = BOUNCEPAGES;
#else
int bouncepages = 0;
#endif
#endif	/* BOUNCE_BUFFERS */

extern int freebufspace;
int msgbufmapped = 0;		/* set when safe to use msgbuf */
int _udatasel, _ucodesel;
u_int atdevbase;


int physmem = 0;
int cold = 1;

static int
sysctl_hw_physmem SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp, 0, ctob(physmem), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_physmem, "I", "");

static int
sysctl_hw_usermem SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp, 0,
		ctob(physmem - cnt.v_wire_count), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_usermem, "I", "");

int boothowto = 0, bootverbose = 0, Maxmem = 0;
long dumplo;
extern int bootdev;

vm_offset_t phys_avail[10];

/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)

static void setup_netisrs __P((struct linker_set *)); /* XXX declare elsewhere */

static vm_offset_t buffer_sva, buffer_eva;
vm_offset_t clean_sva, clean_eva;
static vm_offset_t pager_sva, pager_eva;
extern struct linker_set netisr_set;

#define offsetof(type, member)	((size_t)(&((type *)0)->member))

static void
cpu_startup(dummy)
	void *dummy;
{
	register unsigned i;
	register caddr_t v;
	vm_offset_t maxaddr;
	vm_size_t size = 0;
	int firstaddr;
	vm_offset_t minaddr;

	if (boothowto & RB_VERBOSE)
		bootverbose++;

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	earlysetcpuclass();
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory = %d (%dK bytes)\n", ptoa(Maxmem), ptoa(Maxmem) / 1024);
	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			int size = phys_avail[indx + 1] - phys_avail[indx];

			printf("0x%08lx - 0x%08lx, %d bytes (%d pages)\n", phys_avail[indx],
			    phys_avail[indx + 1] - 1, size, size / PAGE_SIZE);
		}
	}

	/*
	 * Quickly wire in netisrs.
	 */
	setup_netisrs(&netisr_set);

	/*
	 * Calculate callout wheel size
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	/*
	 * Allocate space for system data structures.
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */

	/*
	 * Make two passes.  The first pass calculates how much memory is
	 * needed and allocates it.  The second pass assigns virtual
	 * addresses to the various data structures.
	 */
	firstaddr = 0;
again:
	v = (caddr_t)firstaddr;

#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))
	valloc(callout, struct callout, ncallout);
	valloc(callwheel, struct callout_tailq, callwheelsize);
#ifdef SYSVSHM
	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif
#ifdef SYSVSEM
	valloc(sema, struct semid_ds, seminfo.semmni);
	valloc(sem, struct sem, seminfo.semmns);
	/* This is pretty disgusting! */
	valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
#endif
#ifdef SYSVMSG
	valloc(msgpool, char, msginfo.msgmax);
	valloc(msgmaps, struct msgmap, msginfo.msgseg);
	valloc(msghdrs, struct msg, msginfo.msgtql);
	valloc(msqids, struct msqid_ds, msginfo.msgmni);
#endif

	if (nbuf == 0) {
		nbuf = 30;
		if( physmem > 1024)
			nbuf += min((physmem - 1024) / 8, 2048);
	}
	nswbuf = max(min(nbuf/4, 128), 16);

	valloc(swbuf, struct buf, nswbuf);
	valloc(buf, struct buf, nbuf);

#ifdef BOUNCE_BUFFERS
	/*
	 * If there is more than 16MB of memory, allocate some bounce buffers
	 */
	if (Maxmem > 4096) {
		if (bouncepages == 0) {
			bouncepages = 64;
			bouncepages += ((Maxmem - 4096) / 2048) * 32;
			if (bouncepages > 128)
				bouncepages = 128;
		}
		v = (caddr_t)((vm_offset_t)round_page(v));
		valloc(bouncememory, char, bouncepages * PAGE_SIZE);
	}
#endif

	/*
	 * End of first pass, size has been calculated so allocate memory
	 */
	if (firstaddr == 0) {
		size = (vm_size_t)(v - firstaddr);
		firstaddr = (int)kmem_alloc(kernel_map, round_page(size));
		if (firstaddr == 0)
			panic("startup: no room for tables");
		goto again;
	}

	/*
	 * End of second pass, addresses have been assigned
	 */
	if ((vm_size_t)(v - firstaddr) != size)
		panic("startup: table size inconsistency");

#ifdef BOUNCE_BUFFERS
	clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
			(nbuf*BKVASIZE) + (nswbuf*MAXPHYS) +
				maxbkva + pager_map_size, TRUE);
	io_map = kmem_suballoc(clean_map, &minaddr, &maxaddr, maxbkva, FALSE);
#else
	clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
			(nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size, TRUE);
#endif
	buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
				(nbuf*BKVASIZE), TRUE);
	pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
				(nswbuf*MAXPHYS) + pager_map_size, TRUE);
	pager_map->system_map = 1;
	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
				(16*ARG_MAX), TRUE);
	u_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
				(maxproc*UPAGES*PAGE_SIZE), FALSE);

	/*
	 * Finally, allocate mbuf pool.  Since mclrefcnt is an off-size
	 * we use the more space efficient malloc in place of kmem_alloc.
	 */
	{
		vm_offset_t mb_map_size;

		mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES;
		mb_map_size = roundup2(mb_map_size, max(MCLBYTES, PAGE_SIZE));
		mclrefcnt = malloc(mb_map_size / MCLBYTES, M_MBUF, M_NOWAIT);
		bzero(mclrefcnt, mb_map_size / MCLBYTES);
		mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr,
			mb_map_size, FALSE);
		mb_map->system_map = 1;
	}

	/*
	 * Initialize callouts
	 */
	SLIST_INIT(&callfree);
	for (i = 0; i < ncallout; i++) {
		SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
	}

	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&callwheel[i]);
	}

#if defined(USERCONFIG)
#if defined(USERCONFIG_BOOT)
	if (1) {
#else
	if (boothowto & RB_CONFIG) {
#endif
		userconfig();
		cninit();	/* the preferred console may have changed */
	}
#endif

#ifdef BOUNCE_BUFFERS
	/*
	 * init bounce buffers
	 */
	vm_bounce_init();
#endif

	printf("avail memory = %d (%dK bytes)\n", ptoa(cnt.v_free_count),
	    ptoa(cnt.v_free_count) / 1024);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();

#ifdef SMP
	/*
	 * OK, enough kmem_alloc/malloc state should be up, lets get on with it!
	 */
	mp_start();	/* fire up the APs and APICs */
	mp_announce();
#endif	/* SMP */
}

int
register_netisr(num, handler)
	int num;
	netisr_t *handler;
{

	if (num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs)) ) {
		printf("register_netisr: bad isr number: %d\n", num);
		return (EINVAL);
	}
	netisrs[num] = handler;
	return (0);
}

static void
setup_netisrs(ls)
	struct linker_set *ls;
{
	int i;
	const struct netisrtab *nit;

	for(i = 0; ls->ls_items[i]; i++) {
		nit = (const struct netisrtab *)ls->ls_items[i];
		register_netisr(nit->nit_num, nit->nit_isr);
	}
}

/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig, mask;
	u_long code;
{
	register struct proc *p = curproc;
	register struct trapframe *regs;
	register struct sigframe *fp;
	struct sigframe sf;
	struct sigacts *psp = p->p_sigacts;
	int oonstack;

	regs = p->p_md.md_regs;
	oonstack = psp->ps_sigstk.ss_flags & SS_ONSTACK;
	/*
	 * Allocate and validate space for the signal handler context.
	 */
	if ((psp->ps_flags & SAS_ALTSTACK) && !oonstack &&
	    (psp->ps_sigonstack & sigmask(sig))) {
		fp = (struct sigframe *)(psp->ps_sigstk.ss_sp +
		    psp->ps_sigstk.ss_size - sizeof(struct sigframe));
		psp->ps_sigstk.ss_flags |= SS_ONSTACK;
	} else {
		fp = (struct sigframe *)regs->tf_esp - 1;
	}

	/*
	 * grow() will return FALSE if the fp will not fit inside the stack
	 * and the stack can not be grown.  useracc will return FALSE
	 * if access is denied.
	 */
	if ((grow(p, (int)fp) == FALSE) ||
	    (useracc((caddr_t)fp, sizeof(struct sigframe), B_WRITE) == FALSE)) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		SIGACTION(p, SIGILL) = SIG_DFL;
		sig = sigmask(SIGILL);
		p->p_sigignore &= ~sig;
		p->p_sigcatch &= ~sig;
		p->p_sigmask &= ~sig;
		psignal(p, SIGILL);
		return;
	}

	/*
	 * Build the argument list for the signal handler.
	 */
	if (p->p_sysent->sv_sigtbl) {
		if (sig < p->p_sysent->sv_sigsize)
			sig = p->p_sysent->sv_sigtbl[sig];
		else
			sig = p->p_sysent->sv_sigsize + 1;
	}
	sf.sf_signum = sig;
	sf.sf_code = code;
	sf.sf_scp = &fp->sf_sc;
	sf.sf_addr = (char *) regs->tf_err;
	sf.sf_handler = catcher;

	/* save scratch registers */
	sf.sf_sc.sc_eax = regs->tf_eax;
	sf.sf_sc.sc_ebx = regs->tf_ebx;
	sf.sf_sc.sc_ecx = regs->tf_ecx;
	sf.sf_sc.sc_edx = regs->tf_edx;
	sf.sf_sc.sc_esi = regs->tf_esi;
	sf.sf_sc.sc_edi = regs->tf_edi;
	sf.sf_sc.sc_cs = regs->tf_cs;
	sf.sf_sc.sc_ds = regs->tf_ds;
	sf.sf_sc.sc_ss = regs->tf_ss;
	sf.sf_sc.sc_es = regs->tf_es;
	sf.sf_sc.sc_isp = regs->tf_isp;

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	sf.sf_sc.sc_onstack = oonstack;
	sf.sf_sc.sc_mask = mask;
	sf.sf_sc.sc_sp = regs->tf_esp;
	sf.sf_sc.sc_fp = regs->tf_ebp;
	sf.sf_sc.sc_pc = regs->tf_eip;
	sf.sf_sc.sc_ps = regs->tf_eflags;
	sf.sf_sc.sc_trapno = regs->tf_trapno;
	sf.sf_sc.sc_err = regs->tf_err;

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86;

		sf.sf_sc.sc_gs = tf->tf_vm86_gs;
		sf.sf_sc.sc_fs = tf->tf_vm86_fs;
		sf.sf_sc.sc_es = tf->tf_vm86_es;
		sf.sf_sc.sc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_sc.sc_ps = (tf->tf_eflags & ~(PSL_VIF | PSL_VIP))
			    | (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * We should never have PSL_T set when returning from vm86
		 * mode.  It may be set here if we deliver a signal before
		 * getting to vm86 mode, so turn it off.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_T | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, fp, sizeof(struct sigframe)) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		sigexit(p, SIGILL);
	}

	regs->tf_esp = (int)fp;
	regs->tf_eip = (int)(((char *)PS_STRINGS) - *(p->p_sysent->sv_szsigcode));
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_ss = _udatasel;
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 */
int
sigreturn(p, uap)
	struct proc *p;
	struct sigreturn_args /* {
		struct sigcontext *sigcntxp;
	} */ *uap;
{
	register struct sigcontext *scp;
	register struct sigframe *fp;
	register struct trapframe *regs = p->p_md.md_regs;
	int eflags;

	/*
	 * (XXX old comment) regs->tf_esp points to the return address.
	 * The user scp pointer is above that.
	 * The return address is faked in the signal trampoline code
	 * for consistency.
	 */
	scp = uap->sigcntxp;
	fp = (struct sigframe *)
	     ((caddr_t)scp - offsetof(struct sigframe, sf_sc));

	if (useracc((caddr_t)fp, sizeof (*fp), B_WRITE) == 0)
		return(EFAULT);

	eflags = scp->sc_ps;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (p->p_addr->u_pcb.pcb_ext == 0)
			return (EINVAL);
		vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* go back to user mode if both flags are set */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(p, SIGBUS, 0);

#define VM_USERCHANGE	(PSL_USERCHANGE | PSL_RF)
#define VME_USERCHANGE	(VM_USERCHANGE | PSL_VIP | PSL_VIF)
		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) | (eflags & VM_USERCHANGE) | PSL_VM;
		}
		tf->tf_vm86_ds = scp->sc_ds;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
#define	EFLAGS_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFLAGS_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
#ifdef DEBUG
			printf("sigreturn: eflags = 0x%x\n", eflags);
#endif
			return(EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
#define	CS_SECURE(cs)	(ISPL(cs) == SEL_UPL)
		if (!CS_SECURE(scp->sc_cs)) {
#ifdef DEBUG
			printf("sigreturn: cs = 0x%x\n", scp->sc_cs);
#endif
			trapsignal(p, SIGBUS, T_PROTFLT);
			return(EINVAL);
		}
		regs->tf_ds = scp->sc_ds;
		regs->tf_es = scp->sc_es;
	}
	/* restore scratch registers */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;

	if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0)
		return(EINVAL);

	if (scp->sc_onstack & 01)
		p->p_sigacts->ps_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SS_ONSTACK;
	p->p_sigmask = scp->sc_mask & ~sigcantmask;
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;
	return(EJUSTRETURN);
}

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		__asm__ ("hlt");
}

/*
 * Turn the power off.
 */
void
cpu_power_down(void)
{
#if NAPM > 0
	apm_power_off();
#endif
}

/*
 * Clear registers on exec
 */
void
setregs(p, entry, stack)
	struct proc *p;
	u_long entry;
	u_long stack;
{
	struct trapframe *regs = p->p_md.md_regs;

#ifdef USER_LDT
	struct pcb *pcb = &p->p_addr->u_pcb;

	/* was i386_user_cleanup() in NetBSD */
	if (pcb->pcb_ldt) {
		if (pcb == curpcb)
			lldt(GSEL(GUSERLDT_SEL, SEL_KPL));
		kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ldt,
			pcb->pcb_ldt_len * sizeof(union descriptor));
		pcb->pcb_ldt_len = (int)pcb->pcb_ldt = 0;
	}
#endif

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = entry;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_cs = _ucodesel;

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	p->p_addr->u_pcb.pcb_flags &= ~FP_SOFTFP;

	/*
	 * Arrange to trap the next npx or `fwait' instruction (see npx.c
	 * for why fwait must be trapped at least if there is an npx or an
	 * emulator).  This is mainly to handle the case where npx0 is not
	 * configured, since the npx routines normally set up the trap
	 * otherwise.  It should be done only at boot time, but doing it
	 * here allows modifying `npx_exists' for testing the emulator on
	 * systems with an npx.
	 */
	load_cr0(rcr0() | CR0_MP | CR0_TS);

#if NNPX > 0
	/* Initialize the npx (if any) for the current process. */
	npxinit(__INITIAL_NPXCW__);
#endif
}

static int
sysctl_machdep_adjkerntz SYSCTL_HANDLER_ARGS
{
	int error;
	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
		req);
	if (!error && req->newptr)
		resettodr();
	return (error);
}

SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
	&adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");

SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set,
	CTLFLAG_RW, &disable_rtc_set, 0, "");

SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo,
	CTLFLAG_RD, &bootinfo, bootinfo, "");

SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock,
	CTLFLAG_RW, &wall_cmos_clock, 0, "");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int currentldt;
int _default_ldt;
#ifdef SMP
union descriptor gdt[NGDT + NCPU];	/* global descriptor table */
#else
union descriptor gdt[NGDT];		/* global descriptor table */
#endif
struct gate_descriptor idt[NIDT];	/* interrupt descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */
#ifdef SMP
/* table descriptors - used to load tables by microp */
struct region_descriptor r_gdt, r_idt;
#endif

#ifdef SMP
extern struct i386tss common_tss;	/* One tss per cpu */
#ifdef VM86
extern struct segment_descriptor common_tssd;
extern int private_tss;
extern u_int my_tr;
#endif /* VM86 */
#else
struct i386tss common_tss;
#ifdef VM86
struct segment_descriptor common_tssd;
u_int private_tss;	/* flag indicating private tss */
u_int my_tr;		/* which task register setting */
#endif /* VM86 */
#endif

static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];

extern struct user *proc0paddr;


/* software prototypes -- in more palatable form */
struct soft_segment_descriptor gdt_segs[
#ifdef SMP
	NGDT + NCPU
#endif
	] = {
/* GNULL_SEL	0 Null Descriptor */
{	0x0,		/* segment base address */
	0x0,		/* length */
	0,		/* segment type */
	0,		/* segment descriptor priority level */
	0,		/* segment descriptor present */
	0, 0,
	0,		/* default 32 vs 16 bit size */
	0		/* limit granularity (byte/page units)*/ },
/* GCODE_SEL	1 Code Descriptor for kernel */
{	0x0,		/* segment base address */
	0xfffff,	/* length - all address space */
	SDT_MEMERA,	/* segment type */
	0,		/* segment descriptor priority level */
	1,		/* segment descriptor present */
	0, 0,
	1,		/* default 32 vs 16 bit size */
	1		/* limit granularity (byte/page units)*/ },
/* GDATA_SEL	2 Data Descriptor for kernel */
{	0x0,		/* segment base address */
	0xfffff,	/* length - all address space */
	SDT_MEMRWA,	/* segment type */
	0,		/* segment descriptor priority level */
	1,		/* segment descriptor present */
	0, 0,
	1,		/* default 32 vs 16 bit size */
	1		/* limit granularity (byte/page units)*/ },
/* GLDT_SEL	3 LDT Descriptor */
{	(int) ldt,	/* segment base address */
	sizeof(ldt)-1,	/* length - all address space */
	SDT_SYSLDT,	/* segment type */
	SEL_UPL,	/* segment descriptor priority level */
	1,		/* segment descriptor present */
	0, 0,
	0,		/* unused - default 32 vs 16 bit size */
	0		/* limit granularity (byte/page units)*/ },
/* GTGATE_SEL	4 Null Descriptor - Placeholder */
{	0x0,		/* segment base address */
	0x0,		/* length - all address space */
	0,		/* segment type */
	0,		/* segment descriptor priority level */
	0,		/* segment descriptor present */
	0, 0,
	0,		/* default 32 vs 16 bit size */
	0		/* limit granularity (byte/page units)*/ },
/* GPANIC_SEL	5 Panic Tss Descriptor */
{	(int) &dblfault_tss,	/* segment base address */
	sizeof(struct i386tss)-1,/* length - all address space */
	SDT_SYS386TSS,	/* segment type */
	0,		/* segment descriptor priority level */
	1,		/* segment descriptor present */
	0, 0,
	0,		/* unused - default 32 vs 16 bit size */
	0		/* limit granularity (byte/page units)*/ },
/* GPROC0_SEL	6 Proc 0 Tss Descriptor */
{
	(int) &common_tss,	/* segment base address */
	sizeof(struct i386tss)-1,/* length - all address space */
	SDT_SYS386TSS,	/* segment type */
	0,		/* segment descriptor priority level */
	1,		/* segment descriptor present */
	0, 0,
	0,		/* unused - default 32 vs 16 bit size */
	0		/* limit granularity (byte/page units)*/ },
/* GUSERLDT_SEL	7 User LDT Descriptor per process */
{	(int) ldt,	/* segment base address */
	(512 * sizeof(union descriptor)-1),	/* length */
	SDT_SYSLDT,	/* segment type */
	0,		/* segment descriptor priority level */
	1,		/* segment descriptor present */
	0, 0,
	0,		/* unused - default 32 vs 16 bit size */
	0		/* limit granularity (byte/page units)*/ },
/* GAPMCODE32_SEL 8 APM BIOS 32-bit interface (32bit Code) */
{	0,		/* segment base address (overwritten by APM) */
	0xfffff,	/* length */
	SDT_MEMERA,	/* segment type */
	0,		/* segment descriptor priority level */
	1,		/* segment descriptor present */
	0, 0,
	1,		/* default 32 vs 16 bit size */
	1		/* limit granularity (byte/page units)*/ },
/* GAPMCODE16_SEL 9 APM BIOS 32-bit interface (16bit Code) */
{	0,		/* segment base address (overwritten by APM) */
	0xfffff,	/* length */
	SDT_MEMERA,	/* segment type */
	0,		/* segment descriptor priority level */
	1,		/* segment descriptor present */
	0, 0,
	0,		/* default 32 vs 16 bit size */
	1		/* limit granularity (byte/page units)*/ },
/* GAPMDATA_SEL	10 APM BIOS 32-bit interface (Data) */
{	0,		/* segment base address (overwritten by APM) */
	0xfffff,	/* length */
	SDT_MEMRWA,	/* segment type */
	0,		/* segment descriptor priority level */
	1,		/* segment descriptor present */
	0, 0,
	1,		/* default 32 vs 16 bit size */
	1		/* limit granularity (byte/page units)*/ },
};

static struct soft_segment_descriptor ldt_segs[] = {
	/* Null Descriptor - overwritten by call gate */
{	0x0,		/* segment base address */
	0x0,		/* length - all address space */
	0,		/* segment type */
	0,		/* segment descriptor priority level */
	0,		/* segment descriptor present */
	0, 0,
	0,		/* default 32 vs 16 bit size */
	0		/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,		/* segment base address */
	0x0,		/* length - all address space */
	0,		/* segment type */
	0,		/* segment descriptor priority level */
	0,		/* segment descriptor present */
	0, 0,
	0,		/* default 32 vs 16 bit size */
	0		/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,		/* segment base address */
	0x0,		/* length - all address space */
	0,		/* segment type */
	0,		/* segment descriptor priority level */
	0,		/* segment descriptor present */
	0, 0,
	0,		/* default 32 vs 16 bit size */
	0		/* limit granularity (byte/page units)*/ },
	/* Code Descriptor for user */
{	0x0,		/* segment base address */
	0xfffff,	/* length - all address space */
	SDT_MEMERA,	/* segment type */
	SEL_UPL,	/* segment descriptor priority level */
	1,		/* segment descriptor present */
	0, 0,
	1,		/* default 32 vs 16 bit size */
	1		/* limit granularity (byte/page units)*/ },
	/* Data Descriptor for user */
{	0x0,		/* segment base address */
	0xfffff,	/* length - all address space */
	SDT_MEMRWA,	/* segment type */
	SEL_UPL,	/* segment descriptor priority level */
	1,		/* segment descriptor present */
	0, 0,
	1,		/* default 32 vs 16 bit size */
	1		/* limit granularity (byte/page units)*/ },
};

void
setidt(idx, func, typ, dpl, selec)
	int idx;
	inthand_t *func;
	int typ;
	int dpl;
	int selec;
{
	struct gate_descriptor *ip = idt + idx;

	ip->gd_looffset = (int)func;
	ip->gd_selector = selec;
	ip->gd_stkcpy = 0;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((int)func)>>16 ;
}

#define	IDTVEC(name)	__CONCAT(X,name)

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(syscall), IDTVEC(int0x80_syscall);

void
sdtossd(sd, ssd)
	struct segment_descriptor *sd;
	struct soft_segment_descriptor *ssd;
{
	ssd->ssd_base  = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type  = sd->sd_type;
	ssd->ssd_dpl   = sd->sd_dpl;
	ssd->ssd_p     = sd->sd_p;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran  = sd->sd_gran;
}

void
init386(first)
	int first;
{
	int x;
	unsigned biosbasemem, biosextmem;
	struct gate_descriptor *gdp;
	int gsel_tss;

	struct isa_device *idp;
#ifndef SMP
	/* table descriptors - used to load tables by microp */
	struct region_descriptor r_gdt, r_idt;
#endif
	int pagesinbase, pagesinext;
	int target_page, pa_indx;
	int off;
	int speculative_mprobe;

	proc0.p_addr = proc0paddr;

	atdevbase = ISA_HOLE_START + KERNBASE;

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

	/*
	 * make gdt memory segments, the code segment goes up to end of the
	 * page with etext in it, the data segment goes to the end of
	 * the address space
	 */
	/*
	 * XXX text protection is temporarily (?) disabled.  The limit was
	 * i386_btop(round_page(etext)) - 1.
	 */
	gdt_segs[GCODE_SEL].ssd_limit = i386_btop(0) - 1;
	gdt_segs[GDATA_SEL].ssd_limit = i386_btop(0) - 1;
#ifdef BDE_DEBUGGER
#define NGDT1	8		/* avoid overwriting db entries with APM ones */
#else
#define	NGDT1	(sizeof gdt_segs / sizeof gdt_segs[0])
#endif
	for (x = 0; x < NGDT1; x++)
		ssdtosd(&gdt_segs[x], &gdt[x].sd);
#ifdef VM86
	common_tssd = gdt[GPROC0_SEL].sd;
#endif /* VM86 */

#ifdef SMP
	/*
	 * Spin these up now.  init_secondary() grabs them.  We could use
	 * #for(x,y,z) / #endfor cpp directives if they existed.
	 */
	for (x = 0; x < NCPU; x++) {
		gdt_segs[NGDT + x] = gdt_segs[GPROC0_SEL];
		ssdtosd(&gdt_segs[NGDT + x], &gdt[NGDT + x].sd);
	}
#endif

	/* make ldt memory segments */
	/*
	 * The data segment limit must not cover the user area because we
	 * don't want the user area to be writable in copyout() etc. (page
	 * level protection is lost in kernel mode on 386's).  Also, we
	 * don't want the user area to be writable directly (page level
	 * protection of the user area is not available on 486's with
	 * CR0_WP set, because there is no user-read/kernel-write mode).
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  And it
	 * should be spelled ...MAX_USER...
	 */
#define VM_END_USER_RW_ADDRESS	VM_MAXUSER_ADDRESS
	/*
	 * The code segment limit has to cover the user area until we move
	 * the signal trampoline out of the user area.  This is safe because
	 * the code segment cannot be written to directly.
	 */
#define VM_END_USER_R_ADDRESS	(VM_END_USER_RW_ADDRESS + UPAGES * PAGE_SIZE)
	ldt_segs[LUCODE_SEL].ssd_limit = i386_btop(VM_END_USER_R_ADDRESS) - 1;
	ldt_segs[LUDATA_SEL].ssd_limit = i386_btop(VM_END_USER_RW_ADDRESS) - 1;
	for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
		ssdtosd(&ldt_segs[x], &ldt[x].sd);

	/* exceptions */
	for (x = 0; x < NIDT; x++)
		setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(1, &IDTVEC(dbg), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(3, &IDTVEC(bpt), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(8, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
	setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(14, &IDTVEC(page), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(15, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(17, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(18, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(0x80, &IDTVEC(int0x80_syscall),
			SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));

#include	"isa.h"
#if	NISA >0
	isa_defaultirq();
#endif
	rand_initialize();

	r_gdt.rd_limit = sizeof(gdt) - 1;
	r_gdt.rd_base = (int) gdt;
	lgdt(&r_gdt);

	r_idt.rd_limit = sizeof(idt) - 1;
	r_idt.rd_base = (int) idt;
	lidt(&r_idt);

	_default_ldt = GSEL(GLDT_SEL, SEL_KPL);
	lldt(_default_ldt);
	currentldt = _default_ldt;

#ifdef DDB
	kdb_init();
	if (boothowto & RB_KDB)
		Debugger("Boot flags requested debugger");
#endif

	finishidentcpu();	/* Final stage of CPU initialization */
	setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	initializecpu();	/* Initialize CPU registers */

	/* Use BIOS values stored in RTC CMOS RAM, since probing
	 * breaks certain 386 AT relics.
	 */
	biosbasemem = rtcin(RTC_BASELO)+ (rtcin(RTC_BASEHI)<<8);
	biosextmem = rtcin(RTC_EXTLO)+ (rtcin(RTC_EXTHI)<<8);

	/*
	 * If BIOS tells us that it has more than 640k in the basemem,
	 * don't believe it - set it to 640k.
	 */
	if (biosbasemem > 640) {
		printf("Preposterous RTC basemem of %dK, truncating to 640K\n",
			biosbasemem);
		biosbasemem = 640;
	}
	if (bootinfo.bi_memsizes_valid && bootinfo.bi_basemem > 640) {
		printf("Preposterous BIOS basemem of %dK, truncating to 640K\n",
			bootinfo.bi_basemem);
		bootinfo.bi_basemem = 640;
	}

	/*
	 * Warn if the official BIOS interface disagrees with the RTC
	 * interface used above about the amount of base memory or the
	 * amount of extended memory.  Prefer the BIOS value for the base
	 * memory.  This is necessary for machines that `steal' base
	 * memory for use as BIOS memory, at least if we are going to use
	 * the BIOS for apm.  Prefer the RTC value for extended memory.
	 * Eventually the hackish interface shouldn't even be looked at.
	 */
	if (bootinfo.bi_memsizes_valid) {
		if (bootinfo.bi_basemem != biosbasemem) {
			vm_offset_t pa;

			printf(
	"BIOS basemem (%ldK) != RTC basemem (%dK), setting to BIOS value\n",
			    bootinfo.bi_basemem, biosbasemem);
			biosbasemem = bootinfo.bi_basemem;

			/*
			 * XXX if biosbasemem is now < 640, there is `hole'
			 * between the end of base memory and the start of
			 * ISA memory.  The hole may be empty or it may
			 * contain BIOS code or data.  Map it read/write so
			 * that the BIOS can write to it.  (Memory from 0 to
			 * the physical end of the kernel is mapped read-only
			 * to begin with and then parts of it are remapped.
			 * The parts that aren't remapped form holes that
			 * remain read-only and are unused by the kernel.
			 * The base memory area is below the physical end of
			 * the kernel and right now forms a read-only hole.
			 * The part of it from 0 to
			 * (trunc_page(biosbasemem * 1024) - 1) will be
			 * remapped and used by the kernel later.)
			 *
			 * This code is similar to the code used in
			 * pmap_mapdev, but since no memory needs to be
			 * allocated we simply change the mapping.
			 */
			for (pa = trunc_page(biosbasemem * 1024);
			     pa < ISA_HOLE_START; pa += PAGE_SIZE) {
				unsigned *pte;

				pte = (unsigned *)vtopte(pa + KERNBASE);
				*pte = pa | PG_RW | PG_V;
			}
		}
		if (bootinfo.bi_extmem != biosextmem)
			printf("BIOS extmem (%ldK) != RTC extmem (%dK)\n",
			    bootinfo.bi_extmem, biosextmem);
	}

#ifdef SMP
	/* make hole for AP bootstrap code */
	pagesinbase = mp_bootaddress(biosbasemem) / PAGE_SIZE;
#else
	pagesinbase = biosbasemem * 1024 / PAGE_SIZE;
#endif

	pagesinext = biosextmem * 1024 / PAGE_SIZE;

	/*
	 * Special hack for chipsets that still remap the 384k hole when
	 * there's 16MB of memory - this really confuses people that
	 * are trying to use bus mastering ISA controllers with the
	 * "16MB limit"; they only have 16MB, but the remapping puts
	 * them beyond the limit.
	 */
	/*
	 * If extended memory is between 15-16MB (16-17MB phys address range),
	 * chop it to 15MB.
	 */
	if ((pagesinext > 3840) && (pagesinext < 4096))
		pagesinext = 3840;

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".
	 */
	Maxmem = pagesinext + 0x100000/PAGE_SIZE;
	/*
	 * Indicate that we wish to do a speculative search for memory beyond
	 * the end of the reported size if the indicated amount is 64MB (0x4000
	 * pages) - which is the largest amount that the BIOS/bootblocks can
	 * currently report.  If a specific amount of memory is indicated via
	 * the MAXMEM option or the npx0 "msize", then don't do the speculative
	 * memory probe.
	 */
	if (Maxmem >= 0x4000)
		speculative_mprobe = TRUE;
	else
		speculative_mprobe = FALSE;

#ifdef MAXMEM
	Maxmem = MAXMEM/4;
	speculative_mprobe = FALSE;
#endif

#if NNPX > 0
	idp = find_isadev(isa_devtab_null, &npxdriver, 0);
	if (idp != NULL && idp->id_msize != 0) {
		Maxmem = idp->id_msize / 4;
		speculative_mprobe = FALSE;
	}
#endif

#ifdef SMP
	/* look for the MP hardware - needed for apic addresses */
	mp_probe();
#endif

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap (first, 0);

	/*
	 * Size up each available chunk of physical memory.
	 */

	/*
	 * We currently don't bother testing base memory.
	 * XXX  ...but we probably should.
1357 */ 1358 pa_indx = 0; 1359 if (pagesinbase > 1) { 1360 phys_avail[pa_indx++] = PAGE_SIZE; /* skip first page of memory */ 1361 phys_avail[pa_indx] = ptoa(pagesinbase);/* memory up to the ISA hole */ 1362 physmem = pagesinbase - 1; 1363 } else { 1364 /* point at first chunk end */ 1365 pa_indx++; 1366 } 1367 1368 for (target_page = avail_start; target_page < ptoa(Maxmem); target_page += PAGE_SIZE) { 1369 int tmp, page_bad; 1370 1371 page_bad = FALSE; 1372 1373 /* 1374 * map page into kernel: valid, read/write, non-cacheable 1375 */ 1376 *(int *)CMAP1 = PG_V | PG_RW | PG_N | target_page; 1377 invltlb(); 1378 1379 tmp = *(int *)CADDR1; 1380 /* 1381 * Test for alternating 1's and 0's 1382 */ 1383 *(volatile int *)CADDR1 = 0xaaaaaaaa; 1384 if (*(volatile int *)CADDR1 != 0xaaaaaaaa) { 1385 page_bad = TRUE; 1386 } 1387 /* 1388 * Test for alternating 0's and 1's 1389 */ 1390 *(volatile int *)CADDR1 = 0x55555555; 1391 if (*(volatile int *)CADDR1 != 0x55555555) { 1392 page_bad = TRUE; 1393 } 1394 /* 1395 * Test for all 1's 1396 */ 1397 *(volatile int *)CADDR1 = 0xffffffff; 1398 if (*(volatile int *)CADDR1 != 0xffffffff) { 1399 page_bad = TRUE; 1400 } 1401 /* 1402 * Test for all 0's 1403 */ 1404 *(volatile int *)CADDR1 = 0x0; 1405 if (*(volatile int *)CADDR1 != 0x0) { 1406 /* 1407 * test of page failed 1408 */ 1409 page_bad = TRUE; 1410 } 1411 /* 1412 * Restore original value. 1413 */ 1414 *(int *)CADDR1 = tmp; 1415 1416 /* 1417 * Adjust array of valid/good pages. 1418 */ 1419 if (page_bad == FALSE) { 1420 /* 1421 * If this good page is a continuation of the 1422 * previous set of good pages, then just increase 1423 * the end pointer. Otherwise start a new chunk. 1424 * Note that "end" points one higher than end, 1425 * making the range >= start and < end. 1426 * If we're also doing a speculative memory 1427 * test and we at or past the end, bump up Maxmem 1428 * so that we keep going. The first bad page 1429 * will terminate the loop. 1430 */ 1431 if (phys_avail[pa_indx] == target_page) { 1432 phys_avail[pa_indx] += PAGE_SIZE; 1433 if (speculative_mprobe == TRUE && 1434 phys_avail[pa_indx] >= (64*1024*1024)) 1435 Maxmem++; 1436 } else { 1437 pa_indx++; 1438 if (pa_indx == PHYS_AVAIL_ARRAY_END) { 1439 printf("Too many holes in the physical address space, giving up\n"); 1440 pa_indx--; 1441 break; 1442 } 1443 phys_avail[pa_indx++] = target_page; /* start */ 1444 phys_avail[pa_indx] = target_page + PAGE_SIZE; /* end */ 1445 } 1446 physmem++; 1447 } 1448 } 1449 1450 *(int *)CMAP1 = 0; 1451 invltlb(); 1452 1453 /* 1454 * XXX 1455 * The last chunk must contain at least one page plus the message 1456 * buffer to avoid complicating other code (message buffer address 1457 * calculation, etc.). 1458 */ 1459 while (phys_avail[pa_indx - 1] + PAGE_SIZE + 1460 round_page(sizeof(struct msgbuf)) >= phys_avail[pa_indx]) { 1461 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]); 1462 phys_avail[pa_indx--] = 0; 1463 phys_avail[pa_indx--] = 0; 1464 } 1465 1466 Maxmem = atop(phys_avail[pa_indx]); 1467 1468 /* Trim off space for the message buffer. */ 1469 phys_avail[pa_indx] -= round_page(sizeof(struct msgbuf)); 1470 1471 avail_end = phys_avail[pa_indx]; 1472 1473 /* now running on new page tables, configured,and u/iom is accessible */ 1474 1475 /* Map the message buffer. 
	/* Map the message buffer. */
	for (off = 0; off < round_page(sizeof(struct msgbuf)); off += PAGE_SIZE)
		pmap_enter(kernel_pmap, (vm_offset_t)msgbufp + off,
			   avail_end + off, VM_PROT_ALL, TRUE);
	msgbufmapped = 1;

	/* make an initial tss so cpu can get interrupt stack on syscall! */
#ifdef VM86
	common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16;
#else
	common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE;
#endif /* VM86 */
	common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL) ;
	common_tss.tss_ioopt = (sizeof common_tss) << 16;
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	ltr(gsel_tss);
#ifdef VM86
	private_tss = 0;
	my_tr = GPROC0_SEL;
#endif

	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
	    dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)];
	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
	    dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_cr3 = (int)IdlePTD;
	dblfault_tss.tss_eip = (int) dblfault_handler;
	dblfault_tss.tss_eflags = PSL_KERNEL;
	dblfault_tss.tss_ds = dblfault_tss.tss_es = dblfault_tss.tss_fs =
	    dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);

	/* make a call gate to reenter kernel with */
	gdp = &ldt[LSYS5CALLS_SEL].gd;

	x = (int) &IDTVEC(syscall);
	gdp->gd_looffset = x++;
	gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
	gdp->gd_stkcpy = 1;
	gdp->gd_type = SDT_SYS386CGT;
	gdp->gd_dpl = SEL_UPL;
	gdp->gd_p = 1;
	gdp->gd_hioffset = ((int) &IDTVEC(syscall)) >>16;

	/* XXX does this work? */
	ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];

	/* transfer to user mode */

	_ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
	_udatasel = LSEL(LUDATA_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	proc0.p_addr->u_pcb.pcb_flags = 0;
	proc0.p_addr->u_pcb.pcb_cr3 = (int)IdlePTD;
	proc0.p_addr->u_pcb.pcb_mpnest = 1;
	proc0.p_addr->u_pcb.pcb_ext = 0;
}

int
ptrace_set_pc(p, addr)
	struct proc *p;
	unsigned int addr;
{
	p->p_md.md_regs->tf_eip = addr;
	return (0);
}

int
ptrace_single_step(p)
	struct proc *p;
{
	p->p_md.md_regs->tf_eflags |= PSL_T;
	return (0);
}

int ptrace_write_u(p, off, data)
	struct proc *p;
	vm_offset_t off;
	int data;
{
	struct trapframe frame_copy;
	vm_offset_t min;
	struct trapframe *tp;

	/*
	 * Privileged kernel state is scattered all over the user area.
	 * Only allow write access to parts of regs and to fpregs.
	 */
	min = (char *)p->p_md.md_regs - (char *)p->p_addr;
	if (off >= min && off <= min + sizeof(struct trapframe) - sizeof(int)) {
		tp = p->p_md.md_regs;
		frame_copy = *tp;
		*(int *)((char *)&frame_copy + (off - min)) = data;
		if (!EFLAGS_SECURE(frame_copy.tf_eflags, tp->tf_eflags) ||
		    !CS_SECURE(frame_copy.tf_cs))
			return (EINVAL);
		*(int*)((char *)p->p_addr + off) = data;
		return (0);
	}
	min = offsetof(struct user, u_pcb) + offsetof(struct pcb, pcb_savefpu);
	if (off >= min && off <= min + sizeof(struct save87) - sizeof(int)) {
		*(int*)((char *)p->p_addr + off) = data;
		return (0);
	}
	return (EFAULT);
}

int
fill_regs(p, regs)
	struct proc *p;
	struct reg *regs;
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = p->p_md.md_regs;
	regs->r_es = tp->tf_es;
	regs->r_ds = tp->tf_ds;
	regs->r_edi = tp->tf_edi;
	regs->r_esi = tp->tf_esi;
	regs->r_ebp = tp->tf_ebp;
	regs->r_ebx = tp->tf_ebx;
	regs->r_edx = tp->tf_edx;
	regs->r_ecx = tp->tf_ecx;
	regs->r_eax = tp->tf_eax;
	regs->r_eip = tp->tf_eip;
	regs->r_cs = tp->tf_cs;
	regs->r_eflags = tp->tf_eflags;
	regs->r_esp = tp->tf_esp;
	regs->r_ss = tp->tf_ss;
	pcb = &p->p_addr->u_pcb;
	regs->r_fs = pcb->pcb_fs;
	regs->r_gs = pcb->pcb_gs;
	return (0);
}

int
set_regs(p, regs)
	struct proc *p;
	struct reg *regs;
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = p->p_md.md_regs;
	if (!EFLAGS_SECURE(regs->r_eflags, tp->tf_eflags) ||
	    !CS_SECURE(regs->r_cs))
		return (EINVAL);
	tp->tf_es = regs->r_es;
	tp->tf_ds = regs->r_ds;
	tp->tf_edi = regs->r_edi;
	tp->tf_esi = regs->r_esi;
	tp->tf_ebp = regs->r_ebp;
	tp->tf_ebx = regs->r_ebx;
	tp->tf_edx = regs->r_edx;
	tp->tf_ecx = regs->r_ecx;
	tp->tf_eax = regs->r_eax;
	tp->tf_eip = regs->r_eip;
	tp->tf_cs = regs->r_cs;
	tp->tf_eflags = regs->r_eflags;
	tp->tf_esp = regs->r_esp;
	tp->tf_ss = regs->r_ss;
	pcb = &p->p_addr->u_pcb;
	pcb->pcb_fs = regs->r_fs;
	pcb->pcb_gs = regs->r_gs;
	return (0);
}

#ifndef DDB
void
Debugger(const char *msg)
{
	printf("Debugger(\"%s\") called.\n", msg);
}
#endif /* no DDB */

#include <sys/disklabel.h>

/*
 * Determine the size of the transfer, and make sure it is
 * within the boundaries of the partition.  Adjust transfer
 * if needed, and signal errors or early completion.
 */
int
bounds_check_with_label(struct buf *bp, struct disklabel *lp, int wlabel)
{
	struct partition *p = lp->d_partitions + dkpart(bp->b_dev);
	int labelsect = lp->d_partitions[0].p_offset;
	int maxsz = p->p_size,
		sz = (bp->b_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT;

	/* overwriting disk label ? */
	/* XXX should also protect bootstrap in first 8K */
	if (bp->b_blkno + p->p_offset <= LABELSECTOR + labelsect &&
#if LABELSECTOR != 0
	    bp->b_blkno + p->p_offset + sz > LABELSECTOR + labelsect &&
#endif
	    (bp->b_flags & B_READ) == 0 && wlabel == 0) {
		bp->b_error = EROFS;
		goto bad;
	}

#if defined(DOSBBSECTOR) && defined(notyet)
	/* overwriting master boot record? */
	if (bp->b_blkno + p->p_offset <= DOSBBSECTOR &&
	    (bp->b_flags & B_READ) == 0 && wlabel == 0) {
		bp->b_error = EROFS;
		goto bad;
	}
#endif

	/* beyond partition? */
	if (bp->b_blkno < 0 || bp->b_blkno + sz > maxsz) {
		/* if exactly at end of disk, return an EOF */
		if (bp->b_blkno == maxsz) {
			bp->b_resid = bp->b_bcount;
			return(0);
		}
		/* or truncate if part of it fits */
		sz = maxsz - bp->b_blkno;
		if (sz <= 0) {
			bp->b_error = EINVAL;
			goto bad;
		}
		bp->b_bcount = sz << DEV_BSHIFT;
	}

	bp->b_pblkno = bp->b_blkno + p->p_offset;
	return(1);

bad:
	bp->b_flags |= B_ERROR;
	return(-1);
}

#ifdef DDB

/*
 * Provide inb() and outb() as functions.  They are normally only
 * available as macros calling inlined functions, thus cannot be
 * called inside DDB.
 *
 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined.
 */

#undef inb
#undef outb

/* silence compiler warnings */
u_char inb(u_int);
void outb(u_int, u_char);

u_char
inb(u_int port)
{
	u_char	data;
	/*
	 * We use %%dx and not %1 here because i/o is done at %dx and not at
	 * %edx, while gcc generates inferior code (movw instead of movl)
	 * if we tell it to load (u_short) port.
	 */
	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

void
outb(u_int port, u_char data)
{
	u_char	al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

#endif /* DDB */