machdep.c revision 48405

/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 *	$Id: machdep.c,v 1.346 1999/07/01 18:27:15 peter Exp $
 */

#include "apm.h"
#include "ether.h"
#include "npx.h"
#include "opt_atalk.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_ipx.h"
#include "opt_maxmem.h"
#include "opt_msgbuf.h"
#include "opt_perfmon.h"
#include "opt_smp.h"
#include "opt_sysvipc.h"
#include "opt_user_ldt.h"
#include "opt_userconfig.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/bus.h>

#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#ifdef SYSVMSG
#include <sys/msg.h>
#endif

#ifdef SYSVSEM
#include <sys/sem.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/user.h>
#include <sys/exec.h>

#include <ddb/ddb.h>

#include <net/netisr.h>

#include <machine/cpu.h>
#include <machine/reg.h>
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/cons.h>
#include <machine/bootinfo.h>
#include <machine/ipl.h>
#include <machine/md_var.h>
#include <machine/pcb_ext.h>		/* pcb.h included via sys/user.h */
#ifdef SMP
#include <machine/smp.h>
#include <machine/globaldata.h>
#endif
#ifdef PERFMON
#include <machine/perfmon.h>
#endif

#ifdef OLD_BUS_ARCH
#include <i386/isa/isa_device.h>
#endif
#include <i386/isa/intr_machdep.h>
#include <isa/rtc.h>
#include <machine/vm86.h>
#include <machine/random.h>
#include <sys/ptrace.h>

extern void init386 __P((int first));
extern void dblfault_handler __P((void));

extern void printcpuinfo(void);	/* XXX header file */
extern void earlysetcpuclass(void);	/* same header file */
extern void finishidentcpu(void);
extern void panicifcpuunsupported(void);
extern void initializecpu(void);

static void cpu_startup __P((void *));
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)

static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");

int	_udatasel, _ucodesel;
u_int	atdevbase;

#if defined(SWTCH_OPTIM_STATS)
extern int swtch_optim_stats;
SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats,
	CTLFLAG_RD, &swtch_optim_stats, 0, "");
SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count,
	CTLFLAG_RD, &tlb_flush_count, 0, "");
#endif

#ifdef PC98
static int	ispc98 = 1;
#else
static int	ispc98 = 0;
#endif
SYSCTL_INT(_machdep, OID_AUTO, ispc98, CTLFLAG_RD, &ispc98, 0, "");

int physmem = 0;
int cold = 1;

static int
sysctl_hw_physmem SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp, 0, ctob(physmem), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_physmem, "I", "");

static int
sysctl_hw_usermem SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp, 0,
		ctob(physmem - cnt.v_wire_count), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_usermem, "I", "");

static int
sysctl_hw_availpages SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp, 0,
		i386_btop(avail_end - avail_start), req);
	return (error);
}

SYSCTL_PROC(_hw, OID_AUTO, availpages, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_availpages, "I", "");

static int
sysctl_machdep_msgbuf SYSCTL_HANDLER_ARGS
{
	int error;

	/*
	 * Unwind the buffer, so that it's linear (possibly starting with
	 * some initial nulls).
	 */
	error = sysctl_handle_opaque(oidp, msgbufp->msg_ptr + msgbufp->msg_bufr,
	    msgbufp->msg_size - msgbufp->msg_bufr, req);
	if (error)
		return (error);
	if (msgbufp->msg_bufr > 0) {
		error = sysctl_handle_opaque(oidp, msgbufp->msg_ptr,
		    msgbufp->msg_bufr, req);
	}
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, msgbuf, CTLTYPE_STRING|CTLFLAG_RD,
	0, 0, sysctl_machdep_msgbuf, "A", "Contents of kernel message buffer");
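
/*
 * Illustrative sketch of the unwinding done above: the message buffer is
 * circular, so a snapshot is returned as two copies that together form
 * one linear string (possibly with leading nulls if the buffer has not
 * wrapped yet):
 *
 *	msg_ptr:   [ newer data... | older data............ ]
 *	            0         bufr^                 msg_size
 *	copy 1:                    [bufr, msg_size)
 *	copy 2:    [0, bufr)
 */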
static int msgbuf_clear;

static int
sysctl_machdep_msgbuf_clear SYSCTL_HANDLER_ARGS
{
	int error;
	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
		req);
	if (!error && req->newptr) {
		/* Clear the buffer and reset write pointer */
		bzero(msgbufp->msg_ptr, msgbufp->msg_size);
		msgbufp->msg_bufr = msgbufp->msg_bufx = 0;
		msgbuf_clear = 0;
	}
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, msgbuf_clear, CTLTYPE_INT|CTLFLAG_RW,
	&msgbuf_clear, 0, sysctl_machdep_msgbuf_clear, "I",
	"Clear kernel message buffer");

int bootverbose = 0, Maxmem = 0;
long dumplo;

vm_offset_t phys_avail[10];

/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)

static vm_offset_t buffer_sva, buffer_eva;
vm_offset_t clean_sva, clean_eva;
static vm_offset_t pager_sva, pager_eva;
#if NNPX > 0
extern struct isa_driver npxdriver;
#endif

#define offsetof(type, member)	((size_t)(&((type *)0)->member))

static void
cpu_startup(dummy)
	void *dummy;
{
	register unsigned i;
	register caddr_t v;
	vm_offset_t maxaddr;
	vm_size_t size = 0;
	int firstaddr;
	vm_offset_t minaddr;

	if (boothowto & RB_VERBOSE)
		bootverbose++;

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	earlysetcpuclass();
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory = %u (%uK bytes)\n", ptoa(Maxmem), ptoa(Maxmem) / 1024);
	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			int size1 = phys_avail[indx + 1] - phys_avail[indx];

			printf("0x%08x - 0x%08x, %u bytes (%u pages)\n",
			    phys_avail[indx], phys_avail[indx + 1] - 1, size1,
			    size1 / PAGE_SIZE);
		}
	}

	/*
	 * Calculate callout wheel size
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;
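
	/*
	 * Worked example (illustrative): with ncallout = 200 the loop
	 * doubles callwheelsize through 1, 2, 4, ... 256 and stops, leaving
	 * callwheelsize = 256 and callwheelbits = 8.  callwheelmask = 0xff
	 * then lets the callout code pick a wheel bucket with a cheap
	 * `ticks & callwheelmask' instead of a modulus.
	 */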
	/*
	 * Allocate space for system data structures.
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
	 * As pages of memory are allocated and cleared,
	 * "firstaddr" is incremented.
	 * An index into the kernel page table corresponding to the
	 * virtual memory address maintained in "v" is kept in "mapaddr".
	 */

	/*
	 * Make two passes.  The first pass calculates how much memory is
	 * needed and allocates it.  The second pass assigns virtual
	 * addresses to the various data structures.
	 */
	firstaddr = 0;
again:
	v = (caddr_t)firstaddr;

#define	valloc(name, type, num) \
	    (name) = (type *)v; v = (caddr_t)((name)+(num))
#define	valloclim(name, type, num, lim) \
	    (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num)))

	valloc(callout, struct callout, ncallout);
	valloc(callwheel, struct callout_tailq, callwheelsize);
#ifdef SYSVSHM
	valloc(shmsegs, struct shmid_ds, shminfo.shmmni);
#endif
#ifdef SYSVSEM
	valloc(sema, struct semid_ds, seminfo.semmni);
	valloc(sem, struct sem, seminfo.semmns);
	/* This is pretty disgusting! */
	valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int));
#endif
#ifdef SYSVMSG
	valloc(msgpool, char, msginfo.msgmax);
	valloc(msgmaps, struct msgmap, msginfo.msgseg);
	valloc(msghdrs, struct msg, msginfo.msgtql);
	valloc(msqids, struct msqid_ds, msginfo.msgmni);
#endif

	if (nbuf == 0) {
		nbuf = 30;
		if (physmem > 1024)
			nbuf += min((physmem - 1024) / 8, 2048);
	}
	nswbuf = max(min(nbuf/4, 64), 16);

	valloc(swbuf, struct buf, nswbuf);
	valloc(buf, struct buf, nbuf);

	/*
	 * End of first pass, size has been calculated so allocate memory
	 */
	if (firstaddr == 0) {
		size = (vm_size_t)(v - firstaddr);
		firstaddr = (int)kmem_alloc(kernel_map, round_page(size));
		if (firstaddr == 0)
			panic("startup: no room for tables");
		goto again;
	}

	/*
	 * End of second pass, addresses have been assigned
	 */
	if ((vm_size_t)(v - firstaddr) != size)
		panic("startup: table size inconsistency");

	clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva,
			(nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size);
	buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva,
				(nbuf*BKVASIZE));
	pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva,
				(nswbuf*MAXPHYS) + pager_map_size);
	pager_map->system_map = 1;
	exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
				(16*(ARG_MAX+(PAGE_SIZE*3))));
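
	/*
	 * Sketch of the submap layout built above (sizes as computed):
	 *
	 *	kernel_map
	 *	  clean_map  [clean_sva, clean_eva)
	 *	    buffer_map  nbuf * BKVASIZE		(filesystem buffers)
	 *	    pager_map   nswbuf * MAXPHYS + pager_map_size  (pbuf I/O)
	 *	  exec_map   16 * (ARG_MAX + 3 pages)	(execve argument staging)
	 */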
	/*
	 * Finally, allocate mbuf pool.  Since mclrefcnt is an off-size
	 * we use the more space efficient malloc in place of kmem_alloc.
	 */
	{
		vm_offset_t mb_map_size;
		int xclusters;

		/* Allow override of NMBCLUSTERS from the kernel environment */
		if (getenv_int("kern.ipc.nmbclusters", &xclusters) &&
		    xclusters > nmbclusters)
			nmbclusters = xclusters;

		mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES;
		mb_map_size = roundup2(mb_map_size, max(MCLBYTES, PAGE_SIZE));
		mclrefcnt = malloc(mb_map_size / MCLBYTES, M_MBUF, M_NOWAIT);
		bzero(mclrefcnt, mb_map_size / MCLBYTES);
		mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr,
			mb_map_size);
		mb_map->system_map = 1;
	}

	/*
	 * Initialize callouts
	 */
	SLIST_INIT(&callfree);
	for (i = 0; i < ncallout; i++) {
		callout_init(&callout[i]);
		callout[i].c_flags = CALLOUT_LOCAL_ALLOC;
		SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle);
	}

	for (i = 0; i < callwheelsize; i++) {
		TAILQ_INIT(&callwheel[i]);
	}

#if defined(USERCONFIG)
	userconfig();
	cninit();		/* the preferred console may have changed */
#endif

	printf("avail memory = %u (%uK bytes)\n", ptoa(cnt.v_free_count),
	    ptoa(cnt.v_free_count) / 1024);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();

#ifdef SMP
	/*
	 * OK, enough kmem_alloc/malloc state should be up, let's get on with it!
	 */
	mp_start();			/* fire up the APs and APICs */
	mp_announce();
#endif  /* SMP */
}

int
register_netisr(num, handler)
	int num;
	netisr_t *handler;
{

	if (num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs)) ) {
		printf("register_netisr: bad isr number: %d\n", num);
		return (EINVAL);
	}
	netisrs[num] = handler;
	return (0);
}

void
netisr_sysinit(data)
	void *data;
{
	const struct netisrtab *nit;

	nit = (const struct netisrtab *)data;
	register_netisr(nit->nit_num, nit->nit_isr);
}
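
/*
 * netisr_sysinit() is the SYSINIT hook consuming the struct netisrtab
 * produced by the NETISR_SET() macro in <net/netisr.h>; a protocol
 * typically registers its software interrupt handler with something
 * like (illustrative):
 *
 *	NETISR_SET(NETISR_IP, ipintr);
 *
 * which arranges for register_netisr(NETISR_IP, ipintr) to run at boot.
 */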
/*
 * Send an interrupt to process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig, mask;
	u_long code;
{
	register struct proc *p = curproc;
	register struct trapframe *regs;
	register struct sigframe *fp;
	struct sigframe sf;
	struct sigacts *psp = p->p_sigacts;
	int oonstack;

	regs = p->p_md.md_regs;
	oonstack = psp->ps_sigstk.ss_flags & SS_ONSTACK;
	/*
	 * Allocate and validate space for the signal handler context.
	 */
	if ((psp->ps_flags & SAS_ALTSTACK) && !oonstack &&
	    (psp->ps_sigonstack & sigmask(sig))) {
		fp = (struct sigframe *)(psp->ps_sigstk.ss_sp +
		    psp->ps_sigstk.ss_size - sizeof(struct sigframe));
		psp->ps_sigstk.ss_flags |= SS_ONSTACK;
	} else {
		fp = (struct sigframe *)regs->tf_esp - 1;
	}

	/*
	 * grow() will return FALSE if the fp will not fit inside the stack
	 * and the stack can not be grown.  useracc will return FALSE
	 * if access is denied.
	 */
	if ((grow_stack (p, (int)fp) == FALSE) ||
	    (useracc((caddr_t)fp, sizeof(struct sigframe), B_WRITE) == FALSE)) {
		/*
		 * Process has trashed its stack; give it an illegal
		 * instruction to halt it in its tracks.
		 */
		SIGACTION(p, SIGILL) = SIG_DFL;
		sig = sigmask(SIGILL);
		p->p_sigignore &= ~sig;
		p->p_sigcatch &= ~sig;
		p->p_sigmask &= ~sig;
		psignal(p, SIGILL);
		return;
	}

	/*
	 * Build the argument list for the signal handler.
	 */
	if (p->p_sysent->sv_sigtbl) {
		if (sig < p->p_sysent->sv_sigsize)
			sig = p->p_sysent->sv_sigtbl[sig];
		else
			sig = p->p_sysent->sv_sigsize + 1;
	}
	sf.sf_signum = sig;
	sf.sf_code = code;
	sf.sf_scp = &fp->sf_sc;
	sf.sf_addr = (char *) regs->tf_err;
	sf.sf_handler = catcher;

	/* save scratch registers */
	sf.sf_sc.sc_eax = regs->tf_eax;
	sf.sf_sc.sc_ebx = regs->tf_ebx;
	sf.sf_sc.sc_ecx = regs->tf_ecx;
	sf.sf_sc.sc_edx = regs->tf_edx;
	sf.sf_sc.sc_esi = regs->tf_esi;
	sf.sf_sc.sc_edi = regs->tf_edi;
	sf.sf_sc.sc_cs = regs->tf_cs;
	sf.sf_sc.sc_ds = regs->tf_ds;
	sf.sf_sc.sc_ss = regs->tf_ss;
	sf.sf_sc.sc_es = regs->tf_es;
	sf.sf_sc.sc_fs = regs->tf_fs;
	sf.sf_sc.sc_isp = regs->tf_isp;

	/*
	 * Build the signal context to be used by sigreturn.
	 */
	sf.sf_sc.sc_onstack = oonstack;
	sf.sf_sc.sc_mask = mask;
	sf.sf_sc.sc_sp = regs->tf_esp;
	sf.sf_sc.sc_fp = regs->tf_ebp;
	sf.sf_sc.sc_pc = regs->tf_eip;
	sf.sf_sc.sc_ps = regs->tf_eflags;
	sf.sf_sc.sc_trapno = regs->tf_trapno;
	sf.sf_sc.sc_err = regs->tf_err;

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86;

		sf.sf_sc.sc_gs = tf->tf_vm86_gs;
		sf.sf_sc.sc_fs = tf->tf_vm86_fs;
		sf.sf_sc.sc_es = tf->tf_vm86_es;
		sf.sf_sc.sc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_sc.sc_ps = (tf->tf_eflags & ~(PSL_VIF | PSL_VIP))
			    | (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * We should never have PSL_T set when returning from vm86
		 * mode.  It may be set here if we deliver a signal before
		 * getting to vm86 mode, so turn it off.
		 *
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, fp, sizeof(struct sigframe)) != 0) {
		/*
		 * Something is wrong with the stack pointer.
		 * ...Kill the process.
		 */
		sigexit(p, SIGILL);
	}

	regs->tf_esp = (int)fp;
	regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
}
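
/*
 * Note (assuming the usual BSD exec layout): the new %eip computed above
 * points at the signal trampoline, the szsigcode bytes of sigcode that
 * exec copies to the top of the user stack just below PS_STRINGS.  The
 * trampoline calls the handler with the arguments built in sf and then
 * issues the sigreturn(2) handled below.
 */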
/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 */
int
sigreturn(p, uap)
	struct proc *p;
	struct sigreturn_args /* {
		struct sigcontext *sigcntxp;
	} */ *uap;
{
	register struct sigcontext *scp;
	register struct sigframe *fp;
	register struct trapframe *regs = p->p_md.md_regs;
	int eflags;

	/*
	 * (XXX old comment) regs->tf_esp points to the return address.
	 * The user scp pointer is above that.
	 * The return address is faked in the signal trampoline code
	 * for consistency.
	 */
	scp = uap->sigcntxp;
	fp = (struct sigframe *)
	    ((caddr_t)scp - offsetof(struct sigframe, sf_sc));

	if (useracc((caddr_t)fp, sizeof (*fp), B_WRITE) == 0)
		return(EFAULT);

	eflags = scp->sc_ps;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (p->p_addr->u_pcb.pcb_ext == 0)
			return (EINVAL);
		vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* go back to user mode if both flags are set */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(p, SIGBUS, 0);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		tf->tf_vm86_ds = scp->sc_ds;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
#define	EFLAGS_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFLAGS_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
#ifdef DEBUG
			printf("sigreturn: eflags = 0x%x\n", eflags);
#endif
			return(EINVAL);
		}
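
		/*
		 * Example of the check (illustrative): PSL_USERCHANGE
		 * covers only the flags a user may legitimately toggle
		 * (the arithmetic flags, PSL_T and friends).  If a handler
		 * rewrites sc_ps to raise IOPL (eflags bits 12-13, which
		 * are not in PSL_USERCHANGE), then (ef ^ oef) &
		 * ~PSL_USERCHANGE is nonzero and the sigreturn fails with
		 * EINVAL instead of granting I/O privilege.
		 */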
		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
#define	CS_SECURE(cs)	(ISPL(cs) == SEL_UPL)
		if (!CS_SECURE(scp->sc_cs)) {
#ifdef DEBUG
			printf("sigreturn: cs = 0x%x\n", scp->sc_cs);
#endif
			trapsignal(p, SIGBUS, T_PROTFLT);
			return(EINVAL);
		}
		regs->tf_ds = scp->sc_ds;
		regs->tf_es = scp->sc_es;
		regs->tf_fs = scp->sc_fs;
	}

	/* restore scratch registers */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;

	if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0)
		return(EINVAL);

	if (scp->sc_onstack & 01)
		p->p_sigacts->ps_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigacts->ps_sigstk.ss_flags &= ~SS_ONSTACK;
	p->p_sigmask = scp->sc_mask & ~sigcantmask;
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;
	return(EJUSTRETURN);
}

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		__asm__ ("hlt");
}

/*
 * Clear registers on exec
 */
void
setregs(p, entry, stack, ps_strings)
	struct proc *p;
	u_long entry;
	u_long stack;
	u_long ps_strings;
{
	struct trapframe *regs = p->p_md.md_regs;
	struct pcb *pcb = &p->p_addr->u_pcb;

#ifdef USER_LDT
	/* was i386_user_cleanup() in NetBSD */
	if (pcb->pcb_ldt) {
		if (pcb == curpcb) {
			lldt(_default_ldt);
			currentldt = _default_ldt;
		}
		kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ldt,
			pcb->pcb_ldt_len * sizeof(union descriptor));
		pcb->pcb_ldt_len = (int)pcb->pcb_ldt = 0;
	}
#endif

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = entry;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = ps_strings;

	/* reset %gs as well */
	pcb->pcb_gs = _udatasel;
	if (pcb == curpcb) {
		load_gs(_udatasel);
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	p->p_addr->u_pcb.pcb_flags &= ~FP_SOFTFP;

	/*
	 * Arrange to trap the next npx or `fwait' instruction (see npx.c
	 * for why fwait must be trapped at least if there is an npx or an
	 * emulator).  This is mainly to handle the case where npx0 is not
	 * configured, since the npx routines normally set up the trap
	 * otherwise.  It should be done only at boot time, but doing it
	 * here allows modifying `npx_exists' for testing the emulator on
	 * systems with an npx.
	 */
	load_cr0(rcr0() | CR0_MP | CR0_TS);

#if NNPX > 0
	/* Initialize the npx (if any) for the current process. */
	npxinit(__INITIAL_NPXCW__);
#endif

	/*
	 * XXX - Linux emulator
	 * Make sure edx is 0x0 on entry.  Linux binaries depend
	 * on it.
	 */
	p->p_retval[1] = 0;
}

static int
sysctl_machdep_adjkerntz SYSCTL_HANDLER_ARGS
{
	int error;
	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
		req);
	if (!error && req->newptr)
		resettodr();
	return (error);
}

SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
	&adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");

SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set,
	CTLFLAG_RW, &disable_rtc_set, 0, "");

SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo,
	CTLFLAG_RD, &bootinfo, bootinfo, "");

SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock,
	CTLFLAG_RW, &wall_cmos_clock, 0, "");
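
/*
 * Usage note (illustrative): machdep.adjkerntz is the offset in seconds
 * between the battery-backed CMOS clock and UTC, typically maintained by
 * adjkerntz(8).  Writing the sysctl triggers resettodr() in the handler
 * above, so the new offset is pushed back into the RTC immediately.
 */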

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int _default_ldt;
#ifdef SMP
union descriptor gdt[NGDT * NCPU];	/* global descriptor table */
#else
union descriptor gdt[NGDT];		/* global descriptor table */
#endif
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */
#ifdef SMP
/* table descriptors - used to load tables by the microprocessor */
struct region_descriptor r_gdt, r_idt;
#endif

#ifndef SMP
extern struct segment_descriptor common_tssd, *tss_gdt;
#endif
int private_tss;			/* flag indicating private tss */

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];

extern struct user *proc0paddr;

/* software prototypes -- in more palatable form */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	0x0,			/* segment base address */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GCODE_SEL	1 Code Descriptor for kernel */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GDATA_SEL	2 Data Descriptor for kernel */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GPRIV_SEL	3 SMP Per-Processor Private Data Descriptor */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GPROC0_SEL	4 Proc 0 Tss Descriptor */
{
	0x0,			/* segment base address */
	sizeof(struct i386tss)-1,/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GLDT_SEL	5 LDT Descriptor */
{	(int) ldt,		/* segment base address */
	sizeof(ldt)-1,		/* length - all address space */
	SDT_SYSLDT,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GUSERLDT_SEL	6 User LDT Descriptor per process */
{	(int) ldt,		/* segment base address */
	(512 * sizeof(union descriptor)-1),	/* length */
	SDT_SYSLDT,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GTGATE_SEL	7 Null Descriptor - Placeholder */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GPANIC_SEL	8 Panic Tss Descriptor */
{	(int) &dblfault_tss,	/* segment base address */
	sizeof(struct i386tss)-1,/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GAPMCODE32_SEL 9 APM BIOS 32-bit interface (32bit Code) */
{	0,			/* segment base address (overwritten by APM) */
	0xfffff,		/* length */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GAPMCODE16_SEL 10 APM BIOS 32-bit interface (16bit Code) */
{	0,			/* segment base address (overwritten by APM) */
	0xfffff,		/* length */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GAPMDATA_SEL	11 APM BIOS 32-bit interface (Data) */
{	0,			/* segment base address (overwritten by APM) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
};

static struct soft_segment_descriptor ldt_segs[] = {
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Code Descriptor for user */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Data Descriptor for user */
{	0x0,			/* segment base address */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
};

void
setidt(idx, func, typ, dpl, selec)
	int idx;
	inthand_t *func;
	int typ;
	int dpl;
	int selec;
{
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = (int)func;
	ip->gd_selector = selec;
	ip->gd_stkcpy = 0;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((int)func) >> 16;
}

#define	IDTVEC(name)	__CONCAT(X,name)

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(syscall), IDTVEC(int0x80_syscall);

void
sdtossd(sd, ssd)
	struct segment_descriptor *sd;
	struct soft_segment_descriptor *ssd;
{
	ssd->ssd_base  = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type  = sd->sd_type;
	ssd->ssd_dpl   = sd->sd_dpl;
	ssd->ssd_p     = sd->sd_p;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran  = sd->sd_gran;
}

#define PHYSMAP_SIZE	(2 * 8)
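
/*
 * physmap[] below is filled as (start, end) pairs of physical addresses,
 * so PHYSMAP_SIZE entries describe at most 8 contiguous regions;
 * physmap_idx always indexes the start of the last pair in use.  For
 * example (illustrative), a plain machine with 640K base and 64M total
 * ends up with:
 *
 *	physmap[0,1] = { 0, 640K }	base memory
 *	physmap[2,3] = { 1M, ~64M }	extended memory
 */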

static void
getmemsize(int first)
{
	int i, physmap_idx, pa_indx;
	u_int basemem, extmem;
	int speculative_mprobe = FALSE;
	struct vm86frame vmf;
	struct vm86context vmc;
	vm_offset_t pa, physmap[PHYSMAP_SIZE];
	pt_entry_t pte;
	struct {
		u_int64_t base;
		u_int64_t length;
		u_int32_t type;
	} *smap;
	int msize;

	bzero(&vmf, sizeof(struct vm86frame));
	bzero(physmap, sizeof(physmap));

	vm86_intcall(0x12, &vmf);
	basemem = vmf.vmf_ax;
	if (basemem > 640) {
		printf("Preposterous BIOS basemem of %uK, truncating to 640K\n",
			basemem);
		basemem = 640;
	}

	/*
	 * XXX if biosbasemem is now < 640, there is `hole'
	 * between the end of base memory and the start of
	 * ISA memory.  The hole may be empty or it may
	 * contain BIOS code or data.  Map it read/write so
	 * that the BIOS can write to it.  (Memory from 0 to
	 * the physical end of the kernel is mapped read-only
	 * to begin with and then parts of it are remapped.
	 * The parts that aren't remapped form holes that
	 * remain read-only and are unused by the kernel.
	 * The base memory area is below the physical end of
	 * the kernel and right now forms a read-only hole.
	 * The part of it from PAGE_SIZE to
	 * (trunc_page(biosbasemem * 1024) - 1) will be
	 * remapped and used by the kernel later.)
	 *
	 * This code is similar to the code used in
	 * pmap_mapdev, but since no memory needs to be
	 * allocated we simply change the mapping.
	 */
	for (pa = trunc_page(basemem * 1024);
	     pa < ISA_HOLE_START; pa += PAGE_SIZE) {
		pte = (pt_entry_t)vtopte(pa + KERNBASE);
		*pte = pa | PG_RW | PG_V;
	}

	/*
	 * if basemem != 640, map pages r/w into vm86 page table so
	 * that the bios can scribble on it.
	 */
	pte = (pt_entry_t)vm86paddr;
	for (i = basemem / 4; i < 160; i++)
		pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U;

	/*
	 * map page 1 R/W into the kernel page table so we can use it
	 * as a buffer.  The kernel will unmap this page later.
	 */
	pte = (pt_entry_t)vtopte(KERNBASE + (1 << PAGE_SHIFT));
	*pte = (1 << PAGE_SHIFT) | PG_RW | PG_V;

	/*
	 * get memory map with INT 15:E820
	 */
#define SMAPSIZ		sizeof(*smap)
#define SMAP_SIG	0x534D4150			/* 'SMAP' */

	vmc.npages = 0;
	smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
	vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);

	physmap_idx = 0;
	vmf.vmf_ebx = 0;
	do {
		vmf.vmf_eax = 0xE820;
		vmf.vmf_edx = SMAP_SIG;
		vmf.vmf_ecx = SMAPSIZ;
		i = vm86_datacall(0x15, &vmf, &vmc);
		if (i || vmf.vmf_eax != SMAP_SIG)
			break;
		if (boothowto & RB_VERBOSE)
			printf("SMAP type=%02x base=%08x %08x len=%08x %08x\n",
				smap->type,
				*(u_int32_t *)((char *)&smap->base + 4),
				(u_int32_t)smap->base,
				*(u_int32_t *)((char *)&smap->length + 4),
				(u_int32_t)smap->length);

		if (smap->type != 0x01)
			goto next_run;

		if (smap->length == 0)
			goto next_run;

		if (smap->base > 0xffffffff) {
			printf("%dK of memory above 4GB ignored\n",
			    (u_int32_t)(smap->length / 1024));
			goto next_run;
		}

		for (i = 0; i <= physmap_idx; i += 2) {
			if (smap->base < physmap[i + 1]) {
				if (boothowto & RB_VERBOSE)
					printf(
	"Overlapping or non-monotonic memory region, ignoring second region\n");
				goto next_run;
			}
		}

		if (smap->base == physmap[physmap_idx + 1]) {
			physmap[physmap_idx + 1] += smap->length;
			goto next_run;
		}

		physmap_idx += 2;
		if (physmap_idx == PHYSMAP_SIZE) {
			printf(
		"Too many segments in the physical address map, giving up\n");
			break;
		}
		physmap[physmap_idx] = smap->base;
		physmap[physmap_idx + 1] = smap->base + smap->length;
next_run:
		;		/* a label must precede a statement */
	} while (vmf.vmf_ebx != 0);
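
	/*
	 * The loop above follows the BIOS E820 protocol: calling INT 15h
	 * with %eax = 0xE820, %edx = 'SMAP', a buffer size in %ecx and a
	 * continuation cookie in %ebx (0 on the first call) returns one
	 * 20-byte address-range descriptor at %es:%di -- a 64-bit base, a
	 * 64-bit length and a 32-bit type, matching the layout of *smap
	 * above.  Type 1 means usable RAM; %ebx comes back 0 after the
	 * final entry.
	 */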

	if (physmap[1] != 0)
		goto physmap_done;

	/*
	 * try memory map with INT 15:E801
	 */
	vmf.vmf_ax = 0xE801;
	if (vm86_intcall(0x15, &vmf) == 0) {
		extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
	} else {
#if 0
		vmf.vmf_ah = 0x88;
		vm86_intcall(0x15, &vmf);
		extmem = vmf.vmf_ax;
#else
		/*
		 * Prefer the RTC value for extended memory.
		 */
		extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
#endif
	}

	/*
	 * Only perform calculations in this section if there is no system
	 * map; any system new enough to support SMAP probably does not
	 * need these workarounds.
	 */
	/*
	 * Special hack for chipsets that still remap the 384k hole when
	 * there's 16MB of memory - this really confuses people that
	 * are trying to use bus mastering ISA controllers with the
	 * "16MB limit"; they only have 16MB, but the remapping puts
	 * them beyond the limit.
	 */
	/*
	 * If extended memory is between 15-16MB (16-17MB phys address range),
	 * chop it to 15MB.
	 */
	if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
		extmem = 15 * 1024;

	physmap[0] = 0;
	physmap[1] = basemem * 1024;
	physmap_idx = 2;
	physmap[physmap_idx] = 0x100000;
	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;

	/*
	 * Indicate that we wish to do a speculative search for memory
	 * beyond the end of the reported size if the indicated amount
	 * is 64M (or more).
	 *
	 * XXX we should only do this in the RTC / 0x88 case
	 */
	if (extmem >= 16 * 1024)
		speculative_mprobe = TRUE;

physmap_done:
	/*
	 * Now, physmap contains a map of physical memory.
	 */

#ifdef SMP
	/* make hole for AP bootstrap code */
	physmap[1] = mp_bootaddress(physmap[1] / 1024);
#endif

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.  It should be
	 * called something like "Maxphyspage".
	 */
	Maxmem = physmap[physmap_idx + 1] / PAGE_SIZE;

	/*
	 * If a specific amount of memory is indicated via the MAXMEM
	 * option or the npx0 "msize", then don't do the speculative
	 * memory probe.
	 */
#ifdef MAXMEM
	Maxmem = MAXMEM / 4;
	speculative_mprobe = FALSE;
#endif
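
	/*
	 * Units note (assumed from the /4 scaling): the MAXMEM option,
	 * like the npx0 "msize" hint and the MAXMEM environment override
	 * below, is given in kilobytes, so dividing by 4 converts it to
	 * the PAGE_SIZE (4K) pages that Maxmem is kept in.
	 */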

#if NNPX > 0
	if (resource_int_value("npx", 0, "msize", &msize) == 0) {
		if (msize != 0) {
			Maxmem = msize / 4;
			speculative_mprobe = FALSE;
		}
	}
#endif

	/* Allow final override from the kernel environment */
	if (getenv_int("MAXMEM", &msize)) {
		if (msize != 0) {
			Maxmem = msize / 4;
			speculative_mprobe = FALSE;
		}
	}

#ifdef SMP
	/* look for the MP hardware - needed for apic addresses */
	mp_probe();
#endif

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(first, 0);

	/*
	 * Size up each available chunk of physical memory.
	 */
	physmap[0] = PAGE_SIZE;		/* mask off page 0 */
	pa_indx = 0;
	phys_avail[pa_indx++] = physmap[0];
	phys_avail[pa_indx] = physmap[0];
#if 0
	pte = (pt_entry_t)vtopte(KERNBASE);
#else
	pte = (pt_entry_t)CMAP1;
#endif

	/*
	 * physmap is in bytes, so when converting to page boundaries,
	 * round up the start address and round down the end address.
	 */
	for (i = 0; i <= physmap_idx; i += 2) {
		vm_offset_t end;

		end = ptoa(Maxmem);
		if (physmap[i + 1] < end)
			end = trunc_page(physmap[i + 1]);
		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
			int tmp, page_bad;
#if 0
			int *ptr = 0;
#else
			int *ptr = (int *)CADDR1;
#endif

			/*
			 * block out kernel memory as not available.
			 */
			if (pa >= 0x100000 && pa < first)
				continue;

			page_bad = FALSE;

			/*
			 * map page into kernel: valid, read/write, non-cacheable
			 */
			*pte = pa | PG_V | PG_RW | PG_N;
			invltlb();

			tmp = *(int *)ptr;
			/*
			 * Test for alternating 1's and 0's
			 */
			*(volatile int *)ptr = 0xaaaaaaaa;
			if (*(volatile int *)ptr != 0xaaaaaaaa) {
				page_bad = TRUE;
			}
			/*
			 * Test for alternating 0's and 1's
			 */
			*(volatile int *)ptr = 0x55555555;
			if (*(volatile int *)ptr != 0x55555555) {
				page_bad = TRUE;
			}
			/*
			 * Test for all 1's
			 */
			*(volatile int *)ptr = 0xffffffff;
			if (*(volatile int *)ptr != 0xffffffff) {
				page_bad = TRUE;
			}
			/*
			 * Test for all 0's
			 */
			*(volatile int *)ptr = 0x0;
			if (*(volatile int *)ptr != 0x0) {
				page_bad = TRUE;
			}
			/*
			 * Restore original value.
			 */
			*(int *)ptr = tmp;

			/*
			 * Adjust array of valid/good pages.
			 */
			if (page_bad == TRUE) {
				continue;
			}
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer.  Otherwise start a new chunk.
			 * Note that "end" points one higher than end,
			 * making the range >= start and < end.
			 * If we're also doing a speculative memory
			 * test and we're at or past the end, bump up Maxmem
			 * so that we keep going.  The first bad page
			 * will terminate the loop.
			 */
			if (phys_avail[pa_indx] == pa) {
				phys_avail[pa_indx] += PAGE_SIZE;
				if (speculative_mprobe == TRUE &&
				    phys_avail[pa_indx] >= (64*1024*1024))
					end += PAGE_SIZE;
			} else {
				pa_indx++;
				if (pa_indx == PHYS_AVAIL_ARRAY_END) {
					printf("Too many holes in the physical address space, giving up\n");
					pa_indx--;
					break;
				}
				phys_avail[pa_indx++] = pa;	/* start */
				phys_avail[pa_indx] = pa + PAGE_SIZE;	/* end */
			}
			physmem++;
		}
	}
	*pte = 0;
	invltlb();

	/*
	 * XXX
	 * The last chunk must contain at least one page plus the message
	 * buffer to avoid complicating other code (message buffer address
	 * calculation, etc.).
	 */
	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
	    round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
		phys_avail[pa_indx--] = 0;
		phys_avail[pa_indx--] = 0;
	}

	Maxmem = atop(phys_avail[pa_indx]);

	/* Trim off space for the message buffer. */
	phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);

	avail_end = phys_avail[pa_indx];
}
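
/*
 * On return from getmemsize(), phys_avail[] holds (start, end) pairs of
 * tested RAM terminated by a pair of zeroes (see PHYS_AVAIL_ARRAY_END),
 * with round_page(MSGBUF_SIZE) bytes shaved off the top of the last
 * chunk; init386() below maps that reserved region at msgbufp.
 */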

void
init386(first)
	int first;
{
	int x;
	struct gate_descriptor *gdp;
	int gsel_tss;
#ifndef SMP
	/* table descriptors - used to load tables by the microprocessor */
	struct region_descriptor r_gdt, r_idt;
#endif
	int off;

	/*
	 * Prevent lowering of the ipl if we call tsleep() early.
	 */
	safepri = cpl;

	proc0.p_addr = proc0paddr;

	atdevbase = ISA_HOLE_START + KERNBASE;

	if (bootinfo.bi_modulep) {
		preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
		preload_bootstrap_relocate(KERNBASE);
	}
	if (bootinfo.bi_envp)
		kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;

	/*
	 * make gdt memory segments, the code segment goes up to end of the
	 * page with etext in it, the data segment goes to the end of
	 * the address space
	 */
	/*
	 * XXX text protection is temporarily (?) disabled.  The limit was
	 * i386_btop(round_page(etext)) - 1.
	 */
	gdt_segs[GCODE_SEL].ssd_limit = i386_btop(0) - 1;
	gdt_segs[GDATA_SEL].ssd_limit = i386_btop(0) - 1;
#ifdef SMP
	gdt_segs[GPRIV_SEL].ssd_limit =
		i386_btop(sizeof(struct privatespace)) - 1;
	gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[0];
	gdt_segs[GPROC0_SEL].ssd_base =
		(int) &SMP_prvspace[0].globaldata.gd_common_tss;
	SMP_prvspace[0].globaldata.gd_prvspace = &SMP_prvspace[0];
#else
	gdt_segs[GPRIV_SEL].ssd_limit = i386_btop(0) - 1;
	gdt_segs[GPROC0_SEL].ssd_base = (int) &common_tss;
#endif

	for (x = 0; x < NGDT; x++) {
#ifdef BDE_DEBUGGER
		/* avoid overwriting db entries with APM ones */
		if (x >= GAPMCODE32_SEL && x <= GAPMDATA_SEL)
			continue;
#endif
		ssdtosd(&gdt_segs[x], &gdt[x].sd);
	}

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (int) gdt;
	lgdt(&r_gdt);

	/* make ldt memory segments */
	/*
	 * The data segment limit must not cover the user area because we
	 * don't want the user area to be writable in copyout() etc. (page
	 * level protection is lost in kernel mode on 386's).  Also, we
	 * don't want the user area to be writable directly (page level
	 * protection of the user area is not available on 486's with
	 * CR0_WP set, because there is no user-read/kernel-write mode).
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  And it
	 * should be spelled ...MAX_USER...
	 */
#define VM_END_USER_RW_ADDRESS	VM_MAXUSER_ADDRESS
	/*
	 * The code segment limit has to cover the user area until we move
	 * the signal trampoline out of the user area.  This is safe because
	 * the code segment cannot be written to directly.
	 */
#define VM_END_USER_R_ADDRESS	(VM_END_USER_RW_ADDRESS + UPAGES * PAGE_SIZE)
	ldt_segs[LUCODE_SEL].ssd_limit = i386_btop(VM_END_USER_R_ADDRESS) - 1;
	ldt_segs[LUDATA_SEL].ssd_limit = i386_btop(VM_END_USER_RW_ADDRESS) - 1;
	for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
		ssdtosd(&ldt_segs[x], &ldt[x].sd);

	_default_ldt = GSEL(GLDT_SEL, SEL_KPL);
	lldt(_default_ldt);
#ifdef USER_LDT
	currentldt = _default_ldt;
#endif

	/* exceptions */
	for (x = 0; x < NIDT; x++)
		setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(1, &IDTVEC(dbg), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(3, &IDTVEC(bpt), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(8, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
	setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(14, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(15, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(17, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(18, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(0x80, &IDTVEC(int0x80_syscall),
			SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));

	r_idt.rd_limit = sizeof(idt0) - 1;
	r_idt.rd_base = (int) idt;
	lidt(&r_idt);
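
	/*
	 * Gate-type notes: SDT_SYS386TGT is a trap gate, which leaves
	 * interrupts enabled on entry; the page fault vector (14) uses
	 * SDT_SYS386IGT, an interrupt gate, so %cr2 can be read before
	 * interrupts are reenabled.  Vectors 3, 4 and 0x80 get DPL
	 * SEL_UPL so that user-mode int3/into/int $0x80 work, and vector
	 * 8 bounces through the GPANIC_SEL task gate onto the separate
	 * double-fault TSS set up below.
	 */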

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

#include "isa.h"
#if NISA > 0
	isa_defaultirq();
#endif
	rand_initialize();

#ifdef DDB
	kdb_init();
	if (boothowto & RB_KDB)
		Debugger("Boot flags requested debugger");
#endif

	finishidentcpu();	/* Final stage of CPU initialization */
	setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	initializecpu();	/* Initialize CPU registers */

	/* make an initial tss so cpu can get interrupt stack on syscall! */
	common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16;
	common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	private_tss = 0;
	tss_gdt = &gdt[GPROC0_SEL].sd;
	common_tssd = *tss_gdt;
	common_tss.tss_ioopt = (sizeof common_tss) << 16;
	ltr(gsel_tss);

	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
	    dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)];
	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
	    dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_cr3 = (int)IdlePTD;
	dblfault_tss.tss_eip = (int) dblfault_handler;
	dblfault_tss.tss_eflags = PSL_KERNEL;
	dblfault_tss.tss_ds = dblfault_tss.tss_es =
	    dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
	dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);

	vm86_initialize();
	getmemsize(first);

	/* now running on new page tables, configured, and u/iom is accessible */

	/* Map the message buffer. */
	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);

	msgbufinit(msgbufp, MSGBUF_SIZE);

	/* make a call gate to reenter kernel with */
	gdp = &ldt[LSYS5CALLS_SEL].gd;

	x = (int) &IDTVEC(syscall);
	gdp->gd_looffset = x++;
	gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
	gdp->gd_stkcpy = 1;
	gdp->gd_type = SDT_SYS386CGT;
	gdp->gd_dpl = SEL_UPL;
	gdp->gd_p = 1;
	gdp->gd_hioffset = ((int) &IDTVEC(syscall)) >> 16;

	/* XXX does this work? */
	ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
	ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];

	/* transfer to user mode */

	_ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
	_udatasel = LSEL(LUDATA_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	proc0.p_addr->u_pcb.pcb_flags = 0;
	proc0.p_addr->u_pcb.pcb_cr3 = (int)IdlePTD;
#ifdef SMP
	proc0.p_addr->u_pcb.pcb_mpnest = 1;
#endif
	proc0.p_addr->u_pcb.pcb_ext = 0;
}

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
static void f00f_hack(void *unused);
SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);

static void
f00f_hack(void *unused)
{
	struct gate_descriptor *new_idt;
#ifndef SMP
	struct region_descriptor r_idt;
#endif
	vm_offset_t tmp;

	if (!has_f00f_bug)
		return;

	printf("Intel Pentium detected, installing workaround for F00F bug\n");

	r_idt.rd_limit = sizeof(idt0) - 1;

	tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
	if (tmp == 0)
		panic("kmem_alloc returned 0");
	if (((unsigned int)tmp & (PAGE_SIZE-1)) != 0)
		panic("kmem_alloc returned non-page-aligned memory");
	/* Put the first seven entries in the lower page */
	new_idt = (struct gate_descriptor*)(tmp + PAGE_SIZE - (7*8));
	bcopy(idt, new_idt, sizeof(idt0));
	r_idt.rd_base = (int)new_idt;
	lidt(&r_idt);
	idt = new_idt;
	if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
			   VM_PROT_READ, FALSE) != KERN_SUCCESS)
		panic("vm_map_protect failed");
	return;
}
#endif /* defined(I586_CPU) && !defined(NO_F00F_HACK) */
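
/*
 * (Mechanism note, as I understand the errata workaround: the first
 * seven descriptors, covering vector 6 / #UD, end the page that is then
 * mapped read-only, so the bogus locked IDT access performed by a
 * "f00f" instruction raises a recoverable page fault instead of wedging
 * the bus; the page fault handler recognizes faults at the IDT address
 * and converts them back into the trap the process should have taken.)
 */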

int
ptrace_set_pc(p, addr)
	struct proc *p;
	unsigned long addr;
{
	p->p_md.md_regs->tf_eip = addr;
	return (0);
}

int
ptrace_single_step(p)
	struct proc *p;
{
	p->p_md.md_regs->tf_eflags |= PSL_T;
	return (0);
}

int
ptrace_read_u_check(p, addr, len)
	struct proc *p;
	vm_offset_t addr;
	size_t len;
{
	vm_offset_t gap;

	if ((vm_offset_t) (addr + len) < addr)
		return EPERM;
	if ((vm_offset_t) (addr + len) <= sizeof(struct user))
		return 0;

	gap = (char *) p->p_md.md_regs - (char *) p->p_addr;

	if ((vm_offset_t) addr < gap)
		return EPERM;
	if ((vm_offset_t) (addr + len) <=
	    (vm_offset_t) (gap + sizeof(struct trapframe)))
		return 0;
	return EPERM;
}

int
ptrace_write_u(p, off, data)
	struct proc *p;
	vm_offset_t off;
	long data;
{
	struct trapframe frame_copy;
	vm_offset_t min;
	struct trapframe *tp;

	/*
	 * Privileged kernel state is scattered all over the user area.
	 * Only allow write access to parts of regs and to fpregs.
	 */
	min = (char *)p->p_md.md_regs - (char *)p->p_addr;
	if (off >= min && off <= min + sizeof(struct trapframe) - sizeof(int)) {
		tp = p->p_md.md_regs;
		frame_copy = *tp;
		*(int *)((char *)&frame_copy + (off - min)) = data;
		if (!EFLAGS_SECURE(frame_copy.tf_eflags, tp->tf_eflags) ||
		    !CS_SECURE(frame_copy.tf_cs))
			return (EINVAL);
		*(int*)((char *)p->p_addr + off) = data;
		return (0);
	}
	min = offsetof(struct user, u_pcb) + offsetof(struct pcb, pcb_savefpu);
	if (off >= min && off <= min + sizeof(struct save87) - sizeof(int)) {
		*(int*)((char *)p->p_addr + off) = data;
		return (0);
	}
	return (EFAULT);
}

int
fill_regs(p, regs)
	struct proc *p;
	struct reg *regs;
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = p->p_md.md_regs;
	regs->r_fs = tp->tf_fs;
	regs->r_es = tp->tf_es;
	regs->r_ds = tp->tf_ds;
	regs->r_edi = tp->tf_edi;
	regs->r_esi = tp->tf_esi;
	regs->r_ebp = tp->tf_ebp;
	regs->r_ebx = tp->tf_ebx;
	regs->r_edx = tp->tf_edx;
	regs->r_ecx = tp->tf_ecx;
	regs->r_eax = tp->tf_eax;
	regs->r_eip = tp->tf_eip;
	regs->r_cs = tp->tf_cs;
	regs->r_eflags = tp->tf_eflags;
	regs->r_esp = tp->tf_esp;
	regs->r_ss = tp->tf_ss;
	pcb = &p->p_addr->u_pcb;
	regs->r_gs = pcb->pcb_gs;
	return (0);
}

int
set_regs(p, regs)
	struct proc *p;
	struct reg *regs;
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = p->p_md.md_regs;
	if (!EFLAGS_SECURE(regs->r_eflags, tp->tf_eflags) ||
	    !CS_SECURE(regs->r_cs))
		return (EINVAL);
	tp->tf_fs = regs->r_fs;
	tp->tf_es = regs->r_es;
	tp->tf_ds = regs->r_ds;
	tp->tf_edi = regs->r_edi;
	tp->tf_esi = regs->r_esi;
	tp->tf_ebp = regs->r_ebp;
	tp->tf_ebx = regs->r_ebx;
	tp->tf_edx = regs->r_edx;
	tp->tf_ecx = regs->r_ecx;
	tp->tf_eax = regs->r_eax;
	tp->tf_eip = regs->r_eip;
	tp->tf_cs = regs->r_cs;
	tp->tf_eflags = regs->r_eflags;
	tp->tf_esp = regs->r_esp;
	tp->tf_ss = regs->r_ss;
	pcb = &p->p_addr->u_pcb;
	pcb->pcb_gs = regs->r_gs;
	return (0);
}

int
fill_fpregs(p, fpregs)
	struct proc *p;
	struct fpreg *fpregs;
{
	bcopy(&p->p_addr->u_pcb.pcb_savefpu, fpregs, sizeof *fpregs);
	return (0);
}

int
set_fpregs(p, fpregs)
	struct proc *p;
	struct fpreg *fpregs;
{
	bcopy(fpregs, &p->p_addr->u_pcb.pcb_savefpu, sizeof *fpregs);
	return (0);
}

#ifndef DDB
void
Debugger(const char *msg)
{
	printf("Debugger(\"%s\") called.\n", msg);
}
#endif /* no DDB */

#include <sys/disklabel.h>

/*
 * Determine the size of the transfer, and make sure it is
 * within the boundaries of the partition.  Adjust transfer
 * if needed, and signal errors or early completion.
 */
int
bounds_check_with_label(struct buf *bp, struct disklabel *lp, int wlabel)
{
	struct partition *p = lp->d_partitions + dkpart(bp->b_dev);
	int labelsect = lp->d_partitions[0].p_offset;
	int maxsz = p->p_size,
		sz = (bp->b_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT;

	/* overwriting disk label ? */
	/* XXX should also protect bootstrap in first 8K */
	if (bp->b_blkno + p->p_offset <= LABELSECTOR + labelsect &&
#if LABELSECTOR != 0
	    bp->b_blkno + p->p_offset + sz > LABELSECTOR + labelsect &&
#endif
	    (bp->b_flags & B_READ) == 0 && wlabel == 0) {
		bp->b_error = EROFS;
		goto bad;
	}

#if	defined(DOSBBSECTOR) && defined(notyet)
	/* overwriting master boot record? */
	if (bp->b_blkno + p->p_offset <= DOSBBSECTOR &&
	    (bp->b_flags & B_READ) == 0 && wlabel == 0) {
		bp->b_error = EROFS;
		goto bad;
	}
#endif

	/* beyond partition? */
	if (bp->b_blkno < 0 || bp->b_blkno + sz > maxsz) {
		/* if exactly at end of disk, return an EOF */
		if (bp->b_blkno == maxsz) {
			bp->b_resid = bp->b_bcount;
			return(0);
		}
		/* or truncate if part of it fits */
		sz = maxsz - bp->b_blkno;
		if (sz <= 0) {
			bp->b_error = EINVAL;
			goto bad;
		}
		bp->b_bcount = sz << DEV_BSHIFT;
	}

	bp->b_pblkno = bp->b_blkno + p->p_offset;
	return(1);

bad:
	bp->b_flags |= B_ERROR;
	return(-1);
}

#ifdef DDB

/*
 * Provide inb() and outb() as functions.  They are normally only
 * available as macros calling inlined functions, thus cannot be
 * called inside DDB.
 *
 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined.
 */

#undef inb
#undef outb

/* silence compiler warnings */
u_char inb(u_int);
void outb(u_int, u_char);

u_char
inb(u_int port)
{
	u_char	data;
	/*
	 * We use %%dx and not %1 here because i/o is done at %dx and not at
	 * %edx, while gcc generates inferior code (movw instead of movl)
	 * if we tell it to load (u_short) port.
	 */
	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

void
outb(u_int port, u_char data)
{
	u_char	al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

#endif /* DDB */