machdep.c revision 48677
/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 *	$Id: machdep.c,v 1.353 1999/07/06 07:13:33 cracauer Exp $
 */

#include "apm.h"
#include "ether.h"
#include "npx.h"
#include "opt_atalk.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_ipx.h"
#include "opt_maxmem.h"
#include "opt_msgbuf.h"
#include "opt_perfmon.h"
#include "opt_smp.h"
#include "opt_sysvipc.h"
#include "opt_user_ldt.h"
#include "opt_userconfig.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/kernel.h>
#include <sys/linker.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/callout.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/msgbuf.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/vmmeter.h>
#include <sys/bus.h>

#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#ifdef SYSVMSG
#include <sys/msg.h>
#endif

#ifdef SYSVSEM
#include <sys/sem.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_prot.h>
#include <sys/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/user.h>
#include <sys/exec.h>

#include <ddb/ddb.h>

#include <net/netisr.h>

#include <machine/cpu.h>
#include <machine/reg.h>
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/cons.h>
#include <machine/bootinfo.h>
#include <machine/ipl.h>
#include <machine/md_var.h>
#include <machine/pcb_ext.h>	/* pcb.h included via sys/user.h */
#ifdef SMP
#include <machine/smp.h>
#include <machine/globaldata.h>
#endif
#ifdef PERFMON
#include <machine/perfmon.h>
#endif

#ifdef OLD_BUS_ARCH
#include <i386/isa/isa_device.h>
#endif
#include <i386/isa/intr_machdep.h>
#include <isa/rtc.h>
#include <machine/vm86.h>
#include <machine/random.h>
#include <sys/ptrace.h>

extern void init386 __P((int first));
extern void dblfault_handler __P((void));

extern void printcpuinfo(void);	/* XXX header file */
extern void earlysetcpuclass(void);	/* same header file */
extern void finishidentcpu(void);
extern void panicifcpuunsupported(void);
extern void initializecpu(void);

static void cpu_startup __P((void *));
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)

static MALLOC_DEFINE(M_MBUF, "mbuf", "mbuf");

int _udatasel, _ucodesel;
u_int atdevbase;

#if defined(SWTCH_OPTIM_STATS)
extern int swtch_optim_stats;
SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats,
	CTLFLAG_RD, &swtch_optim_stats, 0, "");
SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count,
	CTLFLAG_RD, &tlb_flush_count, 0, "");
#endif

#ifdef PC98
static int ispc98 = 1;
#else
static int ispc98 = 0;
#endif
SYSCTL_INT(_machdep, OID_AUTO, ispc98, CTLFLAG_RD, &ispc98, 0, "");

int physmem = 0;
int cold = 1;

static int
sysctl_hw_physmem SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp, 0, ctob(physmem), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_physmem, "I", "");

static int
sysctl_hw_usermem SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp, 0,
		ctob(physmem -
		cnt.v_wire_count), req);
	return (error);
}

SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_usermem, "I", "");

static int
sysctl_hw_availpages SYSCTL_HANDLER_ARGS
{
	int error = sysctl_handle_int(oidp, 0,
		i386_btop(avail_end - avail_start), req);
	return (error);
}

SYSCTL_PROC(_hw, OID_AUTO, availpages, CTLTYPE_INT|CTLFLAG_RD,
	0, 0, sysctl_hw_availpages, "I", "");

static int
sysctl_machdep_msgbuf SYSCTL_HANDLER_ARGS
{
	int error;

	/* Unwind the buffer, so that it's linear (possibly starting with
	 * some initial nulls).
	 */
	error=sysctl_handle_opaque(oidp,msgbufp->msg_ptr+msgbufp->msg_bufr,
		msgbufp->msg_size-msgbufp->msg_bufr,req);
	if(error) return(error);
	if(msgbufp->msg_bufr>0) {
		error=sysctl_handle_opaque(oidp,msgbufp->msg_ptr,
			msgbufp->msg_bufr,req);
	}
	return(error);
}

SYSCTL_PROC(_machdep, OID_AUTO, msgbuf, CTLTYPE_STRING|CTLFLAG_RD,
	0, 0, sysctl_machdep_msgbuf, "A","Contents of kernel message buffer");

static int msgbuf_clear;

static int
sysctl_machdep_msgbuf_clear SYSCTL_HANDLER_ARGS
{
	int error;
	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
		req);
	if (!error && req->newptr) {
		/* Clear the buffer and reset write pointer */
		bzero(msgbufp->msg_ptr,msgbufp->msg_size);
		msgbufp->msg_bufr=msgbufp->msg_bufx=0;
		msgbuf_clear=0;
	}
	return (error);
}

SYSCTL_PROC(_machdep, OID_AUTO, msgbuf_clear, CTLTYPE_INT|CTLFLAG_RW,
	&msgbuf_clear, 0, sysctl_machdep_msgbuf_clear, "I",
	"Clear kernel message buffer");

int bootverbose = 0, Maxmem = 0;
long dumplo;

vm_offset_t phys_avail[10];

/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)

static vm_offset_t buffer_sva, buffer_eva;
vm_offset_t clean_sva, clean_eva;
static vm_offset_t pager_sva, pager_eva;

#define offsetof(type, member) ((size_t)(&((type *)0)->member))

static void
cpu_startup(dummy)
	void *dummy;
{
	register unsigned i;
	register caddr_t v;
	vm_offset_t maxaddr;
	vm_size_t size = 0;
	int firstaddr;
	vm_offset_t minaddr;

	if (boothowto & RB_VERBOSE)
		bootverbose++;

	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	printf(version);
	earlysetcpuclass();
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory = %u (%uK bytes)\n", ptoa(Maxmem), ptoa(Maxmem) / 1024);
	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			int size1 = phys_avail[indx + 1] - phys_avail[indx];

			printf("0x%08x - 0x%08x, %u bytes (%u pages)\n",
			    phys_avail[indx], phys_avail[indx + 1] - 1, size1,
			    size1 / PAGE_SIZE);
		}
	}

	/*
	 * Calculate callout wheel size
	 */
	for (callwheelsize = 1, callwheelbits = 0;
	     callwheelsize < ncallout;
	     callwheelsize <<= 1, ++callwheelbits)
		;
	callwheelmask = callwheelsize - 1;

	/*
	 * Allocate space for system data structures.
	 * The first available kernel virtual address is in "v".
	 * As pages of kernel virtual memory are allocated, "v" is incremented.
309 * As pages of memory are allocated and cleared, 310 * "firstaddr" is incremented. 311 * An index into the kernel page table corresponding to the 312 * virtual memory address maintained in "v" is kept in "mapaddr". 313 */ 314 315 /* 316 * Make two passes. The first pass calculates how much memory is 317 * needed and allocates it. The second pass assigns virtual 318 * addresses to the various data structures. 319 */ 320 firstaddr = 0; 321again: 322 v = (caddr_t)firstaddr; 323 324#define valloc(name, type, num) \ 325 (name) = (type *)v; v = (caddr_t)((name)+(num)) 326#define valloclim(name, type, num, lim) \ 327 (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num))) 328 329 valloc(callout, struct callout, ncallout); 330 valloc(callwheel, struct callout_tailq, callwheelsize); 331#ifdef SYSVSHM 332 valloc(shmsegs, struct shmid_ds, shminfo.shmmni); 333#endif 334#ifdef SYSVSEM 335 valloc(sema, struct semid_ds, seminfo.semmni); 336 valloc(sem, struct sem, seminfo.semmns); 337 /* This is pretty disgusting! */ 338 valloc(semu, int, (seminfo.semmnu * seminfo.semusz) / sizeof(int)); 339#endif 340#ifdef SYSVMSG 341 valloc(msgpool, char, msginfo.msgmax); 342 valloc(msgmaps, struct msgmap, msginfo.msgseg); 343 valloc(msghdrs, struct msg, msginfo.msgtql); 344 valloc(msqids, struct msqid_ds, msginfo.msgmni); 345#endif 346 347 if (nbuf == 0) { 348 nbuf = 30; 349 if( physmem > 1024) 350 nbuf += min((physmem - 1024) / 8, 2048); 351 if( physmem > 65536) 352 nbuf += (physmem - 65536) / 20; 353 } 354 nswbuf = max(min(nbuf/4, 256), 16); 355 356 valloc(swbuf, struct buf, nswbuf); 357 valloc(buf, struct buf, nbuf); 358 v = bufhashinit(v); 359 360 /* 361 * End of first pass, size has been calculated so allocate memory 362 */ 363 if (firstaddr == 0) { 364 size = (vm_size_t)(v - firstaddr); 365 firstaddr = (int)kmem_alloc(kernel_map, round_page(size)); 366 if (firstaddr == 0) 367 panic("startup: no room for tables"); 368 goto again; 369 } 370 371 /* 372 * End of second pass, addresses have been assigned 373 */ 374 if ((vm_size_t)(v - firstaddr) != size) 375 panic("startup: table size inconsistency"); 376 377 clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva, 378 (nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size); 379 buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva, 380 (nbuf*BKVASIZE)); 381 pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva, 382 (nswbuf*MAXPHYS) + pager_map_size); 383 pager_map->system_map = 1; 384 exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, 385 (16*(ARG_MAX+(PAGE_SIZE*3)))); 386 387 /* 388 * Finally, allocate mbuf pool. Since mclrefcnt is an off-size 389 * we use the more space efficient malloc in place of kmem_alloc. 
390 */ 391 { 392 vm_offset_t mb_map_size; 393 394 mb_map_size = nmbufs * MSIZE + nmbclusters * MCLBYTES; 395 mb_map_size = roundup2(mb_map_size, max(MCLBYTES, PAGE_SIZE)); 396 mclrefcnt = malloc(mb_map_size / MCLBYTES, M_MBUF, M_NOWAIT); 397 bzero(mclrefcnt, mb_map_size / MCLBYTES); 398 mb_map = kmem_suballoc(kmem_map, (vm_offset_t *)&mbutl, &maxaddr, 399 mb_map_size); 400 mb_map->system_map = 1; 401 } 402 403 /* 404 * Initialize callouts 405 */ 406 SLIST_INIT(&callfree); 407 for (i = 0; i < ncallout; i++) { 408 callout_init(&callout[i]); 409 callout[i].c_flags = CALLOUT_LOCAL_ALLOC; 410 SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle); 411 } 412 413 for (i = 0; i < callwheelsize; i++) { 414 TAILQ_INIT(&callwheel[i]); 415 } 416 417#if defined(USERCONFIG) 418 userconfig(); 419 cninit(); /* the preferred console may have changed */ 420#endif 421 422 printf("avail memory = %u (%uK bytes)\n", ptoa(cnt.v_free_count), 423 ptoa(cnt.v_free_count) / 1024); 424 425 /* 426 * Set up buffers, so they can be used to read disk labels. 427 */ 428 bufinit(); 429 vm_pager_bufferinit(); 430 431#ifdef SMP 432 /* 433 * OK, enough kmem_alloc/malloc state should be up, lets get on with it! 434 */ 435 mp_start(); /* fire up the APs and APICs */ 436 mp_announce(); 437#endif /* SMP */ 438} 439 440int 441register_netisr(num, handler) 442 int num; 443 netisr_t *handler; 444{ 445 446 if (num < 0 || num >= (sizeof(netisrs)/sizeof(*netisrs)) ) { 447 printf("register_netisr: bad isr number: %d\n", num); 448 return (EINVAL); 449 } 450 netisrs[num] = handler; 451 return (0); 452} 453 454void 455netisr_sysinit(data) 456 void *data; 457{ 458 const struct netisrtab *nit; 459 460 nit = (const struct netisrtab *)data; 461 register_netisr(nit->nit_num, nit->nit_isr); 462} 463 464/* 465 * Send an interrupt to process. 466 * 467 * Stack is set up to allow sigcode stored 468 * at top to call routine, followed by kcall 469 * to sigreturn routine below. After sigreturn 470 * resets the signal mask, the stack, and the 471 * frame pointer, it returns to the user 472 * specified pc, psl. 473 */ 474void 475sendsig(catcher, sig, mask, code) 476 sig_t catcher; 477 int sig, mask; 478 u_long code; 479{ 480 register struct proc *p = curproc; 481 register struct trapframe *regs; 482 register struct sigframe *fp; 483 struct sigframe sf; 484 struct sigacts *psp = p->p_sigacts; 485 int oonstack; 486 487 regs = p->p_md.md_regs; 488 oonstack = psp->ps_sigstk.ss_flags & SS_ONSTACK; 489 /* 490 * Allocate and validate space for the signal handler context. 491 */ 492 if ((psp->ps_flags & SAS_ALTSTACK) && !oonstack && 493 (psp->ps_sigonstack & sigmask(sig))) { 494 fp = (struct sigframe *)(psp->ps_sigstk.ss_sp + 495 psp->ps_sigstk.ss_size - sizeof(struct sigframe)); 496 psp->ps_sigstk.ss_flags |= SS_ONSTACK; 497 } else { 498 fp = (struct sigframe *)regs->tf_esp - 1; 499 } 500 501 /* 502 * grow() will return FALSE if the fp will not fit inside the stack 503 * and the stack can not be grown. useracc will return FALSE 504 * if access is denied. 505 */ 506 if ((grow_stack (p, (int)fp) == FALSE) || 507 (useracc((caddr_t)fp, sizeof(struct sigframe), B_WRITE) == FALSE)) { 508 /* 509 * Process has trashed its stack; give it an illegal 510 * instruction to halt it in its tracks. 
511 */ 512#ifdef DEBUG 513 printf("process %d has trashed its stack\n", p->p_pid); 514#endif 515 SIGACTION(p, SIGILL) = SIG_DFL; 516 sig = sigmask(SIGILL); 517 p->p_sigignore &= ~sig; 518 p->p_sigcatch &= ~sig; 519 p->p_sigmask &= ~sig; 520 psignal(p, SIGILL); 521 return; 522 } 523 524 /* 525 * Build the argument list for the signal handler. 526 */ 527 if (p->p_sysent->sv_sigtbl) { 528 if (sig < p->p_sysent->sv_sigsize) 529 sig = p->p_sysent->sv_sigtbl[sig]; 530 else 531 sig = p->p_sysent->sv_sigsize + 1; 532 } 533 sf.sf_signum = sig; 534 sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc; 535 if (p->p_sigacts->ps_siginfo & sigmask(sig)) { 536 /* 537 * Signal handler installed with SA_SIGINFO. 538 */ 539 sf.sf_arg2 = (register_t)&fp->sf_siginfo; 540 sf.sf_siginfo.si_signo = sig; 541 sf.sf_siginfo.si_code = code; 542 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; 543 } else { 544 /* 545 * Old FreeBSD-style arguments. 546 */ 547 sf.sf_arg2 = code; 548 sf.sf_ahu.sf_handler = catcher; 549 } 550 551 sf.sf_addr = (char *) regs->tf_err; 552 553 /* save scratch registers */ 554 sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax; 555 sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx; 556 sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx; 557 sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx; 558 sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi; 559 sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi; 560 sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs; 561 sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds; 562 sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss; 563 sf.sf_siginfo.si_sc.sc_es = regs->tf_es; 564 sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs; 565 sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp; 566 567 /* 568 * Build the signal context to be used by sigreturn. 569 */ 570 sf.sf_siginfo.si_sc.sc_onstack = oonstack; 571 sf.sf_siginfo.si_sc.sc_mask = mask; 572 sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp; 573 sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp; 574 sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip; 575 sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags; 576 sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno; 577 sf.sf_siginfo.si_sc.sc_err = regs->tf_err; 578 579 /* 580 * If we're a vm86 process, we want to save the segment registers. 581 * We also change eflags to be our emulated eflags, not the actual 582 * eflags. 583 */ 584 if (regs->tf_eflags & PSL_VM) { 585 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 586 struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 587 588 sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs; 589 sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs; 590 sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es; 591 sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds; 592 593 if (vm86->vm86_has_vme == 0) 594 sf.sf_siginfo.si_sc.sc_ps = (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) 595 | (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); 596 597 /* 598 * We should never have PSL_T set when returning from vm86 599 * mode. It may be set here if we deliver a signal before 600 * getting to vm86 mode, so turn it off. 601 * 602 * Clear PSL_NT to inhibit T_TSSFLT faults on return from 603 * syscalls made by the signal handler. This just avoids 604 * wasting time for our lazy fixup of such faults. PSL_NT 605 * does nothing in vm86 mode, but vm86 programs can set it 606 * almost legitimately in probes for old cpu types. 607 */ 608 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP); 609 } 610 611 /* 612 * Copy the sigframe out to the user's stack. 613 */ 614 if (copyout(&sf, fp, sizeof(struct sigframe)) != 0) { 615 /* 616 * Something is wrong with the stack pointer. 617 * ...Kill the process. 
618 */ 619 sigexit(p, SIGILL); 620 } 621 622 regs->tf_esp = (int)fp; 623 regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode); 624 regs->tf_cs = _ucodesel; 625 regs->tf_ds = _udatasel; 626 regs->tf_es = _udatasel; 627 regs->tf_fs = _udatasel; 628 regs->tf_ss = _udatasel; 629} 630 631/* 632 * System call to cleanup state after a signal 633 * has been taken. Reset signal mask and 634 * stack state from context left by sendsig (above). 635 * Return to previous pc and psl as specified by 636 * context left by sendsig. Check carefully to 637 * make sure that the user has not modified the 638 * state to gain improper privileges. 639 */ 640int 641sigreturn(p, uap) 642 struct proc *p; 643 struct sigreturn_args /* { 644 struct sigcontext *sigcntxp; 645 } */ *uap; 646{ 647 register struct sigcontext *scp; 648 register struct sigframe *fp; 649 register struct trapframe *regs = p->p_md.md_regs; 650 int eflags; 651 652 /* 653 * (XXX old comment) regs->tf_esp points to the return address. 654 * The user scp pointer is above that. 655 * The return address is faked in the signal trampoline code 656 * for consistency. 657 */ 658 scp = uap->sigcntxp; 659 fp = (struct sigframe *) 660 ((caddr_t)scp - offsetof(struct sigframe, sf_siginfo.si_sc)); 661 662 if (useracc((caddr_t)fp, sizeof (*fp), B_WRITE) == 0) 663 return(EFAULT); 664 665 eflags = scp->sc_ps; 666 if (eflags & PSL_VM) { 667 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 668 struct vm86_kernel *vm86; 669 670 /* 671 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't 672 * set up the vm86 area, and we can't enter vm86 mode. 673 */ 674 if (p->p_addr->u_pcb.pcb_ext == 0) 675 return (EINVAL); 676 vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 677 if (vm86->vm86_inited == 0) 678 return (EINVAL); 679 680 /* go back to user mode if both flags are set */ 681 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) 682 trapsignal(p, SIGBUS, 0); 683 684 if (vm86->vm86_has_vme) { 685 eflags = (tf->tf_eflags & ~VME_USERCHANGE) | 686 (eflags & VME_USERCHANGE) | PSL_VM; 687 } else { 688 vm86->vm86_eflags = eflags; /* save VIF, VIP */ 689 eflags = (tf->tf_eflags & ~VM_USERCHANGE) | (eflags & VM_USERCHANGE) | PSL_VM; 690 } 691 tf->tf_vm86_ds = scp->sc_ds; 692 tf->tf_vm86_es = scp->sc_es; 693 tf->tf_vm86_fs = scp->sc_fs; 694 tf->tf_vm86_gs = scp->sc_gs; 695 tf->tf_ds = _udatasel; 696 tf->tf_es = _udatasel; 697 tf->tf_fs = _udatasel; 698 } else { 699 /* 700 * Don't allow users to change privileged or reserved flags. 701 */ 702#define EFLAGS_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0) 703 /* 704 * XXX do allow users to change the privileged flag PSL_RF. 705 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers 706 * should sometimes set it there too. tf_eflags is kept in 707 * the signal context during signal handling and there is no 708 * other place to remember it, so the PSL_RF bit may be 709 * corrupted by the signal handler without us knowing. 710 * Corruption of the PSL_RF bit at worst causes one more or 711 * one less debugger trap, so allowing it is fairly harmless. 712 */ 713 if (!EFLAGS_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) { 714#ifdef DEBUG 715 printf("sigreturn: eflags = 0x%x\n", eflags); 716#endif 717 return(EINVAL); 718 } 719 720 /* 721 * Don't allow users to load a valid privileged %cs. Let the 722 * hardware check for invalid selectors, excess privilege in 723 * other selectors, invalid %eip's and invalid %esp's. 
724 */ 725#define CS_SECURE(cs) (ISPL(cs) == SEL_UPL) 726 if (!CS_SECURE(scp->sc_cs)) { 727#ifdef DEBUG 728 printf("sigreturn: cs = 0x%x\n", scp->sc_cs); 729#endif 730 trapsignal(p, SIGBUS, T_PROTFLT); 731 return(EINVAL); 732 } 733 regs->tf_ds = scp->sc_ds; 734 regs->tf_es = scp->sc_es; 735 regs->tf_fs = scp->sc_fs; 736 } 737 738 /* restore scratch registers */ 739 regs->tf_eax = scp->sc_eax; 740 regs->tf_ebx = scp->sc_ebx; 741 regs->tf_ecx = scp->sc_ecx; 742 regs->tf_edx = scp->sc_edx; 743 regs->tf_esi = scp->sc_esi; 744 regs->tf_edi = scp->sc_edi; 745 regs->tf_cs = scp->sc_cs; 746 regs->tf_ss = scp->sc_ss; 747 regs->tf_isp = scp->sc_isp; 748 749 if (useracc((caddr_t)scp, sizeof (*scp), B_WRITE) == 0) 750 return(EINVAL); 751 752 if (scp->sc_onstack & 01) 753 p->p_sigacts->ps_sigstk.ss_flags |= SS_ONSTACK; 754 else 755 p->p_sigacts->ps_sigstk.ss_flags &= ~SS_ONSTACK; 756 p->p_sigmask = scp->sc_mask & ~sigcantmask; 757 regs->tf_ebp = scp->sc_fp; 758 regs->tf_esp = scp->sc_sp; 759 regs->tf_eip = scp->sc_pc; 760 regs->tf_eflags = eflags; 761 return(EJUSTRETURN); 762} 763 764/* 765 * Machine dependent boot() routine 766 * 767 * I haven't seen anything to put here yet 768 * Possibly some stuff might be grafted back here from boot() 769 */ 770void 771cpu_boot(int howto) 772{ 773} 774 775/* 776 * Shutdown the CPU as much as possible 777 */ 778void 779cpu_halt(void) 780{ 781 for (;;) 782 __asm__ ("hlt"); 783} 784 785/* 786 * Clear registers on exec 787 */ 788void 789setregs(p, entry, stack, ps_strings) 790 struct proc *p; 791 u_long entry; 792 u_long stack; 793 u_long ps_strings; 794{ 795 struct trapframe *regs = p->p_md.md_regs; 796 struct pcb *pcb = &p->p_addr->u_pcb; 797 798#ifdef USER_LDT 799 /* was i386_user_cleanup() in NetBSD */ 800 if (pcb->pcb_ldt) { 801 if (pcb == curpcb) { 802 lldt(_default_ldt); 803 currentldt = _default_ldt; 804 } 805 kmem_free(kernel_map, (vm_offset_t)pcb->pcb_ldt, 806 pcb->pcb_ldt_len * sizeof(union descriptor)); 807 pcb->pcb_ldt_len = (int)pcb->pcb_ldt = 0; 808 } 809#endif 810 811 bzero((char *)regs, sizeof(struct trapframe)); 812 regs->tf_eip = entry; 813 regs->tf_esp = stack; 814 regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T); 815 regs->tf_ss = _udatasel; 816 regs->tf_ds = _udatasel; 817 regs->tf_es = _udatasel; 818 regs->tf_fs = _udatasel; 819 regs->tf_cs = _ucodesel; 820 821 /* PS_STRINGS value for BSD/OS binaries. It is 0 for non-BSD/OS. */ 822 regs->tf_ebx = ps_strings; 823 824 /* reset %gs as well */ 825 pcb->pcb_gs = _udatasel; 826 if (pcb == curpcb) { 827 load_gs(_udatasel); 828 } 829 830 /* 831 * Initialize the math emulator (if any) for the current process. 832 * Actually, just clear the bit that says that the emulator has 833 * been initialized. Initialization is delayed until the process 834 * traps to the emulator (if it is done at all) mainly because 835 * emulators don't provide an entry point for initialization. 836 */ 837 p->p_addr->u_pcb.pcb_flags &= ~FP_SOFTFP; 838 839 /* 840 * Arrange to trap the next npx or `fwait' instruction (see npx.c 841 * for why fwait must be trapped at least if there is an npx or an 842 * emulator). This is mainly to handle the case where npx0 is not 843 * configured, since the npx routines normally set up the trap 844 * otherwise. It should be done only at boot time, but doing it 845 * here allows modifying `npx_exists' for testing the emulator on 846 * systems with an npx. 847 */ 848 load_cr0(rcr0() | CR0_MP | CR0_TS); 849 850#if NNPX > 0 851 /* Initialize the npx (if any) for the current process. 
*/ 852 npxinit(__INITIAL_NPXCW__); 853#endif 854 855 /* 856 * XXX - Linux emulator 857 * Make sure sure edx is 0x0 on entry. Linux binaries depend 858 * on it. 859 */ 860 p->p_retval[1] = 0; 861} 862 863static int 864sysctl_machdep_adjkerntz SYSCTL_HANDLER_ARGS 865{ 866 int error; 867 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, 868 req); 869 if (!error && req->newptr) 870 resettodr(); 871 return (error); 872} 873 874SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW, 875 &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", ""); 876 877SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set, 878 CTLFLAG_RW, &disable_rtc_set, 0, ""); 879 880SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo, 881 CTLFLAG_RD, &bootinfo, bootinfo, ""); 882 883SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock, 884 CTLFLAG_RW, &wall_cmos_clock, 0, ""); 885 886/* 887 * Initialize 386 and configure to run kernel 888 */ 889 890/* 891 * Initialize segments & interrupt table 892 */ 893 894int _default_ldt; 895#ifdef SMP 896union descriptor gdt[NGDT * NCPU]; /* global descriptor table */ 897#else 898union descriptor gdt[NGDT]; /* global descriptor table */ 899#endif 900static struct gate_descriptor idt0[NIDT]; 901struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */ 902union descriptor ldt[NLDT]; /* local descriptor table */ 903#ifdef SMP 904/* table descriptors - used to load tables by microp */ 905struct region_descriptor r_gdt, r_idt; 906#endif 907 908#ifndef SMP 909extern struct segment_descriptor common_tssd, *tss_gdt; 910#endif 911int private_tss; /* flag indicating private tss */ 912 913#if defined(I586_CPU) && !defined(NO_F00F_HACK) 914extern int has_f00f_bug; 915#endif 916 917static struct i386tss dblfault_tss; 918static char dblfault_stack[PAGE_SIZE]; 919 920extern struct user *proc0paddr; 921 922 923/* software prototypes -- in more palatable form */ 924struct soft_segment_descriptor gdt_segs[] = { 925/* GNULL_SEL 0 Null Descriptor */ 926{ 0x0, /* segment base address */ 927 0x0, /* length */ 928 0, /* segment type */ 929 0, /* segment descriptor priority level */ 930 0, /* segment descriptor present */ 931 0, 0, 932 0, /* default 32 vs 16 bit size */ 933 0 /* limit granularity (byte/page units)*/ }, 934/* GCODE_SEL 1 Code Descriptor for kernel */ 935{ 0x0, /* segment base address */ 936 0xfffff, /* length - all address space */ 937 SDT_MEMERA, /* segment type */ 938 0, /* segment descriptor priority level */ 939 1, /* segment descriptor present */ 940 0, 0, 941 1, /* default 32 vs 16 bit size */ 942 1 /* limit granularity (byte/page units)*/ }, 943/* GDATA_SEL 2 Data Descriptor for kernel */ 944{ 0x0, /* segment base address */ 945 0xfffff, /* length - all address space */ 946 SDT_MEMRWA, /* segment type */ 947 0, /* segment descriptor priority level */ 948 1, /* segment descriptor present */ 949 0, 0, 950 1, /* default 32 vs 16 bit size */ 951 1 /* limit granularity (byte/page units)*/ }, 952/* GPRIV_SEL 3 SMP Per-Processor Private Data Descriptor */ 953{ 0x0, /* segment base address */ 954 0xfffff, /* length - all address space */ 955 SDT_MEMRWA, /* segment type */ 956 0, /* segment descriptor priority level */ 957 1, /* segment descriptor present */ 958 0, 0, 959 1, /* default 32 vs 16 bit size */ 960 1 /* limit granularity (byte/page units)*/ }, 961/* GPROC0_SEL 4 Proc 0 Tss Descriptor */ 962{ 963 0x0, /* segment base address */ 964 sizeof(struct i386tss)-1,/* length - all address space */ 965 SDT_SYS386TSS, /* segment type */ 966 0, /* segment descriptor 
priority level */ 967 1, /* segment descriptor present */ 968 0, 0, 969 0, /* unused - default 32 vs 16 bit size */ 970 0 /* limit granularity (byte/page units)*/ }, 971/* GLDT_SEL 5 LDT Descriptor */ 972{ (int) ldt, /* segment base address */ 973 sizeof(ldt)-1, /* length - all address space */ 974 SDT_SYSLDT, /* segment type */ 975 SEL_UPL, /* segment descriptor priority level */ 976 1, /* segment descriptor present */ 977 0, 0, 978 0, /* unused - default 32 vs 16 bit size */ 979 0 /* limit granularity (byte/page units)*/ }, 980/* GUSERLDT_SEL 6 User LDT Descriptor per process */ 981{ (int) ldt, /* segment base address */ 982 (512 * sizeof(union descriptor)-1), /* length */ 983 SDT_SYSLDT, /* segment type */ 984 0, /* segment descriptor priority level */ 985 1, /* segment descriptor present */ 986 0, 0, 987 0, /* unused - default 32 vs 16 bit size */ 988 0 /* limit granularity (byte/page units)*/ }, 989/* GTGATE_SEL 7 Null Descriptor - Placeholder */ 990{ 0x0, /* segment base address */ 991 0x0, /* length - all address space */ 992 0, /* segment type */ 993 0, /* segment descriptor priority level */ 994 0, /* segment descriptor present */ 995 0, 0, 996 0, /* default 32 vs 16 bit size */ 997 0 /* limit granularity (byte/page units)*/ }, 998/* GPANIC_SEL 8 Panic Tss Descriptor */ 999{ (int) &dblfault_tss, /* segment base address */ 1000 sizeof(struct i386tss)-1,/* length - all address space */ 1001 SDT_SYS386TSS, /* segment type */ 1002 0, /* segment descriptor priority level */ 1003 1, /* segment descriptor present */ 1004 0, 0, 1005 0, /* unused - default 32 vs 16 bit size */ 1006 0 /* limit granularity (byte/page units)*/ }, 1007/* GAPMCODE32_SEL 9 APM BIOS 32-bit interface (32bit Code) */ 1008{ 0, /* segment base address (overwritten by APM) */ 1009 0xfffff, /* length */ 1010 SDT_MEMERA, /* segment type */ 1011 0, /* segment descriptor priority level */ 1012 1, /* segment descriptor present */ 1013 0, 0, 1014 1, /* default 32 vs 16 bit size */ 1015 1 /* limit granularity (byte/page units)*/ }, 1016/* GAPMCODE16_SEL 10 APM BIOS 32-bit interface (16bit Code) */ 1017{ 0, /* segment base address (overwritten by APM) */ 1018 0xfffff, /* length */ 1019 SDT_MEMERA, /* segment type */ 1020 0, /* segment descriptor priority level */ 1021 1, /* segment descriptor present */ 1022 0, 0, 1023 0, /* default 32 vs 16 bit size */ 1024 1 /* limit granularity (byte/page units)*/ }, 1025/* GAPMDATA_SEL 11 APM BIOS 32-bit interface (Data) */ 1026{ 0, /* segment base address (overwritten by APM) */ 1027 0xfffff, /* length */ 1028 SDT_MEMRWA, /* segment type */ 1029 0, /* segment descriptor priority level */ 1030 1, /* segment descriptor present */ 1031 0, 0, 1032 1, /* default 32 vs 16 bit size */ 1033 1 /* limit granularity (byte/page units)*/ }, 1034}; 1035 1036static struct soft_segment_descriptor ldt_segs[] = { 1037 /* Null Descriptor - overwritten by call gate */ 1038{ 0x0, /* segment base address */ 1039 0x0, /* length - all address space */ 1040 0, /* segment type */ 1041 0, /* segment descriptor priority level */ 1042 0, /* segment descriptor present */ 1043 0, 0, 1044 0, /* default 32 vs 16 bit size */ 1045 0 /* limit granularity (byte/page units)*/ }, 1046 /* Null Descriptor - overwritten by call gate */ 1047{ 0x0, /* segment base address */ 1048 0x0, /* length - all address space */ 1049 0, /* segment type */ 1050 0, /* segment descriptor priority level */ 1051 0, /* segment descriptor present */ 1052 0, 0, 1053 0, /* default 32 vs 16 bit size */ 1054 0 /* limit granularity (byte/page units)*/ }, 
1055 /* Null Descriptor - overwritten by call gate */ 1056{ 0x0, /* segment base address */ 1057 0x0, /* length - all address space */ 1058 0, /* segment type */ 1059 0, /* segment descriptor priority level */ 1060 0, /* segment descriptor present */ 1061 0, 0, 1062 0, /* default 32 vs 16 bit size */ 1063 0 /* limit granularity (byte/page units)*/ }, 1064 /* Code Descriptor for user */ 1065{ 0x0, /* segment base address */ 1066 0xfffff, /* length - all address space */ 1067 SDT_MEMERA, /* segment type */ 1068 SEL_UPL, /* segment descriptor priority level */ 1069 1, /* segment descriptor present */ 1070 0, 0, 1071 1, /* default 32 vs 16 bit size */ 1072 1 /* limit granularity (byte/page units)*/ }, 1073 /* Null Descriptor - overwritten by call gate */ 1074{ 0x0, /* segment base address */ 1075 0x0, /* length - all address space */ 1076 0, /* segment type */ 1077 0, /* segment descriptor priority level */ 1078 0, /* segment descriptor present */ 1079 0, 0, 1080 0, /* default 32 vs 16 bit size */ 1081 0 /* limit granularity (byte/page units)*/ }, 1082 /* Data Descriptor for user */ 1083{ 0x0, /* segment base address */ 1084 0xfffff, /* length - all address space */ 1085 SDT_MEMRWA, /* segment type */ 1086 SEL_UPL, /* segment descriptor priority level */ 1087 1, /* segment descriptor present */ 1088 0, 0, 1089 1, /* default 32 vs 16 bit size */ 1090 1 /* limit granularity (byte/page units)*/ }, 1091}; 1092 1093void 1094setidt(idx, func, typ, dpl, selec) 1095 int idx; 1096 inthand_t *func; 1097 int typ; 1098 int dpl; 1099 int selec; 1100{ 1101 struct gate_descriptor *ip; 1102 1103 ip = idt + idx; 1104 ip->gd_looffset = (int)func; 1105 ip->gd_selector = selec; 1106 ip->gd_stkcpy = 0; 1107 ip->gd_xx = 0; 1108 ip->gd_type = typ; 1109 ip->gd_dpl = dpl; 1110 ip->gd_p = 1; 1111 ip->gd_hioffset = ((int)func)>>16 ; 1112} 1113 1114#define IDTVEC(name) __CONCAT(X,name) 1115 1116extern inthand_t 1117 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl), 1118 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm), 1119 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot), 1120 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align), 1121 IDTVEC(syscall), IDTVEC(int0x80_syscall); 1122 1123void 1124sdtossd(sd, ssd) 1125 struct segment_descriptor *sd; 1126 struct soft_segment_descriptor *ssd; 1127{ 1128 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase; 1129 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit; 1130 ssd->ssd_type = sd->sd_type; 1131 ssd->ssd_dpl = sd->sd_dpl; 1132 ssd->ssd_p = sd->sd_p; 1133 ssd->ssd_def32 = sd->sd_def32; 1134 ssd->ssd_gran = sd->sd_gran; 1135} 1136 1137#define PHYSMAP_SIZE (2 * 8) 1138 1139/* 1140 * Populate the (physmap) array with base/bound pairs describing the 1141 * available physical memory in the system, then test this memory and 1142 * build the phys_avail array describing the actually-available memory. 1143 * 1144 * If we cannot accurately determine the physical memory map, then use 1145 * value from the 0xE801 call, and failing that, the RTC. 1146 * 1147 * Total memory size may be set by the kernel environment variable 1148 * hw.physmem or the compile-time define MAXMEM. 
1149 */ 1150static void 1151getmemsize(int first) 1152{ 1153 int i, physmap_idx, pa_indx; 1154 u_int basemem, extmem; 1155 struct vm86frame vmf; 1156 struct vm86context vmc; 1157 vm_offset_t pa, physmap[PHYSMAP_SIZE]; 1158 pt_entry_t pte; 1159 const char *cp; 1160 struct { 1161 u_int64_t base; 1162 u_int64_t length; 1163 u_int32_t type; 1164 } *smap; 1165 1166 bzero(&vmf, sizeof(struct vm86frame)); 1167 bzero(physmap, sizeof(physmap)); 1168 1169 /* 1170 * Perform "base memory" related probes & setup 1171 */ 1172 vm86_intcall(0x12, &vmf); 1173 basemem = vmf.vmf_ax; 1174 if (basemem > 640) { 1175 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n", 1176 basemem); 1177 basemem = 640; 1178 } 1179 1180 /* 1181 * XXX if biosbasemem is now < 640, there is a `hole' 1182 * between the end of base memory and the start of 1183 * ISA memory. The hole may be empty or it may 1184 * contain BIOS code or data. Map it read/write so 1185 * that the BIOS can write to it. (Memory from 0 to 1186 * the physical end of the kernel is mapped read-only 1187 * to begin with and then parts of it are remapped. 1188 * The parts that aren't remapped form holes that 1189 * remain read-only and are unused by the kernel. 1190 * The base memory area is below the physical end of 1191 * the kernel and right now forms a read-only hole. 1192 * The part of it from PAGE_SIZE to 1193 * (trunc_page(biosbasemem * 1024) - 1) will be 1194 * remapped and used by the kernel later.) 1195 * 1196 * This code is similar to the code used in 1197 * pmap_mapdev, but since no memory needs to be 1198 * allocated we simply change the mapping. 1199 */ 1200 for (pa = trunc_page(basemem * 1024); 1201 pa < ISA_HOLE_START; pa += PAGE_SIZE) { 1202 pte = (pt_entry_t)vtopte(pa + KERNBASE); 1203 *pte = pa | PG_RW | PG_V; 1204 } 1205 1206 /* 1207 * if basemem != 640, map pages r/w into vm86 page table so 1208 * that the bios can scribble on it. 1209 */ 1210 pte = (pt_entry_t)vm86paddr; 1211 for (i = basemem / 4; i < 160; i++) 1212 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U; 1213 1214 /* 1215 * map page 1 R/W into the kernel page table so we can use it 1216 * as a buffer. The kernel will unmap this page later. 
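	 * (This is the page at physical address PAGE_SIZE; vm86_addpage()
	 * below registers it with the vm86 context as the buffer that the
	 * INT 15:E820 calls write their SMAP entries into.)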
	 */
	pte = (pt_entry_t)vtopte(KERNBASE + (1 << PAGE_SHIFT));
	*pte = (1 << PAGE_SHIFT) | PG_RW | PG_V;

	/*
	 * get memory map with INT 15:E820
	 */
#define SMAPSIZ sizeof(*smap)
#define SMAP_SIG 0x534D4150 /* 'SMAP' */

	vmc.npages = 0;
	smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
	vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);

	physmap_idx = 0;
	vmf.vmf_ebx = 0;
	do {
		vmf.vmf_eax = 0xE820;
		vmf.vmf_edx = SMAP_SIG;
		vmf.vmf_ecx = SMAPSIZ;
		i = vm86_datacall(0x15, &vmf, &vmc);
		if (i || vmf.vmf_eax != SMAP_SIG)
			break;
		if (boothowto & RB_VERBOSE)
			printf("SMAP type=%02x base=%08x %08x len=%08x %08x\n",
			    smap->type,
			    *(u_int32_t *)((char *)&smap->base + 4),
			    (u_int32_t)smap->base,
			    *(u_int32_t *)((char *)&smap->length + 4),
			    (u_int32_t)smap->length);

		if (smap->type != 0x01)
			goto next_run;

		if (smap->length == 0)
			goto next_run;

		if (smap->base >= 0xffffffff) {
			printf("%uK of memory above 4GB ignored\n",
			    (u_int)(smap->length / 1024));
			goto next_run;
		}

		for (i = 0; i <= physmap_idx; i += 2) {
			if (smap->base < physmap[i + 1]) {
				if (boothowto & RB_VERBOSE)
					printf(
	"Overlapping or non-monotonic memory region, ignoring second region\n");
				goto next_run;
			}
		}

		if (smap->base == physmap[physmap_idx + 1]) {
			physmap[physmap_idx + 1] += smap->length;
			goto next_run;
		}

		physmap_idx += 2;
		if (physmap_idx == PHYSMAP_SIZE) {
			printf(
		"Too many segments in the physical address map, giving up\n");
			break;
		}
		physmap[physmap_idx] = smap->base;
		physmap[physmap_idx + 1] = smap->base + smap->length;
next_run:
	} while (vmf.vmf_ebx != 0);

	if (physmap[1] != 0)
		goto physmap_done;

	/*
	 * If we failed above, try memory map with INT 15:E801
	 */
	vmf.vmf_ax = 0xE801;
	if (vm86_intcall(0x15, &vmf) == 0) {
		extmem = vmf.vmf_cx + vmf.vmf_dx * 64;
	} else {
#if 0
		vmf.vmf_ah = 0x88;
		vm86_intcall(0x15, &vmf);
		extmem = vmf.vmf_ax;
#else
		/*
		 * Prefer the RTC value for extended memory.
		 */
		extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8);
#endif
	}

	/*
	 * Special hack for chipsets that still remap the 384k hole when
	 * there's 16MB of memory - this really confuses people that
	 * are trying to use bus mastering ISA controllers with the
	 * "16MB limit"; they only have 16MB, but the remapping puts
	 * them beyond the limit.
	 *
	 * If extended memory is between 15-16MB (16-17MB phys address range),
	 * chop it to 15MB.
	 */
	if ((extmem > 15 * 1024) && (extmem < 16 * 1024))
		extmem = 15 * 1024;

	physmap[0] = 0;
	physmap[1] = basemem * 1024;
	physmap_idx = 2;
	physmap[physmap_idx] = 0x100000;
	physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024;

physmap_done:
	/*
	 * Now, physmap contains a map of physical memory.
	 */

#ifdef SMP
	/* make hole for AP bootstrap code */
	physmap[1] = mp_bootaddress(physmap[1] / 1024);

	/* look for the MP hardware - needed for apic addresses */
	mp_probe();
#endif

	/*
	 * Maxmem isn't the "maximum memory", it's one larger than the
	 * highest page of the physical address space.
It should be 1342 * called something like "Maxphyspage". We may adjust this 1343 * based on ``hw.physmem'' and the results of the memory test. 1344 */ 1345 Maxmem = atop(physmap[physmap_idx + 1]); 1346 1347#ifdef MAXMEM 1348 Maxmem = MAXMEM / 4; 1349#endif 1350 1351 /* 1352 * hw.maxmem is a size in bytes; we also allow k, m, and g suffixes 1353 * for the appropriate modifiers. This overrides MAXMEM. 1354 */ 1355 if ((cp = getenv("hw.physmem")) != NULL) { 1356 u_int64_t AllowMem, sanity; 1357 const char *ep; 1358 1359 sanity = AllowMem = strtouq(cp, &ep, 0); 1360 if ((ep != cp) && (*ep != 0)) { 1361 switch(*ep) { 1362 case 'g': 1363 case 'G': 1364 AllowMem <<= 10; 1365 case 'm': 1366 case 'M': 1367 AllowMem <<= 10; 1368 case 'k': 1369 case 'K': 1370 AllowMem <<= 10; 1371 break; 1372 default: 1373 AllowMem = sanity = 0; 1374 } 1375 if (AllowMem < sanity) 1376 AllowMem = 0; 1377 } 1378 if (AllowMem == 0) 1379 printf("Ignoring invalid memory size of '%s'\n", cp); 1380 else 1381 Maxmem = atop(AllowMem); 1382 } 1383 1384 if (atop(physmap[physmap_idx + 1]) != Maxmem && 1385 (boothowto & RB_VERBOSE)) 1386 printf("Physical memory use set to %uK\n", Maxmem * 4); 1387 1388 /* 1389 * If Maxmem has been increased beyond what the system has detected, 1390 * extend the last memory segment to the new limit. 1391 */ 1392 if (atop(physmap[physmap_idx + 1]) < Maxmem) 1393 physmap[physmap_idx + 1] = ptoa(Maxmem); 1394 1395 /* call pmap initialization to make new kernel address space */ 1396 pmap_bootstrap(first, 0); 1397 1398 /* 1399 * Size up each available chunk of physical memory. 1400 */ 1401 physmap[0] = PAGE_SIZE; /* mask off page 0 */ 1402 pa_indx = 0; 1403 phys_avail[pa_indx++] = physmap[0]; 1404 phys_avail[pa_indx] = physmap[0]; 1405#if 0 1406 pte = (pt_entry_t)vtopte(KERNBASE); 1407#else 1408 pte = (pt_entry_t)CMAP1; 1409#endif 1410 1411 /* 1412 * physmap is in bytes, so when converting to page boundaries, 1413 * round up the start address and round down the end address. 1414 */ 1415 for (i = 0; i <= physmap_idx; i += 2) { 1416 vm_offset_t end; 1417 1418 end = ptoa(Maxmem); 1419 if (physmap[i + 1] < end) 1420 end = trunc_page(physmap[i + 1]); 1421 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) { 1422 int tmp, page_bad; 1423#if 0 1424 int *ptr = 0; 1425#else 1426 int *ptr = (int *)CADDR1; 1427#endif 1428 1429 /* 1430 * block out kernel memory as not available. 1431 */ 1432 if (pa >= 0x100000 && pa < first) 1433 continue; 1434 1435 page_bad = FALSE; 1436 1437 /* 1438 * map page into kernel: valid, read/write,non-cacheable 1439 */ 1440 *pte = pa | PG_V | PG_RW | PG_N; 1441 invltlb(); 1442 1443 tmp = *(int *)ptr; 1444 /* 1445 * Test for alternating 1's and 0's 1446 */ 1447 *(volatile int *)ptr = 0xaaaaaaaa; 1448 if (*(volatile int *)ptr != 0xaaaaaaaa) { 1449 page_bad = TRUE; 1450 } 1451 /* 1452 * Test for alternating 0's and 1's 1453 */ 1454 *(volatile int *)ptr = 0x55555555; 1455 if (*(volatile int *)ptr != 0x55555555) { 1456 page_bad = TRUE; 1457 } 1458 /* 1459 * Test for all 1's 1460 */ 1461 *(volatile int *)ptr = 0xffffffff; 1462 if (*(volatile int *)ptr != 0xffffffff) { 1463 page_bad = TRUE; 1464 } 1465 /* 1466 * Test for all 0's 1467 */ 1468 *(volatile int *)ptr = 0x0; 1469 if (*(volatile int *)ptr != 0x0) { 1470 page_bad = TRUE; 1471 } 1472 /* 1473 * Restore original value. 1474 */ 1475 *(int *)ptr = tmp; 1476 1477 /* 1478 * Adjust array of valid/good pages. 
1479 */ 1480 if (page_bad == TRUE) { 1481 continue; 1482 } 1483 /* 1484 * If this good page is a continuation of the 1485 * previous set of good pages, then just increase 1486 * the end pointer. Otherwise start a new chunk. 1487 * Note that "end" points one higher than end, 1488 * making the range >= start and < end. 1489 * If we're also doing a speculative memory 1490 * test and we at or past the end, bump up Maxmem 1491 * so that we keep going. The first bad page 1492 * will terminate the loop. 1493 */ 1494 if (phys_avail[pa_indx] == pa) { 1495 phys_avail[pa_indx] += PAGE_SIZE; 1496 } else { 1497 pa_indx++; 1498 if (pa_indx == PHYS_AVAIL_ARRAY_END) { 1499 printf("Too many holes in the physical address space, giving up\n"); 1500 pa_indx--; 1501 break; 1502 } 1503 phys_avail[pa_indx++] = pa; /* start */ 1504 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */ 1505 } 1506 physmem++; 1507 } 1508 } 1509 *pte = 0; 1510 invltlb(); 1511 1512 /* 1513 * XXX 1514 * The last chunk must contain at least one page plus the message 1515 * buffer to avoid complicating other code (message buffer address 1516 * calculation, etc.). 1517 */ 1518 while (phys_avail[pa_indx - 1] + PAGE_SIZE + 1519 round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) { 1520 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]); 1521 phys_avail[pa_indx--] = 0; 1522 phys_avail[pa_indx--] = 0; 1523 } 1524 1525 Maxmem = atop(phys_avail[pa_indx]); 1526 1527 /* Trim off space for the message buffer. */ 1528 phys_avail[pa_indx] -= round_page(MSGBUF_SIZE); 1529 1530 avail_end = phys_avail[pa_indx]; 1531} 1532 1533void 1534init386(first) 1535 int first; 1536{ 1537 int x; 1538 struct gate_descriptor *gdp; 1539 int gsel_tss; 1540#ifndef SMP 1541 /* table descriptors - used to load tables by microp */ 1542 struct region_descriptor r_gdt, r_idt; 1543#endif 1544 int off; 1545 1546 /* 1547 * Prevent lowering of the ipl if we call tsleep() early. 1548 */ 1549 safepri = cpl; 1550 1551 proc0.p_addr = proc0paddr; 1552 1553 atdevbase = ISA_HOLE_START + KERNBASE; 1554 1555 if (bootinfo.bi_modulep) { 1556 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE; 1557 preload_bootstrap_relocate(KERNBASE); 1558 } 1559 if (bootinfo.bi_envp) 1560 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE; 1561 1562 /* 1563 * make gdt memory segments, the code segment goes up to end of the 1564 * page with etext in it, the data segment goes to the end of 1565 * the address space 1566 */ 1567 /* 1568 * XXX text protection is temporarily (?) disabled. The limit was 1569 * i386_btop(round_page(etext)) - 1. 
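	 * (With ssd_limit set to i386_btop(0) - 1 below, the 20-bit limit
	 * field wraps to 0xfffff pages, so the kernel code and data segments
	 * end up covering the entire 4GB address space.)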
1570 */ 1571 gdt_segs[GCODE_SEL].ssd_limit = i386_btop(0) - 1; 1572 gdt_segs[GDATA_SEL].ssd_limit = i386_btop(0) - 1; 1573#ifdef SMP 1574 gdt_segs[GPRIV_SEL].ssd_limit = 1575 i386_btop(sizeof(struct privatespace)) - 1; 1576 gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[0]; 1577 gdt_segs[GPROC0_SEL].ssd_base = 1578 (int) &SMP_prvspace[0].globaldata.gd_common_tss; 1579 SMP_prvspace[0].globaldata.gd_prvspace = &SMP_prvspace[0]; 1580#else 1581 gdt_segs[GPRIV_SEL].ssd_limit = i386_btop(0) - 1; 1582 gdt_segs[GPROC0_SEL].ssd_base = (int) &common_tss; 1583#endif 1584 1585 for (x = 0; x < NGDT; x++) { 1586#ifdef BDE_DEBUGGER 1587 /* avoid overwriting db entries with APM ones */ 1588 if (x >= GAPMCODE32_SEL && x <= GAPMDATA_SEL) 1589 continue; 1590#endif 1591 ssdtosd(&gdt_segs[x], &gdt[x].sd); 1592 } 1593 1594 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 1595 r_gdt.rd_base = (int) gdt; 1596 lgdt(&r_gdt); 1597 1598 /* make ldt memory segments */ 1599 /* 1600 * The data segment limit must not cover the user area because we 1601 * don't want the user area to be writable in copyout() etc. (page 1602 * level protection is lost in kernel mode on 386's). Also, we 1603 * don't want the user area to be writable directly (page level 1604 * protection of the user area is not available on 486's with 1605 * CR0_WP set, because there is no user-read/kernel-write mode). 1606 * 1607 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max. And it 1608 * should be spelled ...MAX_USER... 1609 */ 1610#define VM_END_USER_RW_ADDRESS VM_MAXUSER_ADDRESS 1611 /* 1612 * The code segment limit has to cover the user area until we move 1613 * the signal trampoline out of the user area. This is safe because 1614 * the code segment cannot be written to directly. 1615 */ 1616#define VM_END_USER_R_ADDRESS (VM_END_USER_RW_ADDRESS + UPAGES * PAGE_SIZE) 1617 ldt_segs[LUCODE_SEL].ssd_limit = i386_btop(VM_END_USER_R_ADDRESS) - 1; 1618 ldt_segs[LUDATA_SEL].ssd_limit = i386_btop(VM_END_USER_RW_ADDRESS) - 1; 1619 for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++) 1620 ssdtosd(&ldt_segs[x], &ldt[x].sd); 1621 1622 _default_ldt = GSEL(GLDT_SEL, SEL_KPL); 1623 lldt(_default_ldt); 1624#ifdef USER_LDT 1625 currentldt = _default_ldt; 1626#endif 1627 1628 /* exceptions */ 1629 for (x = 0; x < NIDT; x++) 1630 setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1631 setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1632 setidt(1, &IDTVEC(dbg), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1633 setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1634 setidt(3, &IDTVEC(bpt), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL)); 1635 setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL)); 1636 setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1637 setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1638 setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1639 setidt(8, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL)); 1640 setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1641 setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1642 setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1643 setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1644 setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1645 setidt(14, &IDTVEC(page), SDT_SYS386IGT, 
SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1646 setidt(15, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1647 setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1648 setidt(17, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1649 setidt(18, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1650 setidt(0x80, &IDTVEC(int0x80_syscall), 1651 SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL)); 1652 1653 r_idt.rd_limit = sizeof(idt0) - 1; 1654 r_idt.rd_base = (int) idt; 1655 lidt(&r_idt); 1656 1657 /* 1658 * Initialize the console before we print anything out. 1659 */ 1660 cninit(); 1661 1662#include "isa.h" 1663#if NISA >0 1664 isa_defaultirq(); 1665#endif 1666 rand_initialize(); 1667 1668#ifdef DDB 1669 kdb_init(); 1670 if (boothowto & RB_KDB) 1671 Debugger("Boot flags requested debugger"); 1672#endif 1673 1674 finishidentcpu(); /* Final stage of CPU initialization */ 1675 setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1676 setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL)); 1677 initializecpu(); /* Initialize CPU registers */ 1678 1679 /* make an initial tss so cpu can get interrupt stack on syscall! */ 1680 common_tss.tss_esp0 = (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16; 1681 common_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL) ; 1682 gsel_tss = GSEL(GPROC0_SEL, SEL_KPL); 1683 private_tss = 0; 1684 tss_gdt = &gdt[GPROC0_SEL].sd; 1685 common_tssd = *tss_gdt; 1686 common_tss.tss_ioopt = (sizeof common_tss) << 16; 1687 ltr(gsel_tss); 1688 1689 dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 = 1690 dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)]; 1691 dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 = 1692 dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL); 1693 dblfault_tss.tss_cr3 = (int)IdlePTD; 1694 dblfault_tss.tss_eip = (int) dblfault_handler; 1695 dblfault_tss.tss_eflags = PSL_KERNEL; 1696 dblfault_tss.tss_ds = dblfault_tss.tss_es = 1697 dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL); 1698 dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL); 1699 dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL); 1700 dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL); 1701 1702 vm86_initialize(); 1703 getmemsize(first); 1704 1705 /* now running on new page tables, configured,and u/iom is accessible */ 1706 1707 /* Map the message buffer. */ 1708 for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE) 1709 pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off); 1710 1711 msgbufinit(msgbufp, MSGBUF_SIZE); 1712 1713 /* make a call gate to reenter kernel with */ 1714 gdp = &ldt[LSYS5CALLS_SEL].gd; 1715 1716 x = (int) &IDTVEC(syscall); 1717 gdp->gd_looffset = x++; 1718 gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL); 1719 gdp->gd_stkcpy = 1; 1720 gdp->gd_type = SDT_SYS386CGT; 1721 gdp->gd_dpl = SEL_UPL; 1722 gdp->gd_p = 1; 1723 gdp->gd_hioffset = ((int) &IDTVEC(syscall)) >>16; 1724 1725 /* XXX does this work? 
*/ 1726 ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL]; 1727 ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL]; 1728 1729 /* transfer to user mode */ 1730 1731 _ucodesel = LSEL(LUCODE_SEL, SEL_UPL); 1732 _udatasel = LSEL(LUDATA_SEL, SEL_UPL); 1733 1734 /* setup proc 0's pcb */ 1735 proc0.p_addr->u_pcb.pcb_flags = 0; 1736 proc0.p_addr->u_pcb.pcb_cr3 = (int)IdlePTD; 1737#ifdef SMP 1738 proc0.p_addr->u_pcb.pcb_mpnest = 1; 1739#endif 1740 proc0.p_addr->u_pcb.pcb_ext = 0; 1741} 1742 1743#if defined(I586_CPU) && !defined(NO_F00F_HACK) 1744static void f00f_hack(void *unused); 1745SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL); 1746 1747static void 1748f00f_hack(void *unused) { 1749 struct gate_descriptor *new_idt; 1750#ifndef SMP 1751 struct region_descriptor r_idt; 1752#endif 1753 vm_offset_t tmp; 1754 1755 if (!has_f00f_bug) 1756 return; 1757 1758 printf("Intel Pentium detected, installing workaround for F00F bug\n"); 1759 1760 r_idt.rd_limit = sizeof(idt0) - 1; 1761 1762 tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2); 1763 if (tmp == 0) 1764 panic("kmem_alloc returned 0"); 1765 if (((unsigned int)tmp & (PAGE_SIZE-1)) != 0) 1766 panic("kmem_alloc returned non-page-aligned memory"); 1767 /* Put the first seven entries in the lower page */ 1768 new_idt = (struct gate_descriptor*)(tmp + PAGE_SIZE - (7*8)); 1769 bcopy(idt, new_idt, sizeof(idt0)); 1770 r_idt.rd_base = (int)new_idt; 1771 lidt(&r_idt); 1772 idt = new_idt; 1773 if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE, 1774 VM_PROT_READ, FALSE) != KERN_SUCCESS) 1775 panic("vm_map_protect failed"); 1776 return; 1777} 1778#endif /* defined(I586_CPU) && !NO_F00F_HACK */ 1779 1780int 1781ptrace_set_pc(p, addr) 1782 struct proc *p; 1783 unsigned long addr; 1784{ 1785 p->p_md.md_regs->tf_eip = addr; 1786 return (0); 1787} 1788 1789int 1790ptrace_single_step(p) 1791 struct proc *p; 1792{ 1793 p->p_md.md_regs->tf_eflags |= PSL_T; 1794 return (0); 1795} 1796 1797int ptrace_read_u_check(p, addr, len) 1798 struct proc *p; 1799 vm_offset_t addr; 1800 size_t len; 1801{ 1802 vm_offset_t gap; 1803 1804 if ((vm_offset_t) (addr + len) < addr) 1805 return EPERM; 1806 if ((vm_offset_t) (addr + len) <= sizeof(struct user)) 1807 return 0; 1808 1809 gap = (char *) p->p_md.md_regs - (char *) p->p_addr; 1810 1811 if ((vm_offset_t) addr < gap) 1812 return EPERM; 1813 if ((vm_offset_t) (addr + len) <= 1814 (vm_offset_t) (gap + sizeof(struct trapframe))) 1815 return 0; 1816 return EPERM; 1817} 1818 1819int ptrace_write_u(p, off, data) 1820 struct proc *p; 1821 vm_offset_t off; 1822 long data; 1823{ 1824 struct trapframe frame_copy; 1825 vm_offset_t min; 1826 struct trapframe *tp; 1827 1828 /* 1829 * Privileged kernel state is scattered all over the user area. 1830 * Only allow write access to parts of regs and to fpregs. 
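	 * Writes that fall within the trapframe are further validated with
	 * EFLAGS_SECURE() and CS_SECURE() below, so a debugger cannot hand
	 * the traced process privileged eflags bits or a kernel %cs.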
1831 */ 1832 min = (char *)p->p_md.md_regs - (char *)p->p_addr; 1833 if (off >= min && off <= min + sizeof(struct trapframe) - sizeof(int)) { 1834 tp = p->p_md.md_regs; 1835 frame_copy = *tp; 1836 *(int *)((char *)&frame_copy + (off - min)) = data; 1837 if (!EFLAGS_SECURE(frame_copy.tf_eflags, tp->tf_eflags) || 1838 !CS_SECURE(frame_copy.tf_cs)) 1839 return (EINVAL); 1840 *(int*)((char *)p->p_addr + off) = data; 1841 return (0); 1842 } 1843 min = offsetof(struct user, u_pcb) + offsetof(struct pcb, pcb_savefpu); 1844 if (off >= min && off <= min + sizeof(struct save87) - sizeof(int)) { 1845 *(int*)((char *)p->p_addr + off) = data; 1846 return (0); 1847 } 1848 return (EFAULT); 1849} 1850 1851int 1852fill_regs(p, regs) 1853 struct proc *p; 1854 struct reg *regs; 1855{ 1856 struct pcb *pcb; 1857 struct trapframe *tp; 1858 1859 tp = p->p_md.md_regs; 1860 regs->r_fs = tp->tf_fs; 1861 regs->r_es = tp->tf_es; 1862 regs->r_ds = tp->tf_ds; 1863 regs->r_edi = tp->tf_edi; 1864 regs->r_esi = tp->tf_esi; 1865 regs->r_ebp = tp->tf_ebp; 1866 regs->r_ebx = tp->tf_ebx; 1867 regs->r_edx = tp->tf_edx; 1868 regs->r_ecx = tp->tf_ecx; 1869 regs->r_eax = tp->tf_eax; 1870 regs->r_eip = tp->tf_eip; 1871 regs->r_cs = tp->tf_cs; 1872 regs->r_eflags = tp->tf_eflags; 1873 regs->r_esp = tp->tf_esp; 1874 regs->r_ss = tp->tf_ss; 1875 pcb = &p->p_addr->u_pcb; 1876 regs->r_gs = pcb->pcb_gs; 1877 return (0); 1878} 1879 1880int 1881set_regs(p, regs) 1882 struct proc *p; 1883 struct reg *regs; 1884{ 1885 struct pcb *pcb; 1886 struct trapframe *tp; 1887 1888 tp = p->p_md.md_regs; 1889 if (!EFLAGS_SECURE(regs->r_eflags, tp->tf_eflags) || 1890 !CS_SECURE(regs->r_cs)) 1891 return (EINVAL); 1892 tp->tf_fs = regs->r_fs; 1893 tp->tf_es = regs->r_es; 1894 tp->tf_ds = regs->r_ds; 1895 tp->tf_edi = regs->r_edi; 1896 tp->tf_esi = regs->r_esi; 1897 tp->tf_ebp = regs->r_ebp; 1898 tp->tf_ebx = regs->r_ebx; 1899 tp->tf_edx = regs->r_edx; 1900 tp->tf_ecx = regs->r_ecx; 1901 tp->tf_eax = regs->r_eax; 1902 tp->tf_eip = regs->r_eip; 1903 tp->tf_cs = regs->r_cs; 1904 tp->tf_eflags = regs->r_eflags; 1905 tp->tf_esp = regs->r_esp; 1906 tp->tf_ss = regs->r_ss; 1907 pcb = &p->p_addr->u_pcb; 1908 pcb->pcb_gs = regs->r_gs; 1909 return (0); 1910} 1911 1912int 1913fill_fpregs(p, fpregs) 1914 struct proc *p; 1915 struct fpreg *fpregs; 1916{ 1917 bcopy(&p->p_addr->u_pcb.pcb_savefpu, fpregs, sizeof *fpregs); 1918 return (0); 1919} 1920 1921int 1922set_fpregs(p, fpregs) 1923 struct proc *p; 1924 struct fpreg *fpregs; 1925{ 1926 bcopy(fpregs, &p->p_addr->u_pcb.pcb_savefpu, sizeof *fpregs); 1927 return (0); 1928} 1929 1930#ifndef DDB 1931void 1932Debugger(const char *msg) 1933{ 1934 printf("Debugger(\"%s\") called.\n", msg); 1935} 1936#endif /* no DDB */ 1937 1938#include <sys/disklabel.h> 1939 1940/* 1941 * Determine the size of the transfer, and make sure it is 1942 * within the boundaries of the partition. Adjust transfer 1943 * if needed, and signal errors or early completion. 1944 */ 1945int 1946bounds_check_with_label(struct buf *bp, struct disklabel *lp, int wlabel) 1947{ 1948 struct partition *p = lp->d_partitions + dkpart(bp->b_dev); 1949 int labelsect = lp->d_partitions[0].p_offset; 1950 int maxsz = p->p_size, 1951 sz = (bp->b_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT; 1952 1953 /* overwriting disk label ? 
*/ 1954 /* XXX should also protect bootstrap in first 8K */ 1955 if (bp->b_blkno + p->p_offset <= LABELSECTOR + labelsect && 1956#if LABELSECTOR != 0 1957 bp->b_blkno + p->p_offset + sz > LABELSECTOR + labelsect && 1958#endif 1959 (bp->b_flags & B_READ) == 0 && wlabel == 0) { 1960 bp->b_error = EROFS; 1961 goto bad; 1962 } 1963 1964#if defined(DOSBBSECTOR) && defined(notyet) 1965 /* overwriting master boot record? */ 1966 if (bp->b_blkno + p->p_offset <= DOSBBSECTOR && 1967 (bp->b_flags & B_READ) == 0 && wlabel == 0) { 1968 bp->b_error = EROFS; 1969 goto bad; 1970 } 1971#endif 1972 1973 /* beyond partition? */ 1974 if (bp->b_blkno < 0 || bp->b_blkno + sz > maxsz) { 1975 /* if exactly at end of disk, return an EOF */ 1976 if (bp->b_blkno == maxsz) { 1977 bp->b_resid = bp->b_bcount; 1978 return(0); 1979 } 1980 /* or truncate if part of it fits */ 1981 sz = maxsz - bp->b_blkno; 1982 if (sz <= 0) { 1983 bp->b_error = EINVAL; 1984 goto bad; 1985 } 1986 bp->b_bcount = sz << DEV_BSHIFT; 1987 } 1988 1989 bp->b_pblkno = bp->b_blkno + p->p_offset; 1990 return(1); 1991 1992bad: 1993 bp->b_flags |= B_ERROR; 1994 return(-1); 1995} 1996 1997#ifdef DDB 1998 1999/* 2000 * Provide inb() and outb() as functions. They are normally only 2001 * available as macros calling inlined functions, thus cannot be 2002 * called inside DDB. 2003 * 2004 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined. 2005 */ 2006 2007#undef inb 2008#undef outb 2009 2010/* silence compiler warnings */ 2011u_char inb(u_int); 2012void outb(u_int, u_char); 2013 2014u_char 2015inb(u_int port) 2016{ 2017 u_char data; 2018 /* 2019 * We use %%dx and not %1 here because i/o is done at %dx and not at 2020 * %edx, while gcc generates inferior code (movw instead of movl) 2021 * if we tell it to load (u_short) port. 2022 */ 2023 __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port)); 2024 return (data); 2025} 2026 2027void 2028outb(u_int port, u_char data) 2029{ 2030 u_char al; 2031 /* 2032 * Use an unnecessary assignment to help gcc's register allocator. 2033 * This make a large difference for gcc-1.40 and a tiny difference 2034 * for gcc-2.6.0. For gcc-1.40, al had to be ``asm("ax")'' for 2035 * best results. gcc-2.6.0 can't handle this. 2036 */ 2037 al = data; 2038 __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port)); 2039} 2040 2041#endif /* DDB */ 2042