machdep.c revision 79573
1/*- 2 * Copyright (c) 1992 Terrence R. Lambert. 3 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to Berkeley by 7 * William Jolitz. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by the University of 20 * California, Berkeley and its contributors. 21 * 4. Neither the name of the University nor the names of its contributors 22 * may be used to endorse or promote products derived from this software 23 * without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 
36 * 37 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 38 * $FreeBSD: head/sys/amd64/amd64/machdep.c 79573 2001-07-11 03:15:25Z bsd $ 39 */ 40 41#include "opt_atalk.h" 42#include "opt_compat.h" 43#include "opt_cpu.h" 44#include "opt_ddb.h" 45#include "opt_inet.h" 46#include "opt_ipx.h" 47#include "opt_isa.h" 48#include "opt_maxmem.h" 49#include "opt_msgbuf.h" 50#include "opt_npx.h" 51#include "opt_perfmon.h" 52/* #include "opt_userconfig.h" */ 53 54#include <sys/param.h> 55#include <sys/systm.h> 56#include <sys/sysproto.h> 57#include <sys/signalvar.h> 58#include <sys/kernel.h> 59#include <sys/ktr.h> 60#include <sys/linker.h> 61#include <sys/lock.h> 62#include <sys/malloc.h> 63#include <sys/mutex.h> 64#include <sys/pcpu.h> 65#include <sys/proc.h> 66#include <sys/bio.h> 67#include <sys/buf.h> 68#include <sys/reboot.h> 69#include <sys/smp.h> 70#include <sys/callout.h> 71#include <sys/msgbuf.h> 72#include <sys/sysent.h> 73#include <sys/sysctl.h> 74#include <sys/vmmeter.h> 75#include <sys/bus.h> 76#include <sys/eventhandler.h> 77 78#include <vm/vm.h> 79#include <vm/vm_param.h> 80#include <sys/lock.h> 81#include <vm/vm_kern.h> 82#include <vm/vm_object.h> 83#include <vm/vm_page.h> 84#include <vm/vm_map.h> 85#include <vm/vm_pager.h> 86#include <vm/vm_extern.h> 87 88#include <sys/user.h> 89#include <sys/exec.h> 90#include <sys/cons.h> 91 92#include <ddb/ddb.h> 93 94#include <net/netisr.h> 95 96#include <machine/cpu.h> 97#include <machine/cputypes.h> 98#include <machine/reg.h> 99#include <machine/clock.h> 100#include <machine/specialreg.h> 101#include <machine/bootinfo.h> 102#include <machine/md_var.h> 103#include <machine/pc/bios.h> 104#include <machine/pcb_ext.h> /* pcb.h included via sys/user.h */ 105#include <machine/globals.h> 106#ifdef PERFMON 107#include <machine/perfmon.h> 108#endif 109 110#include <i386/isa/icu.h> 111#include <i386/isa/intr_machdep.h> 112#include <isa/rtc.h> 113#include <machine/vm86.h> 114#include <sys/ptrace.h> 115#include <machine/sigframe.h> 116 117extern void init386 __P((int first)); 118extern void dblfault_handler __P((void)); 119 120extern void printcpuinfo(void); /* XXX header file */ 121extern void earlysetcpuclass(void); /* same header file */ 122extern void finishidentcpu(void); 123extern void panicifcpuunsupported(void); 124extern void initializecpu(void); 125 126#define CS_SECURE(cs) (ISPL(cs) == SEL_UPL) 127#define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0) 128 129static void cpu_startup __P((void *)); 130SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL) 131 132int _udatasel, _ucodesel; 133u_int atdevbase; 134 135#if defined(SWTCH_OPTIM_STATS) 136extern int swtch_optim_stats; 137SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats, 138 CTLFLAG_RD, &swtch_optim_stats, 0, ""); 139SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count, 140 CTLFLAG_RD, &tlb_flush_count, 0, ""); 141#endif 142 143#ifdef PC98 144static int ispc98 = 1; 145#else 146static int ispc98 = 0; 147#endif 148SYSCTL_INT(_machdep, OID_AUTO, ispc98, CTLFLAG_RD, &ispc98, 0, ""); 149 150int physmem = 0; 151int cold = 1; 152 153static void osendsig __P((sig_t catcher, int sig, sigset_t *mask, u_long code)); 154 155static int 156sysctl_hw_physmem(SYSCTL_HANDLER_ARGS) 157{ 158 int error = sysctl_handle_int(oidp, 0, ctob(physmem), req); 159 return (error); 160} 161 162SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_INT|CTLFLAG_RD, 163 0, 0, sysctl_hw_physmem, "IU", ""); 164 165static int 166sysctl_hw_usermem(SYSCTL_HANDLER_ARGS) 167{ 168 int error = sysctl_handle_int(oidp, 0, 169 ctob(physmem 
- cnt.v_wire_count), req); 170 return (error); 171} 172 173SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_INT|CTLFLAG_RD, 174 0, 0, sysctl_hw_usermem, "IU", ""); 175 176static int 177sysctl_hw_availpages(SYSCTL_HANDLER_ARGS) 178{ 179 int error = sysctl_handle_int(oidp, 0, 180 i386_btop(avail_end - avail_start), req); 181 return (error); 182} 183 184SYSCTL_PROC(_hw, OID_AUTO, availpages, CTLTYPE_INT|CTLFLAG_RD, 185 0, 0, sysctl_hw_availpages, "I", ""); 186 187int Maxmem = 0; 188long dumplo; 189 190vm_offset_t phys_avail[10]; 191 192/* must be 2 less so 0 0 can signal end of chunks */ 193#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2) 194 195static vm_offset_t buffer_sva, buffer_eva; 196vm_offset_t clean_sva, clean_eva; 197static vm_offset_t pager_sva, pager_eva; 198static struct trapframe proc0_tf; 199#ifndef SMP 200static struct globaldata __globaldata; 201#endif 202 203struct mtx sched_lock; 204struct mtx Giant; 205 206static void 207cpu_startup(dummy) 208 void *dummy; 209{ 210 register unsigned i; 211 register caddr_t v; 212 vm_offset_t maxaddr; 213 vm_size_t size = 0; 214 int firstaddr; 215 vm_offset_t minaddr; 216 int physmem_est; 217 218 /* 219 * Good {morning,afternoon,evening,night}. 220 */ 221 earlysetcpuclass(); 222 startrtclock(); 223 printcpuinfo(); 224 panicifcpuunsupported(); 225#ifdef PERFMON 226 perfmon_init(); 227#endif 228 printf("real memory = %u (%uK bytes)\n", ptoa(Maxmem), ptoa(Maxmem) / 1024); 229 /* 230 * Display any holes after the first chunk of extended memory. 231 */ 232 if (bootverbose) { 233 int indx; 234 235 printf("Physical memory chunk(s):\n"); 236 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) { 237 unsigned int size1 = phys_avail[indx + 1] - phys_avail[indx]; 238 239 printf("0x%08x - 0x%08x, %u bytes (%u pages)\n", 240 phys_avail[indx], phys_avail[indx + 1] - 1, size1, 241 size1 / PAGE_SIZE); 242 } 243 } 244 245 /* 246 * Calculate callout wheel size 247 */ 248 for (callwheelsize = 1, callwheelbits = 0; 249 callwheelsize < ncallout; 250 callwheelsize <<= 1, ++callwheelbits) 251 ; 252 callwheelmask = callwheelsize - 1; 253 254 /* 255 * Allocate space for system data structures. 256 * The first available kernel virtual address is in "v". 257 * As pages of kernel virtual memory are allocated, "v" is incremented. 258 * As pages of memory are allocated and cleared, 259 * "firstaddr" is incremented. 260 * An index into the kernel page table corresponding to the 261 * virtual memory address maintained in "v" is kept in "mapaddr". 262 */ 263 264 /* 265 * Make two passes. The first pass calculates how much memory is 266 * needed and allocates it. The second pass assigns virtual 267 * addresses to the various data structures. 268 */ 269 firstaddr = 0; 270again: 271 v = (caddr_t)firstaddr; 272 273#define valloc(name, type, num) \ 274 (name) = (type *)v; v = (caddr_t)((name)+(num)) 275#define valloclim(name, type, num, lim) \ 276 (name) = (type *)v; v = (caddr_t)((lim) = ((name)+(num))) 277 278 valloc(callout, struct callout, ncallout); 279 valloc(callwheel, struct callout_tailq, callwheelsize); 280 281 /* 282 * Discount the physical memory larger than the size of kernel_map 283 * to avoid eating up all of KVA space. 284 */ 285 if (kernel_map->first_free == NULL) { 286 printf("Warning: no free entries in kernel_map.\n"); 287 physmem_est = physmem; 288 } else 289 physmem_est = min(physmem, kernel_map->max_offset - kernel_map->min_offset); 290 291 /* 292 * The nominal buffer size (and minimum KVA allocation) is BKVASIZE. 
293 * For the first 64MB of ram nominally allocate sufficient buffers to 294 * cover 1/4 of our ram. Beyond the first 64MB allocate additional 295 * buffers to cover 1/20 of our ram over 64MB. 296 * 297 * factor represents the 1/4 x ram conversion. 298 */ 299 if (nbuf == 0) { 300 int factor = 4 * BKVASIZE / PAGE_SIZE; 301 302 nbuf = 50; 303 if (physmem_est > 1024) 304 nbuf += min((physmem_est - 1024) / factor, 16384 / factor); 305 if (physmem_est > 16384) 306 nbuf += (physmem_est - 16384) * 2 / (factor * 5); 307 } 308 309 /* 310 * Do not allow the buffer_map to be more then 1/2 the size of the 311 * kernel_map. 312 */ 313 if (nbuf > (kernel_map->max_offset - kernel_map->min_offset) / 314 (BKVASIZE * 2)) { 315 nbuf = (kernel_map->max_offset - kernel_map->min_offset) / 316 (BKVASIZE * 2); 317 printf("Warning: nbufs capped at %d\n", nbuf); 318 } 319 320 nswbuf = max(min(nbuf/4, 256), 16); 321 322 valloc(swbuf, struct buf, nswbuf); 323 valloc(buf, struct buf, nbuf); 324 v = bufhashinit(v); 325 326 /* 327 * End of first pass, size has been calculated so allocate memory 328 */ 329 if (firstaddr == 0) { 330 size = (vm_size_t)(v - firstaddr); 331 firstaddr = (int)kmem_alloc(kernel_map, round_page(size)); 332 if (firstaddr == 0) 333 panic("startup: no room for tables"); 334 goto again; 335 } 336 337 /* 338 * End of second pass, addresses have been assigned 339 */ 340 if ((vm_size_t)(v - firstaddr) != size) 341 panic("startup: table size inconsistency"); 342 343 clean_map = kmem_suballoc(kernel_map, &clean_sva, &clean_eva, 344 (nbuf*BKVASIZE) + (nswbuf*MAXPHYS) + pager_map_size); 345 buffer_map = kmem_suballoc(clean_map, &buffer_sva, &buffer_eva, 346 (nbuf*BKVASIZE)); 347 buffer_map->system_map = 1; 348 pager_map = kmem_suballoc(clean_map, &pager_sva, &pager_eva, 349 (nswbuf*MAXPHYS) + pager_map_size); 350 pager_map->system_map = 1; 351 exec_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr, 352 (16*(ARG_MAX+(PAGE_SIZE*3)))); 353 354 /* 355 * XXX: Mbuf system machine-specific initializations should 356 * go here, if anywhere. 357 */ 358 359 /* 360 * Initialize callouts 361 */ 362 SLIST_INIT(&callfree); 363 for (i = 0; i < ncallout; i++) { 364 callout_init(&callout[i], 0); 365 callout[i].c_flags = CALLOUT_LOCAL_ALLOC; 366 SLIST_INSERT_HEAD(&callfree, &callout[i], c_links.sle); 367 } 368 369 for (i = 0; i < callwheelsize; i++) { 370 TAILQ_INIT(&callwheel[i]); 371 } 372 373 mtx_init(&callout_lock, "callout", MTX_SPIN | MTX_RECURSE); 374 375#if defined(USERCONFIG) 376 userconfig(); 377 cninit(); /* the preferred console may have changed */ 378#endif 379 380 printf("avail memory = %u (%uK bytes)\n", ptoa(cnt.v_free_count), 381 ptoa(cnt.v_free_count) / 1024); 382 383 /* 384 * Set up buffers, so they can be used to read disk labels. 385 */ 386 bufinit(); 387 vm_pager_bufferinit(); 388 389 globaldata_register(GLOBALDATA); 390#ifndef SMP 391 /* For SMP, we delay the cpu_setregs() until after SMP startup. */ 392 cpu_setregs(); 393#endif 394} 395 396/* 397 * Send an interrupt to process. 398 * 399 * Stack is set up to allow sigcode stored 400 * at top to call routine, followed by kcall 401 * to sigreturn routine below. After sigreturn 402 * resets the signal mask, the stack, and the 403 * frame pointer, it returns to the user 404 * specified pc, psl. 
405 */ 406static void 407osendsig(catcher, sig, mask, code) 408 sig_t catcher; 409 int sig; 410 sigset_t *mask; 411 u_long code; 412{ 413 struct osigframe sf; 414 struct osigframe *fp; 415 struct proc *p; 416 struct sigacts *psp; 417 struct trapframe *regs; 418 int oonstack; 419 420 p = curproc; 421 PROC_LOCK(p); 422 psp = p->p_sigacts; 423 regs = p->p_frame; 424 oonstack = sigonstack(regs->tf_esp); 425 426 /* Allocate and validate space for the signal handler context. */ 427 if ((p->p_flag & P_ALTSTACK) && !oonstack && 428 SIGISMEMBER(psp->ps_sigonstack, sig)) { 429 fp = (struct osigframe *)(p->p_sigstk.ss_sp + 430 p->p_sigstk.ss_size - sizeof(struct osigframe)); 431#if defined(COMPAT_43) || defined(COMPAT_SUNOS) 432 p->p_sigstk.ss_flags |= SS_ONSTACK; 433#endif 434 } else 435 fp = (struct osigframe *)regs->tf_esp - 1; 436 PROC_UNLOCK(p); 437 438 /* 439 * grow_stack() will return 0 if *fp does not fit inside the stack 440 * and the stack can not be grown. 441 * useracc() will return FALSE if access is denied. 442 */ 443 if (grow_stack(p, (int)fp) == 0 || 444 !useracc((caddr_t)fp, sizeof(*fp), VM_PROT_WRITE)) { 445 /* 446 * Process has trashed its stack; give it an illegal 447 * instruction to halt it in its tracks. 448 */ 449 PROC_LOCK(p); 450 SIGACTION(p, SIGILL) = SIG_DFL; 451 SIGDELSET(p->p_sigignore, SIGILL); 452 SIGDELSET(p->p_sigcatch, SIGILL); 453 SIGDELSET(p->p_sigmask, SIGILL); 454 psignal(p, SIGILL); 455 PROC_UNLOCK(p); 456 return; 457 } 458 459 /* Translate the signal if appropriate. */ 460 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize) 461 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; 462 463 /* Build the argument list for the signal handler. */ 464 sf.sf_signum = sig; 465 sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc; 466 PROC_LOCK(p); 467 if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) { 468 /* Signal handler installed with SA_SIGINFO. */ 469 sf.sf_arg2 = (register_t)&fp->sf_siginfo; 470 sf.sf_siginfo.si_signo = sig; 471 sf.sf_siginfo.si_code = code; 472 sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher; 473 } else { 474 /* Old FreeBSD-style arguments. */ 475 sf.sf_arg2 = code; 476 sf.sf_addr = regs->tf_err; 477 sf.sf_ahu.sf_handler = catcher; 478 } 479 PROC_UNLOCK(p); 480 481 /* Save most if not all of trap frame. */ 482 sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax; 483 sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx; 484 sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx; 485 sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx; 486 sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi; 487 sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi; 488 sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs; 489 sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds; 490 sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss; 491 sf.sf_siginfo.si_sc.sc_es = regs->tf_es; 492 sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs; 493 sf.sf_siginfo.si_sc.sc_gs = rgs(); 494 sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp; 495 496 /* Build the signal context to be used by osigreturn(). */ 497 sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0; 498 SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask); 499 sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp; 500 sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp; 501 sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip; 502 sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags; 503 sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno; 504 sf.sf_siginfo.si_sc.sc_err = regs->tf_err; 505 506 /* 507 * If we're a vm86 process, we want to save the segment registers. 508 * We also change eflags to be our emulated eflags, not the actual 509 * eflags. 
510 */ 511 if (regs->tf_eflags & PSL_VM) { 512 /* XXX confusing names: `tf' isn't a trapframe; `regs' is. */ 513 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 514 struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 515 516 sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs; 517 sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs; 518 sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es; 519 sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds; 520 521 if (vm86->vm86_has_vme == 0) 522 sf.sf_siginfo.si_sc.sc_ps = 523 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) | 524 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); 525 526 /* See sendsig() for comments. */ 527 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP); 528 } 529 530 /* Copy the sigframe out to the user's stack. */ 531 if (copyout(&sf, fp, sizeof(*fp)) != 0) { 532 /* 533 * Something is wrong with the stack pointer. 534 * ...Kill the process. 535 */ 536 PROC_LOCK(p); 537 sigexit(p, SIGILL); 538 /* NOTREACHED */ 539 } 540 541 regs->tf_esp = (int)fp; 542 regs->tf_eip = PS_STRINGS - szosigcode; 543 regs->tf_cs = _ucodesel; 544 regs->tf_ds = _udatasel; 545 regs->tf_es = _udatasel; 546 regs->tf_fs = _udatasel; 547 load_gs(_udatasel); 548 regs->tf_ss = _udatasel; 549} 550 551void 552sendsig(catcher, sig, mask, code) 553 sig_t catcher; 554 int sig; 555 sigset_t *mask; 556 u_long code; 557{ 558 struct sigframe sf; 559 struct proc *p; 560 struct sigacts *psp; 561 struct trapframe *regs; 562 struct sigframe *sfp; 563 int oonstack; 564 565 p = curproc; 566 PROC_LOCK(p); 567 psp = p->p_sigacts; 568 if (SIGISMEMBER(psp->ps_osigset, sig)) { 569 PROC_UNLOCK(p); 570 osendsig(catcher, sig, mask, code); 571 return; 572 } 573 regs = p->p_frame; 574 oonstack = sigonstack(regs->tf_esp); 575 576 /* Save user context. */ 577 bzero(&sf, sizeof(sf)); 578 sf.sf_uc.uc_sigmask = *mask; 579 sf.sf_uc.uc_stack = p->p_sigstk; 580 sf.sf_uc.uc_stack.ss_flags = (p->p_flag & P_ALTSTACK) 581 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE; 582 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0; 583 sf.sf_uc.uc_mcontext.mc_gs = rgs(); 584 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs)); 585 586 /* Allocate and validate space for the signal handler context. */ 587 if ((p->p_flag & P_ALTSTACK) != 0 && !oonstack && 588 SIGISMEMBER(psp->ps_sigonstack, sig)) { 589 sfp = (struct sigframe *)(p->p_sigstk.ss_sp + 590 p->p_sigstk.ss_size - sizeof(struct sigframe)); 591#if defined(COMPAT_43) || defined(COMPAT_SUNOS) 592 p->p_sigstk.ss_flags |= SS_ONSTACK; 593#endif 594 } else 595 sfp = (struct sigframe *)regs->tf_esp - 1; 596 PROC_UNLOCK(p); 597 598 /* 599 * grow_stack() will return 0 if *sfp does not fit inside the stack 600 * and the stack can not be grown. 601 * useracc() will return FALSE if access is denied. 602 */ 603 if (grow_stack(p, (int)sfp) == 0 || 604 !useracc((caddr_t)sfp, sizeof(*sfp), VM_PROT_WRITE)) { 605 /* 606 * Process has trashed its stack; give it an illegal 607 * instruction to halt it in its tracks. 608 */ 609#ifdef DEBUG 610 printf("process %d has trashed its stack\n", p->p_pid); 611#endif 612 PROC_LOCK(p); 613 SIGACTION(p, SIGILL) = SIG_DFL; 614 SIGDELSET(p->p_sigignore, SIGILL); 615 SIGDELSET(p->p_sigcatch, SIGILL); 616 SIGDELSET(p->p_sigmask, SIGILL); 617 psignal(p, SIGILL); 618 PROC_UNLOCK(p); 619 return; 620 } 621 622 /* Translate the signal if appropriate. */ 623 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize) 624 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; 625 626 /* Build the argument list for the signal handler. 
*/ 627 sf.sf_signum = sig; 628 sf.sf_ucontext = (register_t)&sfp->sf_uc; 629 PROC_LOCK(p); 630 if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) { 631 /* Signal handler installed with SA_SIGINFO. */ 632 sf.sf_siginfo = (register_t)&sfp->sf_si; 633 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; 634 635 /* Fill siginfo structure. */ 636 sf.sf_si.si_signo = sig; 637 sf.sf_si.si_code = code; 638 sf.sf_si.si_addr = (void *)regs->tf_err; 639 } else { 640 /* Old FreeBSD-style arguments. */ 641 sf.sf_siginfo = code; 642 sf.sf_addr = regs->tf_err; 643 sf.sf_ahu.sf_handler = catcher; 644 } 645 PROC_UNLOCK(p); 646 647 /* 648 * If we're a vm86 process, we want to save the segment registers. 649 * We also change eflags to be our emulated eflags, not the actual 650 * eflags. 651 */ 652 if (regs->tf_eflags & PSL_VM) { 653 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 654 struct vm86_kernel *vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 655 656 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs; 657 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs; 658 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es; 659 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds; 660 661 if (vm86->vm86_has_vme == 0) 662 sf.sf_uc.uc_mcontext.mc_eflags = 663 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) | 664 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); 665 666 /* 667 * We should never have PSL_T set when returning from vm86 668 * mode. It may be set here if we deliver a signal before 669 * getting to vm86 mode, so turn it off. 670 * 671 * Clear PSL_NT to inhibit T_TSSFLT faults on return from 672 * syscalls made by the signal handler. This just avoids 673 * wasting time for our lazy fixup of such faults. PSL_NT 674 * does nothing in vm86 mode, but vm86 programs can set it 675 * almost legitimately in probes for old cpu types. 676 */ 677 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_T | PSL_VIF | PSL_VIP); 678 } 679 680 /* Copy the sigframe out to the user's stack. */ 681 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) { 682 /* 683 * Something is wrong with the stack pointer. 684 * ...Kill the process. 685 */ 686 PROC_LOCK(p); 687 sigexit(p, SIGILL); 688 /* NOTREACHED */ 689 } 690 691 regs->tf_esp = (int)sfp; 692 regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode); 693 regs->tf_cs = _ucodesel; 694 regs->tf_ds = _udatasel; 695 regs->tf_es = _udatasel; 696 regs->tf_fs = _udatasel; 697 regs->tf_ss = _udatasel; 698} 699 700/* 701 * System call to cleanup state after a signal 702 * has been taken. Reset signal mask and 703 * stack state from context left by sendsig (above). 704 * Return to previous pc and psl as specified by 705 * context left by sendsig. Check carefully to 706 * make sure that the user has not modified the 707 * state to gain improper privileges. 708 */ 709int 710osigreturn(p, uap) 711 struct proc *p; 712 struct osigreturn_args /* { 713 struct osigcontext *sigcntxp; 714 } */ *uap; 715{ 716 struct trapframe *regs; 717 struct osigcontext *scp; 718 int eflags; 719 720 regs = p->p_frame; 721 scp = uap->sigcntxp; 722 if (!useracc((caddr_t)scp, sizeof(*scp), VM_PROT_READ)) 723 return (EFAULT); 724 eflags = scp->sc_ps; 725 if (eflags & PSL_VM) { 726 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 727 struct vm86_kernel *vm86; 728 729 /* 730 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't 731 * set up the vm86 area, and we can't enter vm86 mode. 
732 */ 733 if (p->p_addr->u_pcb.pcb_ext == 0) 734 return (EINVAL); 735 vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 736 if (vm86->vm86_inited == 0) 737 return (EINVAL); 738 739 /* Go back to user mode if both flags are set. */ 740 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) 741 trapsignal(p, SIGBUS, 0); 742 743 if (vm86->vm86_has_vme) { 744 eflags = (tf->tf_eflags & ~VME_USERCHANGE) | 745 (eflags & VME_USERCHANGE) | PSL_VM; 746 } else { 747 vm86->vm86_eflags = eflags; /* save VIF, VIP */ 748 eflags = (tf->tf_eflags & ~VM_USERCHANGE) | (eflags & VM_USERCHANGE) | PSL_VM; 749 } 750 tf->tf_vm86_ds = scp->sc_ds; 751 tf->tf_vm86_es = scp->sc_es; 752 tf->tf_vm86_fs = scp->sc_fs; 753 tf->tf_vm86_gs = scp->sc_gs; 754 tf->tf_ds = _udatasel; 755 tf->tf_es = _udatasel; 756 tf->tf_fs = _udatasel; 757 } else { 758 /* 759 * Don't allow users to change privileged or reserved flags. 760 */ 761 /* 762 * XXX do allow users to change the privileged flag PSL_RF. 763 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers 764 * should sometimes set it there too. tf_eflags is kept in 765 * the signal context during signal handling and there is no 766 * other place to remember it, so the PSL_RF bit may be 767 * corrupted by the signal handler without us knowing. 768 * Corruption of the PSL_RF bit at worst causes one more or 769 * one less debugger trap, so allowing it is fairly harmless. 770 */ 771 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) { 772 return (EINVAL); 773 } 774 775 /* 776 * Don't allow users to load a valid privileged %cs. Let the 777 * hardware check for invalid selectors, excess privilege in 778 * other selectors, invalid %eip's and invalid %esp's. 779 */ 780 if (!CS_SECURE(scp->sc_cs)) { 781 trapsignal(p, SIGBUS, T_PROTFLT); 782 return (EINVAL); 783 } 784 regs->tf_ds = scp->sc_ds; 785 regs->tf_es = scp->sc_es; 786 regs->tf_fs = scp->sc_fs; 787 } 788 789 /* Restore remaining registers. */ 790 regs->tf_eax = scp->sc_eax; 791 regs->tf_ebx = scp->sc_ebx; 792 regs->tf_ecx = scp->sc_ecx; 793 regs->tf_edx = scp->sc_edx; 794 regs->tf_esi = scp->sc_esi; 795 regs->tf_edi = scp->sc_edi; 796 regs->tf_cs = scp->sc_cs; 797 regs->tf_ss = scp->sc_ss; 798 regs->tf_isp = scp->sc_isp; 799 800 PROC_LOCK(p); 801#if defined(COMPAT_43) || defined(COMPAT_SUNOS) 802 if (scp->sc_onstack & 1) 803 p->p_sigstk.ss_flags |= SS_ONSTACK; 804 else 805 p->p_sigstk.ss_flags &= ~SS_ONSTACK; 806#endif 807 808 SIGSETOLD(p->p_sigmask, scp->sc_mask); 809 SIG_CANTMASK(p->p_sigmask); 810 PROC_UNLOCK(p); 811 regs->tf_ebp = scp->sc_fp; 812 regs->tf_esp = scp->sc_sp; 813 regs->tf_eip = scp->sc_pc; 814 regs->tf_eflags = eflags; 815 return (EJUSTRETURN); 816} 817 818int 819sigreturn(p, uap) 820 struct proc *p; 821 struct sigreturn_args /* { 822 ucontext_t *sigcntxp; 823 } */ *uap; 824{ 825 struct trapframe *regs; 826 ucontext_t *ucp; 827 int cs, eflags; 828 829 ucp = uap->sigcntxp; 830 if (!useracc((caddr_t)ucp, sizeof(struct osigcontext), VM_PROT_READ)) 831 return (EFAULT); 832 if (((struct osigcontext *)ucp)->sc_trapno == 0x01d516) 833 return (osigreturn(p, (struct osigreturn_args *)uap)); 834 835 /* 836 * Since ucp is not an osigcontext but a ucontext_t, we have to 837 * check again if all of it is accessible. A ucontext_t is 838 * much larger, so instead of just checking for the pointer 839 * being valid for the size of an osigcontext, now check for 840 * it being valid for a whole, new-style ucontext_t. 
841 */ 842 if (!useracc((caddr_t)ucp, sizeof(*ucp), VM_PROT_READ)) 843 return (EFAULT); 844 845 regs = p->p_frame; 846 eflags = ucp->uc_mcontext.mc_eflags; 847 if (eflags & PSL_VM) { 848 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 849 struct vm86_kernel *vm86; 850 851 /* 852 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't 853 * set up the vm86 area, and we can't enter vm86 mode. 854 */ 855 if (p->p_addr->u_pcb.pcb_ext == 0) 856 return (EINVAL); 857 vm86 = &p->p_addr->u_pcb.pcb_ext->ext_vm86; 858 if (vm86->vm86_inited == 0) 859 return (EINVAL); 860 861 /* Go back to user mode if both flags are set. */ 862 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) 863 trapsignal(p, SIGBUS, 0); 864 865 if (vm86->vm86_has_vme) { 866 eflags = (tf->tf_eflags & ~VME_USERCHANGE) | 867 (eflags & VME_USERCHANGE) | PSL_VM; 868 } else { 869 vm86->vm86_eflags = eflags; /* save VIF, VIP */ 870 eflags = (tf->tf_eflags & ~VM_USERCHANGE) | (eflags & VM_USERCHANGE) | PSL_VM; 871 } 872 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe)); 873 tf->tf_eflags = eflags; 874 tf->tf_vm86_ds = tf->tf_ds; 875 tf->tf_vm86_es = tf->tf_es; 876 tf->tf_vm86_fs = tf->tf_fs; 877 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs; 878 tf->tf_ds = _udatasel; 879 tf->tf_es = _udatasel; 880 tf->tf_fs = _udatasel; 881 } else { 882 /* 883 * Don't allow users to change privileged or reserved flags. 884 */ 885 /* 886 * XXX do allow users to change the privileged flag PSL_RF. 887 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers 888 * should sometimes set it there too. tf_eflags is kept in 889 * the signal context during signal handling and there is no 890 * other place to remember it, so the PSL_RF bit may be 891 * corrupted by the signal handler without us knowing. 892 * Corruption of the PSL_RF bit at worst causes one more or 893 * one less debugger trap, so allowing it is fairly harmless. 894 */ 895 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) { 896 printf("sigreturn: eflags = 0x%x\n", eflags); 897 return (EINVAL); 898 } 899 900 /* 901 * Don't allow users to load a valid privileged %cs. Let the 902 * hardware check for invalid selectors, excess privilege in 903 * other selectors, invalid %eip's and invalid %esp's. 904 */ 905 cs = ucp->uc_mcontext.mc_cs; 906 if (!CS_SECURE(cs)) { 907 printf("sigreturn: cs = 0x%x\n", cs); 908 trapsignal(p, SIGBUS, T_PROTFLT); 909 return (EINVAL); 910 } 911 912 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs)); 913 } 914 915 PROC_LOCK(p); 916#if defined(COMPAT_43) || defined(COMPAT_SUNOS) 917 if (ucp->uc_mcontext.mc_onstack & 1) 918 p->p_sigstk.ss_flags |= SS_ONSTACK; 919 else 920 p->p_sigstk.ss_flags &= ~SS_ONSTACK; 921#endif 922 923 p->p_sigmask = ucp->uc_sigmask; 924 SIG_CANTMASK(p->p_sigmask); 925 PROC_UNLOCK(p); 926 return (EJUSTRETURN); 927} 928 929/* 930 * Machine dependent boot() routine 931 * 932 * I haven't seen anything to put here yet 933 * Possibly some stuff might be grafted back here from boot() 934 */ 935void 936cpu_boot(int howto) 937{ 938} 939 940/* 941 * Shutdown the CPU as much as possible 942 */ 943void 944cpu_halt(void) 945{ 946 for (;;) 947 __asm__ ("hlt"); 948} 949 950/* 951 * Hook to idle the CPU when possible. This currently only works in 952 * the !SMP case, as there is no clean way to ensure that a CPU will be 953 * woken when there is work available for it. 
954 */ 955static int cpu_idle_hlt = 1; 956SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW, 957 &cpu_idle_hlt, 0, "Idle loop HLT enable"); 958 959/* 960 * Note that we have to be careful here to avoid a race between checking 961 * procrunnable() and actually halting. If we don't do this, we may waste 962 * the time between calling hlt and the next interrupt even though there 963 * is a runnable process. 964 */ 965void 966cpu_idle(void) 967{ 968#ifndef SMP 969 if (cpu_idle_hlt) { 970 disable_intr(); 971 if (procrunnable()) 972 enable_intr(); 973 else { 974 enable_intr(); 975 __asm __volatile("hlt"); 976 } 977 } 978#endif 979} 980 981/* 982 * Clear registers on exec 983 */ 984void 985setregs(p, entry, stack, ps_strings) 986 struct proc *p; 987 u_long entry; 988 u_long stack; 989 u_long ps_strings; 990{ 991 struct trapframe *regs = p->p_frame; 992 struct pcb *pcb = &p->p_addr->u_pcb; 993 994 if (pcb->pcb_ldt) 995 user_ldt_free(pcb); 996 997 bzero((char *)regs, sizeof(struct trapframe)); 998 regs->tf_eip = entry; 999 regs->tf_esp = stack; 1000 regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T); 1001 regs->tf_ss = _udatasel; 1002 regs->tf_ds = _udatasel; 1003 regs->tf_es = _udatasel; 1004 regs->tf_fs = _udatasel; 1005 regs->tf_cs = _ucodesel; 1006 1007 /* PS_STRINGS value for BSD/OS binaries. It is 0 for non-BSD/OS. */ 1008 regs->tf_ebx = ps_strings; 1009 1010 /* reset %gs as well */ 1011 if (pcb == PCPU_GET(curpcb)) 1012 load_gs(_udatasel); 1013 else 1014 pcb->pcb_gs = _udatasel; 1015 1016 /* 1017 * Reset the hardware debug registers if they were in use. 1018 * They won't have any meaning for the newly exec'd process. 1019 */ 1020 if (pcb->pcb_flags & PCB_DBREGS) { 1021 pcb->pcb_dr0 = 0; 1022 pcb->pcb_dr1 = 0; 1023 pcb->pcb_dr2 = 0; 1024 pcb->pcb_dr3 = 0; 1025 pcb->pcb_dr6 = 0; 1026 pcb->pcb_dr7 = 0; 1027 if (pcb == PCPU_GET(curpcb)) { 1028 /* 1029 * Clear the debug registers on the running 1030 * CPU, otherwise they will end up affecting 1031 * the next process we switch to. 1032 */ 1033 reset_dbregs(); 1034 } 1035 pcb->pcb_flags &= ~PCB_DBREGS; 1036 } 1037 1038 /* 1039 * Initialize the math emulator (if any) for the current process. 1040 * Actually, just clear the bit that says that the emulator has 1041 * been initialized. Initialization is delayed until the process 1042 * traps to the emulator (if it is done at all) mainly because 1043 * emulators don't provide an entry point for initialization. 1044 */ 1045 p->p_addr->u_pcb.pcb_flags &= ~FP_SOFTFP; 1046 1047 /* 1048 * Arrange to trap the next npx or `fwait' instruction (see npx.c 1049 * for why fwait must be trapped at least if there is an npx or an 1050 * emulator). This is mainly to handle the case where npx0 is not 1051 * configured, since the npx routines normally set up the trap 1052 * otherwise. It should be done only at boot time, but doing it 1053 * here allows modifying `npx_exists' for testing the emulator on 1054 * systems with an npx. 1055 */ 1056 load_cr0(rcr0() | CR0_MP | CR0_TS); 1057 1058#ifdef DEV_NPX 1059 /* Initialize the npx (if any) for the current process. */ 1060 npxinit(__INITIAL_NPXCW__); 1061#endif 1062 1063 /* 1064 * XXX - Linux emulator 1065 * Make sure sure edx is 0x0 on entry. Linux binaries depend 1066 * on it. 1067 */ 1068 p->p_retval[1] = 0; 1069} 1070 1071void 1072cpu_setregs(void) 1073{ 1074 unsigned int cr0; 1075 1076 cr0 = rcr0(); 1077 cr0 |= CR0_NE; /* Done by npxinit() */ 1078 cr0 |= CR0_MP | CR0_TS; /* Done at every execve() too. 
*/ 1079#ifndef I386_CPU 1080 cr0 |= CR0_WP | CR0_AM; 1081#endif 1082 load_cr0(cr0); 1083 load_gs(_udatasel); 1084} 1085 1086static int 1087sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS) 1088{ 1089 int error; 1090 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, 1091 req); 1092 if (!error && req->newptr) 1093 resettodr(); 1094 return (error); 1095} 1096 1097SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW, 1098 &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", ""); 1099 1100SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set, 1101 CTLFLAG_RW, &disable_rtc_set, 0, ""); 1102 1103SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo, 1104 CTLFLAG_RD, &bootinfo, bootinfo, ""); 1105 1106SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock, 1107 CTLFLAG_RW, &wall_cmos_clock, 0, ""); 1108 1109/* 1110 * Initialize 386 and configure to run kernel 1111 */ 1112 1113/* 1114 * Initialize segments & interrupt table 1115 */ 1116 1117int _default_ldt; 1118union descriptor gdt[NGDT * MAXCPU]; /* global descriptor table */ 1119static struct gate_descriptor idt0[NIDT]; 1120struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */ 1121union descriptor ldt[NLDT]; /* local descriptor table */ 1122#ifdef SMP 1123/* table descriptors - used to load tables by microp */ 1124struct region_descriptor r_gdt, r_idt; 1125#endif 1126 1127int private_tss; /* flag indicating private tss */ 1128 1129#if defined(I586_CPU) && !defined(NO_F00F_HACK) 1130extern int has_f00f_bug; 1131#endif 1132 1133static struct i386tss dblfault_tss; 1134static char dblfault_stack[PAGE_SIZE]; 1135 1136extern struct user *proc0paddr; 1137 1138 1139/* software prototypes -- in more palatable form */ 1140struct soft_segment_descriptor gdt_segs[] = { 1141/* GNULL_SEL 0 Null Descriptor */ 1142{ 0x0, /* segment base address */ 1143 0x0, /* length */ 1144 0, /* segment type */ 1145 0, /* segment descriptor priority level */ 1146 0, /* segment descriptor present */ 1147 0, 0, 1148 0, /* default 32 vs 16 bit size */ 1149 0 /* limit granularity (byte/page units)*/ }, 1150/* GCODE_SEL 1 Code Descriptor for kernel */ 1151{ 0x0, /* segment base address */ 1152 0xfffff, /* length - all address space */ 1153 SDT_MEMERA, /* segment type */ 1154 0, /* segment descriptor priority level */ 1155 1, /* segment descriptor present */ 1156 0, 0, 1157 1, /* default 32 vs 16 bit size */ 1158 1 /* limit granularity (byte/page units)*/ }, 1159/* GDATA_SEL 2 Data Descriptor for kernel */ 1160{ 0x0, /* segment base address */ 1161 0xfffff, /* length - all address space */ 1162 SDT_MEMRWA, /* segment type */ 1163 0, /* segment descriptor priority level */ 1164 1, /* segment descriptor present */ 1165 0, 0, 1166 1, /* default 32 vs 16 bit size */ 1167 1 /* limit granularity (byte/page units)*/ }, 1168/* GPRIV_SEL 3 SMP Per-Processor Private Data Descriptor */ 1169{ 0x0, /* segment base address */ 1170 0xfffff, /* length - all address space */ 1171 SDT_MEMRWA, /* segment type */ 1172 0, /* segment descriptor priority level */ 1173 1, /* segment descriptor present */ 1174 0, 0, 1175 1, /* default 32 vs 16 bit size */ 1176 1 /* limit granularity (byte/page units)*/ }, 1177/* GPROC0_SEL 4 Proc 0 Tss Descriptor */ 1178{ 1179 0x0, /* segment base address */ 1180 sizeof(struct i386tss)-1,/* length - all address space */ 1181 SDT_SYS386TSS, /* segment type */ 1182 0, /* segment descriptor priority level */ 1183 1, /* segment descriptor present */ 1184 0, 0, 1185 0, /* unused - default 32 vs 16 bit size */ 1186 0 /* limit granularity (byte/page 
units)*/ }, 1187/* GLDT_SEL 5 LDT Descriptor */ 1188{ (int) ldt, /* segment base address */ 1189 sizeof(ldt)-1, /* length - all address space */ 1190 SDT_SYSLDT, /* segment type */ 1191 SEL_UPL, /* segment descriptor priority level */ 1192 1, /* segment descriptor present */ 1193 0, 0, 1194 0, /* unused - default 32 vs 16 bit size */ 1195 0 /* limit granularity (byte/page units)*/ }, 1196/* GUSERLDT_SEL 6 User LDT Descriptor per process */ 1197{ (int) ldt, /* segment base address */ 1198 (512 * sizeof(union descriptor)-1), /* length */ 1199 SDT_SYSLDT, /* segment type */ 1200 0, /* segment descriptor priority level */ 1201 1, /* segment descriptor present */ 1202 0, 0, 1203 0, /* unused - default 32 vs 16 bit size */ 1204 0 /* limit granularity (byte/page units)*/ }, 1205/* GTGATE_SEL 7 Null Descriptor - Placeholder */ 1206{ 0x0, /* segment base address */ 1207 0x0, /* length - all address space */ 1208 0, /* segment type */ 1209 0, /* segment descriptor priority level */ 1210 0, /* segment descriptor present */ 1211 0, 0, 1212 0, /* default 32 vs 16 bit size */ 1213 0 /* limit granularity (byte/page units)*/ }, 1214/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */ 1215{ 0x400, /* segment base address */ 1216 0xfffff, /* length */ 1217 SDT_MEMRWA, /* segment type */ 1218 0, /* segment descriptor priority level */ 1219 1, /* segment descriptor present */ 1220 0, 0, 1221 1, /* default 32 vs 16 bit size */ 1222 1 /* limit granularity (byte/page units)*/ }, 1223/* GPANIC_SEL 9 Panic Tss Descriptor */ 1224{ (int) &dblfault_tss, /* segment base address */ 1225 sizeof(struct i386tss)-1,/* length - all address space */ 1226 SDT_SYS386TSS, /* segment type */ 1227 0, /* segment descriptor priority level */ 1228 1, /* segment descriptor present */ 1229 0, 0, 1230 0, /* unused - default 32 vs 16 bit size */ 1231 0 /* limit granularity (byte/page units)*/ }, 1232/* GBIOSCODE32_SEL 10 BIOS 32-bit interface (32bit Code) */ 1233{ 0, /* segment base address (overwritten) */ 1234 0xfffff, /* length */ 1235 SDT_MEMERA, /* segment type */ 1236 0, /* segment descriptor priority level */ 1237 1, /* segment descriptor present */ 1238 0, 0, 1239 0, /* default 32 vs 16 bit size */ 1240 1 /* limit granularity (byte/page units)*/ }, 1241/* GBIOSCODE16_SEL 11 BIOS 32-bit interface (16bit Code) */ 1242{ 0, /* segment base address (overwritten) */ 1243 0xfffff, /* length */ 1244 SDT_MEMERA, /* segment type */ 1245 0, /* segment descriptor priority level */ 1246 1, /* segment descriptor present */ 1247 0, 0, 1248 0, /* default 32 vs 16 bit size */ 1249 1 /* limit granularity (byte/page units)*/ }, 1250/* GBIOSDATA_SEL 12 BIOS 32-bit interface (Data) */ 1251{ 0, /* segment base address (overwritten) */ 1252 0xfffff, /* length */ 1253 SDT_MEMRWA, /* segment type */ 1254 0, /* segment descriptor priority level */ 1255 1, /* segment descriptor present */ 1256 0, 0, 1257 1, /* default 32 vs 16 bit size */ 1258 1 /* limit granularity (byte/page units)*/ }, 1259/* GBIOSUTIL_SEL 13 BIOS 16-bit interface (Utility) */ 1260{ 0, /* segment base address (overwritten) */ 1261 0xfffff, /* length */ 1262 SDT_MEMRWA, /* segment type */ 1263 0, /* segment descriptor priority level */ 1264 1, /* segment descriptor present */ 1265 0, 0, 1266 0, /* default 32 vs 16 bit size */ 1267 1 /* limit granularity (byte/page units)*/ }, 1268/* GBIOSARGS_SEL 14 BIOS 16-bit interface (Arguments) */ 1269{ 0, /* segment base address (overwritten) */ 1270 0xfffff, /* length */ 1271 SDT_MEMRWA, /* segment type */ 1272 0, /* 
segment descriptor priority level */ 1273 1, /* segment descriptor present */ 1274 0, 0, 1275 0, /* default 32 vs 16 bit size */ 1276 1 /* limit granularity (byte/page units)*/ }, 1277}; 1278 1279static struct soft_segment_descriptor ldt_segs[] = { 1280 /* Null Descriptor - overwritten by call gate */ 1281{ 0x0, /* segment base address */ 1282 0x0, /* length - all address space */ 1283 0, /* segment type */ 1284 0, /* segment descriptor priority level */ 1285 0, /* segment descriptor present */ 1286 0, 0, 1287 0, /* default 32 vs 16 bit size */ 1288 0 /* limit granularity (byte/page units)*/ }, 1289 /* Null Descriptor - overwritten by call gate */ 1290{ 0x0, /* segment base address */ 1291 0x0, /* length - all address space */ 1292 0, /* segment type */ 1293 0, /* segment descriptor priority level */ 1294 0, /* segment descriptor present */ 1295 0, 0, 1296 0, /* default 32 vs 16 bit size */ 1297 0 /* limit granularity (byte/page units)*/ }, 1298 /* Null Descriptor - overwritten by call gate */ 1299{ 0x0, /* segment base address */ 1300 0x0, /* length - all address space */ 1301 0, /* segment type */ 1302 0, /* segment descriptor priority level */ 1303 0, /* segment descriptor present */ 1304 0, 0, 1305 0, /* default 32 vs 16 bit size */ 1306 0 /* limit granularity (byte/page units)*/ }, 1307 /* Code Descriptor for user */ 1308{ 0x0, /* segment base address */ 1309 0xfffff, /* length - all address space */ 1310 SDT_MEMERA, /* segment type */ 1311 SEL_UPL, /* segment descriptor priority level */ 1312 1, /* segment descriptor present */ 1313 0, 0, 1314 1, /* default 32 vs 16 bit size */ 1315 1 /* limit granularity (byte/page units)*/ }, 1316 /* Null Descriptor - overwritten by call gate */ 1317{ 0x0, /* segment base address */ 1318 0x0, /* length - all address space */ 1319 0, /* segment type */ 1320 0, /* segment descriptor priority level */ 1321 0, /* segment descriptor present */ 1322 0, 0, 1323 0, /* default 32 vs 16 bit size */ 1324 0 /* limit granularity (byte/page units)*/ }, 1325 /* Data Descriptor for user */ 1326{ 0x0, /* segment base address */ 1327 0xfffff, /* length - all address space */ 1328 SDT_MEMRWA, /* segment type */ 1329 SEL_UPL, /* segment descriptor priority level */ 1330 1, /* segment descriptor present */ 1331 0, 0, 1332 1, /* default 32 vs 16 bit size */ 1333 1 /* limit granularity (byte/page units)*/ }, 1334}; 1335 1336void 1337setidt(idx, func, typ, dpl, selec) 1338 int idx; 1339 inthand_t *func; 1340 int typ; 1341 int dpl; 1342 int selec; 1343{ 1344 struct gate_descriptor *ip; 1345 1346 ip = idt + idx; 1347 ip->gd_looffset = (int)func; 1348 ip->gd_selector = selec; 1349 ip->gd_stkcpy = 0; 1350 ip->gd_xx = 0; 1351 ip->gd_type = typ; 1352 ip->gd_dpl = dpl; 1353 ip->gd_p = 1; 1354 ip->gd_hioffset = ((int)func)>>16 ; 1355} 1356 1357#define IDTVEC(name) __CONCAT(X,name) 1358 1359extern inthand_t 1360 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl), 1361 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm), 1362 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot), 1363 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align), 1364 IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall); 1365 1366void 1367sdtossd(sd, ssd) 1368 struct segment_descriptor *sd; 1369 struct soft_segment_descriptor *ssd; 1370{ 1371 ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase; 1372 ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit; 1373 ssd->ssd_type = sd->sd_type; 1374 ssd->ssd_dpl = sd->sd_dpl; 1375 ssd->ssd_p = sd->sd_p; 1376 ssd->ssd_def32 = 
sd->sd_def32; 1377 ssd->ssd_gran = sd->sd_gran; 1378} 1379 1380#define PHYSMAP_SIZE (2 * 8) 1381 1382/* 1383 * Populate the (physmap) array with base/bound pairs describing the 1384 * available physical memory in the system, then test this memory and 1385 * build the phys_avail array describing the actually-available memory. 1386 * 1387 * If we cannot accurately determine the physical memory map, then use 1388 * value from the 0xE801 call, and failing that, the RTC. 1389 * 1390 * Total memory size may be set by the kernel environment variable 1391 * hw.physmem or the compile-time define MAXMEM. 1392 */ 1393static void 1394getmemsize(int first) 1395{ 1396 int i, physmap_idx, pa_indx; 1397 u_int basemem, extmem; 1398 struct vm86frame vmf; 1399 struct vm86context vmc; 1400 vm_offset_t pa, physmap[PHYSMAP_SIZE]; 1401 pt_entry_t pte; 1402 const char *cp; 1403 struct bios_smap *smap; 1404 1405 bzero(&vmf, sizeof(struct vm86frame)); 1406 bzero(physmap, sizeof(physmap)); 1407 1408 /* 1409 * Perform "base memory" related probes & setup 1410 */ 1411 vm86_intcall(0x12, &vmf); 1412 basemem = vmf.vmf_ax; 1413 if (basemem > 640) { 1414 printf("Preposterous BIOS basemem of %uK, truncating to 640K\n", 1415 basemem); 1416 basemem = 640; 1417 } 1418 1419 /* 1420 * XXX if biosbasemem is now < 640, there is a `hole' 1421 * between the end of base memory and the start of 1422 * ISA memory. The hole may be empty or it may 1423 * contain BIOS code or data. Map it read/write so 1424 * that the BIOS can write to it. (Memory from 0 to 1425 * the physical end of the kernel is mapped read-only 1426 * to begin with and then parts of it are remapped. 1427 * The parts that aren't remapped form holes that 1428 * remain read-only and are unused by the kernel. 1429 * The base memory area is below the physical end of 1430 * the kernel and right now forms a read-only hole. 1431 * The part of it from PAGE_SIZE to 1432 * (trunc_page(biosbasemem * 1024) - 1) will be 1433 * remapped and used by the kernel later.) 1434 * 1435 * This code is similar to the code used in 1436 * pmap_mapdev, but since no memory needs to be 1437 * allocated we simply change the mapping. 1438 */ 1439 for (pa = trunc_page(basemem * 1024); 1440 pa < ISA_HOLE_START; pa += PAGE_SIZE) { 1441 pte = (pt_entry_t)vtopte(pa + KERNBASE); 1442 *pte = pa | PG_RW | PG_V; 1443 } 1444 1445 /* 1446 * if basemem != 640, map pages r/w into vm86 page table so 1447 * that the bios can scribble on it. 1448 */ 1449 pte = (pt_entry_t)vm86paddr; 1450 for (i = basemem / 4; i < 160; i++) 1451 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U; 1452 1453 /* 1454 * map page 1 R/W into the kernel page table so we can use it 1455 * as a buffer. The kernel will unmap this page later. 
1456 */ 1457 pte = (pt_entry_t)vtopte(KERNBASE + (1 << PAGE_SHIFT)); 1458 *pte = (1 << PAGE_SHIFT) | PG_RW | PG_V; 1459 1460 /* 1461 * get memory map with INT 15:E820 1462 */ 1463 vmc.npages = 0; 1464 smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT)); 1465 vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di); 1466 1467 physmap_idx = 0; 1468 vmf.vmf_ebx = 0; 1469 do { 1470 vmf.vmf_eax = 0xE820; 1471 vmf.vmf_edx = SMAP_SIG; 1472 vmf.vmf_ecx = sizeof(struct bios_smap); 1473 i = vm86_datacall(0x15, &vmf, &vmc); 1474 if (i || vmf.vmf_eax != SMAP_SIG) 1475 break; 1476 if (boothowto & RB_VERBOSE) 1477 printf("SMAP type=%02x base=%08x %08x len=%08x %08x\n", 1478 smap->type, 1479 *(u_int32_t *)((char *)&smap->base + 4), 1480 (u_int32_t)smap->base, 1481 *(u_int32_t *)((char *)&smap->length + 4), 1482 (u_int32_t)smap->length); 1483 1484 if (smap->type != 0x01) 1485 goto next_run; 1486 1487 if (smap->length == 0) 1488 goto next_run; 1489 1490 if (smap->base >= 0xffffffff) { 1491 printf("%uK of memory above 4GB ignored\n", 1492 (u_int)(smap->length / 1024)); 1493 goto next_run; 1494 } 1495 1496 for (i = 0; i <= physmap_idx; i += 2) { 1497 if (smap->base < physmap[i + 1]) { 1498 if (boothowto & RB_VERBOSE) 1499 printf( 1500 "Overlapping or non-montonic memory region, ignoring second region\n"); 1501 goto next_run; 1502 } 1503 } 1504 1505 if (smap->base == physmap[physmap_idx + 1]) { 1506 physmap[physmap_idx + 1] += smap->length; 1507 goto next_run; 1508 } 1509 1510 physmap_idx += 2; 1511 if (physmap_idx == PHYSMAP_SIZE) { 1512 printf( 1513 "Too many segments in the physical address map, giving up\n"); 1514 break; 1515 } 1516 physmap[physmap_idx] = smap->base; 1517 physmap[physmap_idx + 1] = smap->base + smap->length; 1518next_run: 1519 } while (vmf.vmf_ebx != 0); 1520 1521 if (physmap[1] != 0) 1522 goto physmap_done; 1523 1524 /* 1525 * If we failed above, try memory map with INT 15:E801 1526 */ 1527 vmf.vmf_ax = 0xE801; 1528 if (vm86_intcall(0x15, &vmf) == 0) { 1529 extmem = vmf.vmf_cx + vmf.vmf_dx * 64; 1530 } else { 1531#if 0 1532 vmf.vmf_ah = 0x88; 1533 vm86_intcall(0x15, &vmf); 1534 extmem = vmf.vmf_ax; 1535#else 1536 /* 1537 * Prefer the RTC value for extended memory. 1538 */ 1539 extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8); 1540#endif 1541 } 1542 1543 /* 1544 * Special hack for chipsets that still remap the 384k hole when 1545 * there's 16MB of memory - this really confuses people that 1546 * are trying to use bus mastering ISA controllers with the 1547 * "16MB limit"; they only have 16MB, but the remapping puts 1548 * them beyond the limit. 1549 * 1550 * If extended memory is between 15-16MB (16-17MB phys address range), 1551 * chop it to 15MB. 1552 */ 1553 if ((extmem > 15 * 1024) && (extmem < 16 * 1024)) 1554 extmem = 15 * 1024; 1555 1556 physmap[0] = 0; 1557 physmap[1] = basemem * 1024; 1558 physmap_idx = 2; 1559 physmap[physmap_idx] = 0x100000; 1560 physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024; 1561 1562physmap_done: 1563 /* 1564 * Now, physmap contains a map of physical memory. 1565 */ 1566 1567#ifdef SMP 1568 /* make hole for AP bootstrap code */ 1569 physmap[1] = mp_bootaddress(physmap[1] / 1024); 1570 1571 /* look for the MP hardware - needed for apic addresses */ 1572 i386_mp_probe(); 1573#endif 1574 1575 /* 1576 * Maxmem isn't the "maximum memory", it's one larger than the 1577 * highest page of the physical address space. It should be 1578 * called something like "Maxphyspage". 
We may adjust this 1579 * based on ``hw.physmem'' and the results of the memory test. 1580 */ 1581 Maxmem = atop(physmap[physmap_idx + 1]); 1582 1583#ifdef MAXMEM 1584 Maxmem = MAXMEM / 4; 1585#endif 1586 1587 /* 1588 * hw.maxmem is a size in bytes; we also allow k, m, and g suffixes 1589 * for the appropriate modifiers. This overrides MAXMEM. 1590 */ 1591 if ((cp = getenv("hw.physmem")) != NULL) { 1592 u_int64_t AllowMem, sanity; 1593 char *ep; 1594 1595 sanity = AllowMem = strtouq(cp, &ep, 0); 1596 if ((ep != cp) && (*ep != 0)) { 1597 switch(*ep) { 1598 case 'g': 1599 case 'G': 1600 AllowMem <<= 10; 1601 case 'm': 1602 case 'M': 1603 AllowMem <<= 10; 1604 case 'k': 1605 case 'K': 1606 AllowMem <<= 10; 1607 break; 1608 default: 1609 AllowMem = sanity = 0; 1610 } 1611 if (AllowMem < sanity) 1612 AllowMem = 0; 1613 } 1614 if (AllowMem == 0) 1615 printf("Ignoring invalid memory size of '%s'\n", cp); 1616 else 1617 Maxmem = atop(AllowMem); 1618 } 1619 1620 if (atop(physmap[physmap_idx + 1]) != Maxmem && 1621 (boothowto & RB_VERBOSE)) 1622 printf("Physical memory use set to %uK\n", Maxmem * 4); 1623 1624 /* 1625 * If Maxmem has been increased beyond what the system has detected, 1626 * extend the last memory segment to the new limit. 1627 */ 1628 if (atop(physmap[physmap_idx + 1]) < Maxmem) 1629 physmap[physmap_idx + 1] = ptoa(Maxmem); 1630 1631 /* call pmap initialization to make new kernel address space */ 1632 pmap_bootstrap(first, 0); 1633 1634 /* 1635 * Size up each available chunk of physical memory. 1636 */ 1637 physmap[0] = PAGE_SIZE; /* mask off page 0 */ 1638 pa_indx = 0; 1639 phys_avail[pa_indx++] = physmap[0]; 1640 phys_avail[pa_indx] = physmap[0]; 1641#if 0 1642 pte = (pt_entry_t)vtopte(KERNBASE); 1643#else 1644 pte = (pt_entry_t)CMAP1; 1645#endif 1646 1647 /* 1648 * physmap is in bytes, so when converting to page boundaries, 1649 * round up the start address and round down the end address. 1650 */ 1651 for (i = 0; i <= physmap_idx; i += 2) { 1652 vm_offset_t end; 1653 1654 end = ptoa(Maxmem); 1655 if (physmap[i + 1] < end) 1656 end = trunc_page(physmap[i + 1]); 1657 for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) { 1658 int tmp, page_bad; 1659#if 0 1660 int *ptr = 0; 1661#else 1662 int *ptr = (int *)CADDR1; 1663#endif 1664 1665 /* 1666 * block out kernel memory as not available. 1667 */ 1668 if (pa >= 0x100000 && pa < first) 1669 continue; 1670 1671 page_bad = FALSE; 1672 1673 /* 1674 * map page into kernel: valid, read/write,non-cacheable 1675 */ 1676 *pte = pa | PG_V | PG_RW | PG_N; 1677 invltlb(); 1678 1679 tmp = *(int *)ptr; 1680 /* 1681 * Test for alternating 1's and 0's 1682 */ 1683 *(volatile int *)ptr = 0xaaaaaaaa; 1684 if (*(volatile int *)ptr != 0xaaaaaaaa) { 1685 page_bad = TRUE; 1686 } 1687 /* 1688 * Test for alternating 0's and 1's 1689 */ 1690 *(volatile int *)ptr = 0x55555555; 1691 if (*(volatile int *)ptr != 0x55555555) { 1692 page_bad = TRUE; 1693 } 1694 /* 1695 * Test for all 1's 1696 */ 1697 *(volatile int *)ptr = 0xffffffff; 1698 if (*(volatile int *)ptr != 0xffffffff) { 1699 page_bad = TRUE; 1700 } 1701 /* 1702 * Test for all 0's 1703 */ 1704 *(volatile int *)ptr = 0x0; 1705 if (*(volatile int *)ptr != 0x0) { 1706 page_bad = TRUE; 1707 } 1708 /* 1709 * Restore original value. 1710 */ 1711 *(int *)ptr = tmp; 1712 1713 /* 1714 * Adjust array of valid/good pages. 
1715 */ 1716 if (page_bad == TRUE) { 1717 continue; 1718 } 1719 /* 1720 * If this good page is a continuation of the 1721 * previous set of good pages, then just increase 1722 * the end pointer. Otherwise start a new chunk. 1723 * Note that "end" points one higher than end, 1724 * making the range >= start and < end. 1725 * If we're also doing a speculative memory 1726 * test and we at or past the end, bump up Maxmem 1727 * so that we keep going. The first bad page 1728 * will terminate the loop. 1729 */ 1730 if (phys_avail[pa_indx] == pa) { 1731 phys_avail[pa_indx] += PAGE_SIZE; 1732 } else { 1733 pa_indx++; 1734 if (pa_indx == PHYS_AVAIL_ARRAY_END) { 1735 printf("Too many holes in the physical address space, giving up\n"); 1736 pa_indx--; 1737 break; 1738 } 1739 phys_avail[pa_indx++] = pa; /* start */ 1740 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */ 1741 } 1742 physmem++; 1743 } 1744 } 1745 *pte = 0; 1746 invltlb(); 1747 1748 /* 1749 * XXX 1750 * The last chunk must contain at least one page plus the message 1751 * buffer to avoid complicating other code (message buffer address 1752 * calculation, etc.). 1753 */ 1754 while (phys_avail[pa_indx - 1] + PAGE_SIZE + 1755 round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) { 1756 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]); 1757 phys_avail[pa_indx--] = 0; 1758 phys_avail[pa_indx--] = 0; 1759 } 1760 1761 Maxmem = atop(phys_avail[pa_indx]); 1762 1763 /* Trim off space for the message buffer. */ 1764 phys_avail[pa_indx] -= round_page(MSGBUF_SIZE); 1765 1766 avail_end = phys_avail[pa_indx]; 1767} 1768 1769void 1770init386(first) 1771 int first; 1772{ 1773 int x; 1774 struct gate_descriptor *gdp; 1775 int gsel_tss; 1776#ifndef SMP 1777 /* table descriptors - used to load tables by microp */ 1778 struct region_descriptor r_gdt, r_idt; 1779#endif 1780 int off; 1781 1782 proc0.p_addr = proc0paddr; 1783 1784 atdevbase = ISA_HOLE_START + KERNBASE; 1785 1786 if (bootinfo.bi_modulep) { 1787 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE; 1788 preload_bootstrap_relocate(KERNBASE); 1789 } else { 1790 printf("WARNING: loader(8) metadata is missing!\n"); 1791 } 1792 if (bootinfo.bi_envp) 1793 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE; 1794 1795 /* 1796 * make gdt memory segments, the code segment goes up to end of the 1797 * page with etext in it, the data segment goes to the end of 1798 * the address space 1799 */ 1800 /* 1801 * XXX text protection is temporarily (?) disabled. The limit was 1802 * i386_btop(round_page(etext)) - 1. 
1803 */ 1804 gdt_segs[GCODE_SEL].ssd_limit = i386_btop(0) - 1; 1805 gdt_segs[GDATA_SEL].ssd_limit = i386_btop(0) - 1; 1806#ifdef SMP 1807 gdt_segs[GPRIV_SEL].ssd_limit = 1808 i386_btop(sizeof(struct privatespace)) - 1; 1809 gdt_segs[GPRIV_SEL].ssd_base = (int) &SMP_prvspace[0]; 1810 gdt_segs[GPROC0_SEL].ssd_base = 1811 (int) &SMP_prvspace[0].globaldata.gd_common_tss; 1812 SMP_prvspace[0].globaldata.gd_prvspace = &SMP_prvspace[0].globaldata; 1813#else 1814 gdt_segs[GPRIV_SEL].ssd_limit = 1815 i386_btop(sizeof(struct globaldata)) - 1; 1816 gdt_segs[GPRIV_SEL].ssd_base = (int) &__globaldata; 1817 gdt_segs[GPROC0_SEL].ssd_base = 1818 (int) &__globaldata.gd_common_tss; 1819 __globaldata.gd_prvspace = &__globaldata; 1820#endif 1821 1822 for (x = 0; x < NGDT; x++) { 1823#ifdef BDE_DEBUGGER 1824 /* avoid overwriting db entries with APM ones */ 1825 if (x >= GAPMCODE32_SEL && x <= GAPMDATA_SEL) 1826 continue; 1827#endif 1828 ssdtosd(&gdt_segs[x], &gdt[x].sd); 1829 } 1830 1831 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 1832 r_gdt.rd_base = (int) gdt; 1833 lgdt(&r_gdt); 1834 1835 /* setup curproc so that mutexes work */ 1836 PCPU_SET(curproc, &proc0); 1837 PCPU_SET(spinlocks, NULL); 1838 1839 LIST_INIT(&proc0.p_contested); 1840 1841 /* 1842 * Initialize mutexes. 1843 */ 1844 mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE); 1845 mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_RECURSE); 1846 mtx_init(&proc0.p_mtx, "process lock", MTX_DEF); 1847 mtx_init(&clock_lock, "clk", MTX_SPIN | MTX_RECURSE); 1848#ifdef SMP 1849 mtx_init(&imen_mtx, "imen", MTX_SPIN); 1850#endif 1851 mtx_lock(&Giant); 1852 1853 /* make ldt memory segments */ 1854 /* 1855 * The data segment limit must not cover the user area because we 1856 * don't want the user area to be writable in copyout() etc. (page 1857 * level protection is lost in kernel mode on 386's). Also, we 1858 * don't want the user area to be writable directly (page level 1859 * protection of the user area is not available on 486's with 1860 * CR0_WP set, because there is no user-read/kernel-write mode). 1861 * 1862 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max. And it 1863 * should be spelled ...MAX_USER... 1864 */ 1865#define VM_END_USER_RW_ADDRESS VM_MAXUSER_ADDRESS 1866 /* 1867 * The code segment limit has to cover the user area until we move 1868 * the signal trampoline out of the user area. This is safe because 1869 * the code segment cannot be written to directly. 
	/* make ldt memory segments */
	/*
	 * The data segment limit must not cover the user area because we
	 * don't want the user area to be writable in copyout() etc. (page
	 * level protection is lost in kernel mode on 386's).  Also, we
	 * don't want the user area to be writable directly (page level
	 * protection of the user area is not available on 486's with
	 * CR0_WP set, because there is no user-read/kernel-write mode).
	 *
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  And it
	 * should be spelled ...MAX_USER...
	 */
#define VM_END_USER_RW_ADDRESS	VM_MAXUSER_ADDRESS
	/*
	 * The code segment limit has to cover the user area until we move
	 * the signal trampoline out of the user area.  This is safe because
	 * the code segment cannot be written to directly.
	 */
#define VM_END_USER_R_ADDRESS	(VM_END_USER_RW_ADDRESS + UPAGES * PAGE_SIZE)
	ldt_segs[LUCODE_SEL].ssd_limit = i386_btop(VM_END_USER_R_ADDRESS) - 1;
	ldt_segs[LUDATA_SEL].ssd_limit = i386_btop(VM_END_USER_RW_ADDRESS) - 1;
	for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
		ssdtosd(&ldt_segs[x], &ldt[x].sd);

	_default_ldt = GSEL(GLDT_SEL, SEL_KPL);
	lldt(_default_ldt);
	PCPU_SET(currentldt, _default_ldt);

	/* exceptions */
	for (x = 0; x < NIDT; x++)
		setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(1, &IDTVEC(dbg), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(3, &IDTVEC(bpt), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(8, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
	setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(14, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(15, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(17, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(18, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(0x80, &IDTVEC(int0x80_syscall),
	    SDT_SYS386TGT, SEL_UPL, GSEL(GCODE_SEL, SEL_KPL));

	r_idt.rd_limit = sizeof(idt0) - 1;
	r_idt.rd_base = (int) idt;
	lidt(&r_idt);

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

#ifdef DEV_ISA
	isa_defaultirq();
#endif

#ifdef DDB
	kdb_init();
	if (boothowto & RB_KDB)
		Debugger("Boot flags requested debugger");
#endif

	finishidentcpu();	/* Final stage of CPU initialization */
	setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL, GSEL(GCODE_SEL, SEL_KPL));
	initializecpu();	/* Initialize CPU registers */
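
	/*
	 * Editorial note (not part of the original source): in the IDT set
	 * up above, most vectors use trap gates (SDT_SYS386TGT), which
	 * leave interrupts enabled on entry.  The double fault vector (8)
	 * instead uses a task gate that switches to dblfault_tss, and the
	 * page fault vector (14) uses an interrupt gate (SDT_SYS386IGT),
	 * presumably so the handler can read %cr2 before another fault can
	 * overwrite it.  Vectors 3, 4 and 0x80 are installed with SEL_UPL
	 * so that user mode may issue int3, into and int $0x80 directly.
	 */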
	/* make an initial tss so cpu can get interrupt stack on syscall! */
	PCPU_SET(common_tss.tss_esp0,
	    (int) proc0.p_addr + UPAGES*PAGE_SIZE - 16);
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	private_tss = 0;
	PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
	ltr(gsel_tss);

	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
	    dblfault_tss.tss_esp2 = (int) &dblfault_stack[sizeof(dblfault_stack)];
	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
	    dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_cr3 = (int)IdlePTD;
	dblfault_tss.tss_eip = (int) dblfault_handler;
	dblfault_tss.tss_eflags = PSL_KERNEL;
	dblfault_tss.tss_ds = dblfault_tss.tss_es =
	    dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
	dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);

	vm86_initialize();
	getmemsize(first);

	/* now running on new page tables, configured, and u/iom is accessible */

	/* Map the message buffer. */
	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);

	msgbufinit(msgbufp, MSGBUF_SIZE);

	/* make a call gate to reenter kernel with */
	gdp = &ldt[LSYS5CALLS_SEL].gd;

	x = (int) &IDTVEC(lcall_syscall);
	gdp->gd_looffset = x;
	gdp->gd_selector = GSEL(GCODE_SEL,SEL_KPL);
	gdp->gd_stkcpy = 1;
	gdp->gd_type = SDT_SYS386CGT;
	gdp->gd_dpl = SEL_UPL;
	gdp->gd_p = 1;
	gdp->gd_hioffset = x >> 16;
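
	/*
	 * Editorial note (not part of the original source): the handler
	 * address stored in the call gate above is split across two 16-bit
	 * fields.  The compiled-out sketch below shows how the entry point
	 * could be reassembled from the descriptor; call_gate_entry() is a
	 * made-up name used only for illustration.
	 */
#if 0
static vm_offset_t
call_gate_entry(struct gate_descriptor *gd)
{
	/* combine the low and high halves of the 32-bit offset */
	return ((vm_offset_t)gd->gd_looffset |
	    ((vm_offset_t)gd->gd_hioffset << 16));
}
/* Userland has traditionally entered through:  lcall $0x7, $0  (LDT slot 0). */
#endif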
	/* XXX does this work? */
	ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
	ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];

	/* transfer to user mode */

	_ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
	_udatasel = LSEL(LUDATA_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	proc0.p_addr->u_pcb.pcb_flags = 0;
	proc0.p_addr->u_pcb.pcb_cr3 = (int)IdlePTD;
	proc0.p_addr->u_pcb.pcb_ext = 0;
	proc0.p_frame = &proc0_tf;
}

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
static void f00f_hack(void *unused);
SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);

static void
f00f_hack(void *unused)
{
	struct gate_descriptor *new_idt;
#ifndef SMP
	struct region_descriptor r_idt;
#endif
	vm_offset_t tmp;

	if (!has_f00f_bug)
		return;

	GIANT_REQUIRED;

	printf("Intel Pentium detected, installing workaround for F00F bug\n");

	r_idt.rd_limit = sizeof(idt0) - 1;

	tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
	if (tmp == 0)
		panic("kmem_alloc returned 0");
	if (((unsigned int)tmp & (PAGE_SIZE-1)) != 0)
		panic("kmem_alloc returned non-page-aligned memory");
	/* Put the first seven entries in the lower page */
	new_idt = (struct gate_descriptor*)(tmp + PAGE_SIZE - (7*8));
	bcopy(idt, new_idt, sizeof(idt0));
	r_idt.rd_base = (int)new_idt;
	lidt(&r_idt);
	idt = new_idt;
	if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
	    VM_PROT_READ, FALSE) != KERN_SUCCESS)
		panic("vm_map_protect failed");
	return;
}
#endif /* defined(I586_CPU) && !NO_F00F_HACK */

int
ptrace_set_pc(p, addr)
	struct proc *p;
	unsigned long addr;
{
	p->p_frame->tf_eip = addr;
	return (0);
}

int
ptrace_single_step(p)
	struct proc *p;
{
	p->p_frame->tf_eflags |= PSL_T;
	return (0);
}

int
ptrace_read_u_check(p, addr, len)
	struct proc *p;
	vm_offset_t addr;
	size_t len;
{
	vm_offset_t gap;

	if ((vm_offset_t) (addr + len) < addr)
		return EPERM;
	if ((vm_offset_t) (addr + len) <= sizeof(struct user))
		return 0;

	gap = (char *) p->p_frame - (char *) p->p_addr;

	if ((vm_offset_t) addr < gap)
		return EPERM;
	if ((vm_offset_t) (addr + len) <=
	    (vm_offset_t) (gap + sizeof(struct trapframe)))
		return 0;
	return EPERM;
}
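
/*
 * Editorial note (not part of the original source): ptrace_read_u_check()
 * above only permits reads that fall entirely within struct user itself,
 * or entirely within the saved trapframe that sits "gap" bytes above the
 * start of the u-area.  For example, with a hypothetical gap of 0x1f00
 * and a 76-byte trapframe, a 4-byte read at offset 0x1f04 is allowed,
 * while one at offset 0x2000 returns EPERM.  (The numbers are made up for
 * illustration; only the two windows matter.)
 */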
int
ptrace_write_u(p, off, data)
	struct proc *p;
	vm_offset_t off;
	long data;
{
	struct trapframe frame_copy;
	vm_offset_t min;
	struct trapframe *tp;

	/*
	 * Privileged kernel state is scattered all over the user area.
	 * Only allow write access to parts of regs and to fpregs.
	 */
	min = (char *)p->p_frame - (char *)p->p_addr;
	if (off >= min && off <= min + sizeof(struct trapframe) - sizeof(int)) {
		tp = p->p_frame;
		frame_copy = *tp;
		*(int *)((char *)&frame_copy + (off - min)) = data;
		if (!EFL_SECURE(frame_copy.tf_eflags, tp->tf_eflags) ||
		    !CS_SECURE(frame_copy.tf_cs))
			return (EINVAL);
		*(int *)((char *)p->p_addr + off) = data;
		return (0);
	}
	min = offsetof(struct user, u_pcb) + offsetof(struct pcb, pcb_savefpu);
	if (off >= min && off <= min + sizeof(struct save87) - sizeof(int)) {
		*(int *)((char *)p->p_addr + off) = data;
		return (0);
	}
	return (EFAULT);
}

int
fill_regs(p, regs)
	struct proc *p;
	struct reg *regs;
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = p->p_frame;
	regs->r_fs = tp->tf_fs;
	regs->r_es = tp->tf_es;
	regs->r_ds = tp->tf_ds;
	regs->r_edi = tp->tf_edi;
	regs->r_esi = tp->tf_esi;
	regs->r_ebp = tp->tf_ebp;
	regs->r_ebx = tp->tf_ebx;
	regs->r_edx = tp->tf_edx;
	regs->r_ecx = tp->tf_ecx;
	regs->r_eax = tp->tf_eax;
	regs->r_eip = tp->tf_eip;
	regs->r_cs = tp->tf_cs;
	regs->r_eflags = tp->tf_eflags;
	regs->r_esp = tp->tf_esp;
	regs->r_ss = tp->tf_ss;
	pcb = &p->p_addr->u_pcb;
	regs->r_gs = pcb->pcb_gs;
	return (0);
}

int
set_regs(p, regs)
	struct proc *p;
	struct reg *regs;
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = p->p_frame;
	if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
	    !CS_SECURE(regs->r_cs))
		return (EINVAL);
	tp->tf_fs = regs->r_fs;
	tp->tf_es = regs->r_es;
	tp->tf_ds = regs->r_ds;
	tp->tf_edi = regs->r_edi;
	tp->tf_esi = regs->r_esi;
	tp->tf_ebp = regs->r_ebp;
	tp->tf_ebx = regs->r_ebx;
	tp->tf_edx = regs->r_edx;
	tp->tf_ecx = regs->r_ecx;
	tp->tf_eax = regs->r_eax;
	tp->tf_eip = regs->r_eip;
	tp->tf_cs = regs->r_cs;
	tp->tf_eflags = regs->r_eflags;
	tp->tf_esp = regs->r_esp;
	tp->tf_ss = regs->r_ss;
	pcb = &p->p_addr->u_pcb;
	pcb->pcb_gs = regs->r_gs;
	return (0);
}

int
fill_fpregs(p, fpregs)
	struct proc *p;
	struct fpreg *fpregs;
{
	bcopy(&p->p_addr->u_pcb.pcb_savefpu, fpregs, sizeof *fpregs);
	return (0);
}

int
set_fpregs(p, fpregs)
	struct proc *p;
	struct fpreg *fpregs;
{
	bcopy(fpregs, &p->p_addr->u_pcb.pcb_savefpu, sizeof *fpregs);
	return (0);
}

int
fill_dbregs(p, dbregs)
	struct proc *p;
	struct dbreg *dbregs;
{
	struct pcb *pcb;

	if (p == NULL) {
		dbregs->dr0 = rdr0();
		dbregs->dr1 = rdr1();
		dbregs->dr2 = rdr2();
		dbregs->dr3 = rdr3();
		dbregs->dr4 = rdr4();
		dbregs->dr5 = rdr5();
		dbregs->dr6 = rdr6();
		dbregs->dr7 = rdr7();
	} else {
		pcb = &p->p_addr->u_pcb;
		dbregs->dr0 = pcb->pcb_dr0;
		dbregs->dr1 = pcb->pcb_dr1;
		dbregs->dr2 = pcb->pcb_dr2;
		dbregs->dr3 = pcb->pcb_dr3;
		dbregs->dr4 = 0;
		dbregs->dr5 = 0;
		dbregs->dr6 = pcb->pcb_dr6;
		dbregs->dr7 = pcb->pcb_dr7;
	}
	return (0);
}
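
/*
 * Editorial note (not part of the original source): set_dbregs() below
 * validates %dr7 before accepting it.  In %dr7, bits 0-7 are the per-
 * breakpoint local/global enable bits, and bits 16-31 hold four pairs of
 * 2-bit R/W and 2-bit LEN fields, one pair per debug register.  The
 * validation loop walks those eight 2-bit fields and rejects the value
 * 10b, which is an undefined/reserved setting on the processors this
 * code targets.  A rough, compiled-out equivalent of that loop is
 * sketched below (a fragment; dr7 and shift are placeholders).
 */
#if 0
	/* Reject any 2-bit R/W or LEN field set to the reserved value 10b. */
	for (shift = 16; shift < 32; shift += 2)
		if (((dr7 >> shift) & 0x3) == 0x2)
			return (EINVAL);
#endif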
int
set_dbregs(p, dbregs)
	struct proc *p;
	struct dbreg *dbregs;
{
	struct pcb *pcb;
	int i;
	u_int32_t mask1, mask2;

	if (p == NULL) {
		load_dr0(dbregs->dr0);
		load_dr1(dbregs->dr1);
		load_dr2(dbregs->dr2);
		load_dr3(dbregs->dr3);
		load_dr4(dbregs->dr4);
		load_dr5(dbregs->dr5);
		load_dr6(dbregs->dr6);
		load_dr7(dbregs->dr7);
	} else {
		/*
		 * Don't let an illegal value for dr7 get set.  Specifically,
		 * check for undefined settings.  Setting these bit patterns
		 * results in undefined behaviour and can lead to an
		 * unexpected TRCTRAP.
		 */
		for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 8;
		     i++, mask1 <<= 2, mask2 <<= 2)
			if ((dbregs->dr7 & mask1) == mask2)
				return (EINVAL);

		if (dbregs->dr7 & 0x0000fc00)
			return (EINVAL);

		pcb = &p->p_addr->u_pcb;

		/*
		 * Don't let a process set a breakpoint that is not within
		 * the process's address space.  If a process could do this,
		 * it could halt the system by setting a breakpoint in the
		 * kernel (if ddb was enabled).  Thus, we need to check to
		 * make sure that no breakpoints are being enabled for
		 * addresses outside the process's address space, unless,
		 * perhaps, we were called by uid 0.
		 *
		 * XXX - what about when the watched area of the user's
		 * address space is written into from within the kernel
		 * ... wouldn't that still cause a breakpoint to be generated
		 * from within kernel mode?
		 */

		if (suser(p) != 0) {
			if (dbregs->dr7 & 0x3) {
				/* dr0 is enabled */
				if (dbregs->dr0 >= VM_MAXUSER_ADDRESS)
					return (EINVAL);
			}

			if (dbregs->dr7 & (0x3<<2)) {
				/* dr1 is enabled */
				if (dbregs->dr1 >= VM_MAXUSER_ADDRESS)
					return (EINVAL);
			}

			if (dbregs->dr7 & (0x3<<4)) {
				/* dr2 is enabled */
				if (dbregs->dr2 >= VM_MAXUSER_ADDRESS)
					return (EINVAL);
			}

			if (dbregs->dr7 & (0x3<<6)) {
				/* dr3 is enabled */
				if (dbregs->dr3 >= VM_MAXUSER_ADDRESS)
					return (EINVAL);
			}
		}

		pcb->pcb_dr0 = dbregs->dr0;
		pcb->pcb_dr1 = dbregs->dr1;
		pcb->pcb_dr2 = dbregs->dr2;
		pcb->pcb_dr3 = dbregs->dr3;
		pcb->pcb_dr6 = dbregs->dr6;
		pcb->pcb_dr7 = dbregs->dr7;

		pcb->pcb_flags |= PCB_DBREGS;
	}

	return (0);
}
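
/*
 * Editorial note (not part of the original source): fill_dbregs() and
 * set_dbregs() are the machine-dependent backend used when a debugger
 * reads or writes the debug registers of a stopped process (e.g. via
 * the PT_GETDBREGS/PT_SETDBREGS ptrace(2) requests, where available).
 * The compiled-out fragment below sketches how a hypothetical userland
 * debugger might arm a 4-byte write watchpoint in dr0; pid and
 * watch_addr are made-up placeholders, and the exact dr7 encoding is
 * the caller's responsibility.
 */
#if 0
	struct dbreg db;

	ptrace(PT_GETDBREGS, pid, (caddr_t)&db, 0);
	db.dr0 = (unsigned int)watch_addr;	/* linear address to watch */
	db.dr7 |= 0x1;				/* L0: locally enable dr0 */
	db.dr7 |= (0x1 << 16) | (0x3 << 18);	/* R/W0 = write, LEN0 = 4 bytes */
	ptrace(PT_SETDBREGS, pid, (caddr_t)&db, 0);
#endif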
/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0 otherwise.
 */
int
user_dbreg_trap(void)
{
	u_int32_t dr7, dr6;	/* debug registers dr6 and dr7 */
	u_int32_t bp;		/* breakpoint bits extracted from dr6 */
	int nbp;		/* number of breakpoints that triggered */
	caddr_t addr[4];	/* breakpoint addresses */
	int i;

	dr7 = rdr7();
	if ((dr7 & 0x000000ff) == 0) {
		/*
		 * none of the breakpoint enable bits (L0-L3, G0-G3) in
		 * dr7 are set, thus the trap couldn't have been caused
		 * by the hardware debug registers
		 */
		return 0;
	}

	nbp = 0;
	dr6 = rdr6();
	bp = dr6 & 0x0000000f;

	if (!bp) {
		/*
		 * None of the breakpoint bits are set, meaning this
		 * trap was not caused by any of the debug registers
		 */
		return 0;
	}

	/*
	 * at least one of the breakpoints was hit, check to see
	 * which ones and if any of them are user space addresses
	 */

	if (bp & 0x01) {
		addr[nbp++] = (caddr_t)rdr0();
	}
	if (bp & 0x02) {
		addr[nbp++] = (caddr_t)rdr1();
	}
	if (bp & 0x04) {
		addr[nbp++] = (caddr_t)rdr2();
	}
	if (bp & 0x08) {
		addr[nbp++] = (caddr_t)rdr3();
	}

	for (i = 0; i < nbp; i++) {
		if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
			/*
			 * addr[i] is in user space
			 */
			return nbp;
		}
	}

	/*
	 * None of the breakpoints are in user space.
	 */
	return 0;
}


#ifndef DDB
void
Debugger(const char *msg)
{
	printf("Debugger(\"%s\") called.\n", msg);
}
#endif /* no DDB */

#include <sys/disklabel.h>

/*
 * Determine the size of the transfer, and make sure it is
 * within the boundaries of the partition.  Adjust transfer
 * if needed, and signal errors or early completion.
 */
int
bounds_check_with_label(struct bio *bp, struct disklabel *lp, int wlabel)
{
	struct partition *p = lp->d_partitions + dkpart(bp->bio_dev);
	int labelsect = lp->d_partitions[0].p_offset;
	int maxsz = p->p_size,
	    sz = (bp->bio_bcount + DEV_BSIZE - 1) >> DEV_BSHIFT;

	/* overwriting disk label ? */
	/* XXX should also protect bootstrap in first 8K */
	if (bp->bio_blkno + p->p_offset <= LABELSECTOR + labelsect &&
#if LABELSECTOR != 0
	    bp->bio_blkno + p->p_offset + sz > LABELSECTOR + labelsect &&
#endif
	    (bp->bio_cmd == BIO_WRITE) && wlabel == 0) {
		bp->bio_error = EROFS;
		goto bad;
	}

#if defined(DOSBBSECTOR) && defined(notyet)
	/* overwriting master boot record? */
	if (bp->bio_blkno + p->p_offset <= DOSBBSECTOR &&
	    (bp->bio_cmd == BIO_WRITE) && wlabel == 0) {
		bp->bio_error = EROFS;
		goto bad;
	}
#endif

	/* beyond partition? */
	if (bp->bio_blkno < 0 || bp->bio_blkno + sz > maxsz) {
		/* if exactly at end of disk, return an EOF */
		if (bp->bio_blkno == maxsz) {
			bp->bio_resid = bp->bio_bcount;
			return (0);
		}
		/* or truncate if part of it fits */
		sz = maxsz - bp->bio_blkno;
		if (sz <= 0) {
			bp->bio_error = EINVAL;
			goto bad;
		}
		bp->bio_bcount = sz << DEV_BSHIFT;
	}

	bp->bio_pblkno = bp->bio_blkno + p->p_offset;
	return (1);

bad:
	bp->bio_flags |= BIO_ERROR;
	return (-1);
}
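
/*
 * Editorial note (not part of the original source): a worked example of
 * the truncation case in bounds_check_with_label() above, assuming
 * 512-byte sectors (DEV_BSHIFT == 9).  For a partition of maxsz = 1000
 * sectors, a 4096-byte write at bio_blkno = 996 yields sz = 8, which
 * runs past the end; sz is clipped to 1000 - 996 = 4 and bio_bcount to
 * 4 << 9 = 2048, so only the part that fits is transferred.
 */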
2446 * 2447 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined. 2448 */ 2449 2450#undef inb 2451#undef outb 2452 2453/* silence compiler warnings */ 2454u_char inb(u_int); 2455void outb(u_int, u_char); 2456 2457u_char 2458inb(u_int port) 2459{ 2460 u_char data; 2461 /* 2462 * We use %%dx and not %1 here because i/o is done at %dx and not at 2463 * %edx, while gcc generates inferior code (movw instead of movl) 2464 * if we tell it to load (u_short) port. 2465 */ 2466 __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port)); 2467 return (data); 2468} 2469 2470void 2471outb(u_int port, u_char data) 2472{ 2473 u_char al; 2474 /* 2475 * Use an unnecessary assignment to help gcc's register allocator. 2476 * This make a large difference for gcc-1.40 and a tiny difference 2477 * for gcc-2.6.0. For gcc-1.40, al had to be ``asm("ax")'' for 2478 * best results. gcc-2.6.0 can't handle this. 2479 */ 2480 al = data; 2481 __asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port)); 2482} 2483 2484#endif /* DDB */ 2485