/*-
 * Copyright (c) 1992 Terrence R. Lambert.
 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)machdep.c	7.4 (Berkeley) 6/3/91
 * $FreeBSD: head/sys/amd64/amd64/machdep.c 106503 2002-11-06 14:53:35Z jmallett $
 */

#include "opt_atalk.h"
#include "opt_compat.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_inet.h"
#include "opt_ipx.h"
#include "opt_isa.h"
#include "opt_maxmem.h"
#include "opt_msgbuf.h"
#include "opt_npx.h"
#include "opt_perfmon.h"
#include "opt_kstack_pages.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/signalvar.h>
#include <sys/imgact.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/linker.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/pcpu.h>
#include <sys/proc.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/reboot.h>
#include <sys/callout.h>
#include <sys/msgbuf.h>
#include <sys/sched.h>
#include <sys/sysent.h>
#include <sys/sysctl.h>
#include <sys/ucontext.h>
#include <sys/vmmeter.h>
#include <sys/bus.h>
#include <sys/eventhandler.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_map.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/user.h>
#include <sys/exec.h>
#include <sys/cons.h>

#include <ddb/ddb.h>

#include <net/netisr.h>

#include <machine/cpu.h>
#include <machine/cputypes.h>
#include <machine/reg.h>
#include <machine/clock.h>
#include <machine/specialreg.h>
#include <machine/bootinfo.h>
#include <machine/md_var.h>
#include <machine/pc/bios.h>
#include <machine/pcb_ext.h>	/* pcb.h included via sys/user.h */
#include <machine/proc.h>
#ifdef PERFMON
#include <machine/perfmon.h>
#endif
#ifdef SMP
#include <machine/privatespace.h>
#include <machine/smp.h>
#endif

#include <i386/isa/icu.h>
#include <i386/isa/intr_machdep.h>
#include <isa/rtc.h>
#include <machine/vm86.h>
#include <sys/ptrace.h>
#include <machine/sigframe.h>

extern void init386(int first);
extern void dblfault_handler(void);

extern void printcpuinfo(void);	/* XXX header file */
extern void earlysetcpuclass(void);	/* same header file */
extern void finishidentcpu(void);
extern void panicifcpuunsupported(void);
extern void initializecpu(void);

#define	CS_SECURE(cs)		(ISPL(cs) == SEL_UPL)
#define	EFL_SECURE(ef, oef)	((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0)

#if !defined(CPU_ENABLE_SSE) && defined(I686_CPU)
#define CPU_ENABLE_SSE
#endif
#if defined(CPU_DISABLE_SSE)
#undef CPU_ENABLE_SSE
#endif

static void cpu_startup(void *);
static void fpstate_drop(struct thread *td);
static void get_fpcontext(struct thread *td, mcontext_t *mcp);
static int set_fpcontext(struct thread *td, const mcontext_t *mcp);
#ifdef CPU_ENABLE_SSE
static void set_fpregs_xmm(struct save87 *, struct savexmm *);
static void fill_fpregs_xmm(struct savexmm *, struct save87 *);
#endif /* CPU_ENABLE_SSE */
SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL)

int	_udatasel, _ucodesel;
u_int	atdevbase;

#if defined(SWTCH_OPTIM_STATS)
extern int swtch_optim_stats;
SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats,
    CTLFLAG_RD, &swtch_optim_stats, 0, "");
SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count,
    CTLFLAG_RD, &tlb_flush_count, 0, "");
#endif

long physmem = 0;
int cold = 1;

#ifdef COMPAT_43
static void osendsig(sig_t catcher, int sig, sigset_t *mask, u_long code);
#endif
#ifdef COMPAT_FREEBSD4
static void freebsd4_sendsig(sig_t catcher, int sig, sigset_t *mask,
    u_long code);
#endif

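/*
 * The handlers below export basic memory statistics: hw.physmem and
 * hw.usermem report sizes in bytes, hw.availpages in pages.
 */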
static int
sysctl_hw_physmem(SYSCTL_HANDLER_ARGS)
{
	u_long val;

	val = ctob(physmem);
	return (sysctl_handle_long(oidp, &val, 0, req));
}

SYSCTL_PROC(_hw, HW_PHYSMEM, physmem, CTLTYPE_ULONG | CTLFLAG_RD,
    0, 0, sysctl_hw_physmem, "LU", "");

static int
sysctl_hw_usermem(SYSCTL_HANDLER_ARGS)
{
	u_long val;

	val = ctob(physmem - cnt.v_wire_count);
	return (sysctl_handle_long(oidp, &val, 0, req));
}

SYSCTL_PROC(_hw, HW_USERMEM, usermem, CTLTYPE_ULONG | CTLFLAG_RD,
    0, 0, sysctl_hw_usermem, "LU", "");

static int
sysctl_hw_availpages(SYSCTL_HANDLER_ARGS)
{
	u_long val;

	val = i386_btop(avail_end - avail_start);
	return (sysctl_handle_long(oidp, &val, 0, req));
}

SYSCTL_PROC(_hw, OID_AUTO, availpages, CTLTYPE_ULONG | CTLFLAG_RD,
    0, 0, sysctl_hw_availpages, "LU", "");

long Maxmem = 0;

vm_offset_t phys_avail[10];

/* must be 2 less so 0 0 can signal end of chunks */
#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2)

struct kva_md_info kmi;

static struct trapframe proc0_tf;
#ifndef SMP
static struct pcpu __pcpu;
#endif

struct mtx icu_lock;

static void
cpu_startup(dummy)
	void *dummy;
{
	/*
	 * Good {morning,afternoon,evening,night}.
	 */
	earlysetcpuclass();
	startrtclock();
	printcpuinfo();
	panicifcpuunsupported();
#ifdef PERFMON
	perfmon_init();
#endif
	printf("real memory = %u (%uK bytes)\n", ptoa(Maxmem),
	    ptoa(Maxmem) / 1024);
	/*
	 * Display any holes after the first chunk of extended memory.
	 */
	if (bootverbose) {
		int indx;

		printf("Physical memory chunk(s):\n");
		for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) {
			unsigned int size1;

			size1 = phys_avail[indx + 1] - phys_avail[indx];
			printf("0x%08x - 0x%08x, %u bytes (%u pages)\n",
			    phys_avail[indx], phys_avail[indx + 1] - 1, size1,
			    size1 / PAGE_SIZE);
		}
	}

	vm_ksubmap_init(&kmi);

	printf("avail memory = %u (%uK bytes)\n", ptoa(cnt.v_free_count),
	    ptoa(cnt.v_free_count) / 1024);

	/*
	 * Set up buffers, so they can be used to read disk labels.
	 */
	bufinit();
	vm_pager_bufferinit();

#ifndef SMP
	/* For SMP, we delay the cpu_setregs() until after SMP startup. */
	cpu_setregs();
#endif
}

/*
 * Send an interrupt to a process.
 *
 * Stack is set up to allow sigcode stored
 * at top to call routine, followed by kcall
 * to sigreturn routine below.  After sigreturn
 * resets the signal mask, the stack, and the
 * frame pointer, it returns to the user
 * specified pc, psl.
 */
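/*
 * osendsig() builds the old 4.3BSD-style osigframe for handlers installed
 * through the COMPAT_43 interface; osigreturn() below undoes it.
 */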
#ifdef COMPAT_43
static void
osendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct osigframe sf, *fp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	psp = p->p_sigacts;
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Allocate space for the signal handler context. */
	if ((p->p_flag & P_ALTSTACK) && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		fp = (struct osigframe *)(p->p_sigstk.ss_sp +
		    p->p_sigstk.ss_size - sizeof(struct osigframe));
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		fp = (struct osigframe *)regs->tf_esp - 1;
	PROC_UNLOCK(p);

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc;
	PROC_LOCK(p);
	if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_arg2 = (register_t)&fp->sf_siginfo;
		sf.sf_siginfo.si_signo = sig;
		sf.sf_siginfo.si_code = code;
		sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_arg2 = code;
		sf.sf_addr = regs->tf_err;
		sf.sf_ahu.sf_handler = catcher;
	}
	PROC_UNLOCK(p);

	/* Save most if not all of trap frame. */
	sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax;
	sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx;
	sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx;
	sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx;
	sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi;
	sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi;
	sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs;
	sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds;
	sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss;
	sf.sf_siginfo.si_sc.sc_es = regs->tf_es;
	sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs;
	sf.sf_siginfo.si_sc.sc_gs = rgs();
	sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp;

	/* Build the signal context to be used by osigreturn(). */
	sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0;
	SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask);
	sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp;
	sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp;
	sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip;
	sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags;
	sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno;
	sf.sf_siginfo.si_sc.sc_err = regs->tf_err;

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		/* XXX confusing names: `tf' isn't a trapframe; `regs' is. */
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs;
		sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs;
		sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es;
		sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_siginfo.si_sc.sc_ps =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/* See sendsig() for comments. */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, fp, sizeof(*fp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)fp;
	regs->tf_eip = PS_STRINGS - szosigcode;
	regs->tf_eflags &= ~PSL_T;
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	load_gs(_udatasel);
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
}
#endif /* COMPAT_43 */

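/*
 * freebsd4_sendsig() uses the FreeBSD 4.x sigframe layout, which lacks
 * the floating point state carried in the current mcontext.
 */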
#ifdef COMPAT_FREEBSD4
static void
freebsd4_sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct sigframe4 sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	psp = p->p_sigacts;
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = p->p_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (p->p_flag & P_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));

	/* Allocate space for the signal handler context. */
	if ((p->p_flag & P_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe4 *)(p->p_sigstk.ss_sp +
		    p->p_sigstk.ss_size - sizeof(struct sigframe4));
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct sigframe4 *)regs->tf_esp - 1;
	PROC_UNLOCK(p);

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	PROC_LOCK(p);
	if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = code;
		sf.sf_si.si_addr = (void *)regs->tf_err;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = code;
		sf.sf_addr = regs->tf_err;
		sf.sf_ahu.sf_handler = catcher;
	}
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PS_STRINGS - szfreebsd4_sigcode;
	regs->tf_eflags &= ~PSL_T;
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
}
#endif /* COMPAT_FREEBSD4 */

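/*
 * sendsig() is the native signal delivery path; it hands off to the
 * compatibility variants above for handlers registered via the older
 * interfaces.
 */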
void
sendsig(catcher, sig, mask, code)
	sig_t catcher;
	int sig;
	sigset_t *mask;
	u_long code;
{
	struct sigframe sf, *sfp;
	struct proc *p;
	struct thread *td;
	struct sigacts *psp;
	struct trapframe *regs;
	int oonstack;

	td = curthread;
	p = td->td_proc;
	PROC_LOCK_ASSERT(p, MA_OWNED);
	psp = p->p_sigacts;
#ifdef COMPAT_FREEBSD4
	if (SIGISMEMBER(psp->ps_freebsd4, sig)) {
		freebsd4_sendsig(catcher, sig, mask, code);
		return;
	}
#endif
#ifdef COMPAT_43
	if (SIGISMEMBER(psp->ps_osigset, sig)) {
		osendsig(catcher, sig, mask, code);
		return;
	}
#endif
	regs = td->td_frame;
	oonstack = sigonstack(regs->tf_esp);

	/* Save user context. */
	bzero(&sf, sizeof(sf));
	sf.sf_uc.uc_sigmask = *mask;
	sf.sf_uc.uc_stack = p->p_sigstk;
	sf.sf_uc.uc_stack.ss_flags = (p->p_flag & P_ALTSTACK)
	    ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE;
	sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0;
	sf.sf_uc.uc_mcontext.mc_gs = rgs();
	bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs));
	sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */
	get_fpcontext(td, &sf.sf_uc.uc_mcontext);
	fpstate_drop(td);

	/* Allocate space for the signal handler context. */
	if ((p->p_flag & P_ALTSTACK) != 0 && !oonstack &&
	    SIGISMEMBER(psp->ps_sigonstack, sig)) {
		sfp = (struct sigframe *)(p->p_sigstk.ss_sp +
		    p->p_sigstk.ss_size - sizeof(struct sigframe));
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
#endif
	} else
		sfp = (struct sigframe *)regs->tf_esp - 1;
	PROC_UNLOCK(p);

	/* Translate the signal if appropriate. */
	if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize)
		sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)];

	/* Build the argument list for the signal handler. */
	sf.sf_signum = sig;
	sf.sf_ucontext = (register_t)&sfp->sf_uc;
	PROC_LOCK(p);
	if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) {
		/* Signal handler installed with SA_SIGINFO. */
		sf.sf_siginfo = (register_t)&sfp->sf_si;
		sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher;

		/* Fill in POSIX parts */
		sf.sf_si.si_signo = sig;
		sf.sf_si.si_code = code;
		sf.sf_si.si_addr = (void *)regs->tf_err;
	} else {
		/* Old FreeBSD-style arguments. */
		sf.sf_siginfo = code;
		sf.sf_addr = regs->tf_err;
		sf.sf_ahu.sf_handler = catcher;
	}
	PROC_UNLOCK(p);

	/*
	 * If we're a vm86 process, we want to save the segment registers.
	 * We also change eflags to be our emulated eflags, not the actual
	 * eflags.
	 */
	if (regs->tf_eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86;

		sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs;
		sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs;
		sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es;
		sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds;

		if (vm86->vm86_has_vme == 0)
			sf.sf_uc.uc_mcontext.mc_eflags =
			    (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) |
			    (vm86->vm86_eflags & (PSL_VIF | PSL_VIP));

		/*
		 * Clear PSL_NT to inhibit T_TSSFLT faults on return from
		 * syscalls made by the signal handler.  This just avoids
		 * wasting time for our lazy fixup of such faults.  PSL_NT
		 * does nothing in vm86 mode, but vm86 programs can set it
		 * almost legitimately in probes for old cpu types.
		 */
		tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP);
	}

	/*
	 * Copy the sigframe out to the user's stack.
	 */
	if (copyout(&sf, sfp, sizeof(*sfp)) != 0) {
#ifdef DEBUG
		printf("process %ld has trashed its stack\n", (long)p->p_pid);
#endif
		PROC_LOCK(p);
		sigexit(td, SIGILL);
	}

	regs->tf_esp = (int)sfp;
	regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode);
	regs->tf_eflags &= ~PSL_T;
	regs->tf_cs = _ucodesel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_ss = _udatasel;
	PROC_LOCK(p);
}

/*
 * System call to cleanup state after a signal
 * has been taken.  Reset signal mask and
 * stack state from context left by sendsig (above).
 * Return to previous pc and psl as specified by
 * context left by sendsig.  Check carefully to
 * make sure that the user has not modified the
 * state to gain improper privileges.
 *
 * MPSAFE
 */
#ifdef COMPAT_43
int
osigreturn(td, uap)
	struct thread *td;
	struct osigreturn_args /* {
		struct osigcontext *sigcntxp;
	} */ *uap;
{
	struct osigcontext sc;
	struct trapframe *regs;
	struct osigcontext *scp;
	struct proc *p = td->td_proc;
	int eflags, error;

	regs = td->td_frame;
	error = copyin(uap->sigcntxp, &sc, sizeof(sc));
	if (error != 0)
		return (error);
	scp = &sc;
	eflags = scp->sc_ps;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(p, SIGBUS, 0);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		tf->tf_vm86_ds = scp->sc_ds;
		tf->tf_vm86_es = scp->sc_es;
		tf->tf_vm86_fs = scp->sc_fs;
		tf->tf_vm86_gs = scp->sc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		if (!CS_SECURE(scp->sc_cs)) {
			trapsignal(p, SIGBUS, T_PROTFLT);
			return (EINVAL);
		}
		regs->tf_ds = scp->sc_ds;
		regs->tf_es = scp->sc_es;
		regs->tf_fs = scp->sc_fs;
	}

	/* Restore remaining registers. */
	regs->tf_eax = scp->sc_eax;
	regs->tf_ebx = scp->sc_ebx;
	regs->tf_ecx = scp->sc_ecx;
	regs->tf_edx = scp->sc_edx;
	regs->tf_esi = scp->sc_esi;
	regs->tf_edi = scp->sc_edi;
	regs->tf_cs = scp->sc_cs;
	regs->tf_ss = scp->sc_ss;
	regs->tf_isp = scp->sc_isp;
	regs->tf_ebp = scp->sc_fp;
	regs->tf_esp = scp->sc_sp;
	regs->tf_eip = scp->sc_pc;
	regs->tf_eflags = eflags;

	PROC_LOCK(p);
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
	if (scp->sc_onstack & 1)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigstk.ss_flags &= ~SS_ONSTACK;
#endif
	SIGSETOLD(p->p_sigmask, scp->sc_mask);
	SIG_CANTMASK(p->p_sigmask);
	signotify(p);
	PROC_UNLOCK(p);
	return (EJUSTRETURN);
}
#endif /* COMPAT_43 */

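/*
 * As with osigreturn() above, the sigreturn() variants below return
 * EJUSTRETURN on success so that the syscall return path does not
 * clobber the register state they just restored.
 */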
#ifdef COMPAT_FREEBSD4
/*
 * MPSAFE
 */
int
freebsd4_sigreturn(td, uap)
	struct thread *td;
	struct freebsd4_sigreturn_args /* {
		const ucontext4 *sigcntxp;
	} */ *uap;
{
	struct ucontext4 uc;
	struct proc *p = td->td_proc;
	struct trapframe *regs;
	const struct ucontext4 *ucp;
	int cs, eflags, error;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(p, SIGBUS, 0);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			printf("freebsd4_sigreturn: eflags = 0x%x\n", eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			printf("freebsd4_sigreturn: cs = 0x%x\n", cs);
			trapsignal(p, SIGBUS, T_PROTFLT);
			return (EINVAL);
		}

		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

	PROC_LOCK(p);
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
	if (ucp->uc_mcontext.mc_onstack & 1)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	p->p_sigmask = ucp->uc_sigmask;
	SIG_CANTMASK(p->p_sigmask);
	signotify(p);
	PROC_UNLOCK(p);
	return (EJUSTRETURN);
}
#endif /* COMPAT_FREEBSD4 */

/*
 * MPSAFE
 */
int
sigreturn(td, uap)
	struct thread *td;
	struct sigreturn_args /* {
		const __ucontext *sigcntxp;
	} */ *uap;
{
	ucontext_t uc;
	struct proc *p = td->td_proc;
	struct trapframe *regs;
	const ucontext_t *ucp;
	int cs, eflags, error, ret;

	error = copyin(uap->sigcntxp, &uc, sizeof(uc));
	if (error != 0)
		return (error);
	ucp = &uc;
	regs = td->td_frame;
	eflags = ucp->uc_mcontext.mc_eflags;
	if (eflags & PSL_VM) {
		struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs;
		struct vm86_kernel *vm86;

		/*
		 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't
		 * set up the vm86 area, and we can't enter vm86 mode.
		 */
		if (td->td_pcb->pcb_ext == 0)
			return (EINVAL);
		vm86 = &td->td_pcb->pcb_ext->ext_vm86;
		if (vm86->vm86_inited == 0)
			return (EINVAL);

		/* Go back to user mode if both flags are set. */
		if ((eflags & PSL_VIP) && (eflags & PSL_VIF))
			trapsignal(p, SIGBUS, 0);

		if (vm86->vm86_has_vme) {
			eflags = (tf->tf_eflags & ~VME_USERCHANGE) |
			    (eflags & VME_USERCHANGE) | PSL_VM;
		} else {
			vm86->vm86_eflags = eflags;	/* save VIF, VIP */
			eflags = (tf->tf_eflags & ~VM_USERCHANGE) |
			    (eflags & VM_USERCHANGE) | PSL_VM;
		}
		bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe));
		tf->tf_eflags = eflags;
		tf->tf_vm86_ds = tf->tf_ds;
		tf->tf_vm86_es = tf->tf_es;
		tf->tf_vm86_fs = tf->tf_fs;
		tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs;
		tf->tf_ds = _udatasel;
		tf->tf_es = _udatasel;
		tf->tf_fs = _udatasel;
	} else {
		/*
		 * Don't allow users to change privileged or reserved flags.
		 */
		/*
		 * XXX do allow users to change the privileged flag PSL_RF.
		 * The cpu sets PSL_RF in tf_eflags for faults.  Debuggers
		 * should sometimes set it there too.  tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			printf("sigreturn: eflags = 0x%x\n", eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			printf("sigreturn: cs = 0x%x\n", cs);
			trapsignal(p, SIGBUS, T_PROTFLT);
			return (EINVAL);
		}

		ret = set_fpcontext(td, &ucp->uc_mcontext);
		if (ret != 0)
			return (ret);
		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

	PROC_LOCK(p);
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
	if (ucp->uc_mcontext.mc_onstack & 1)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	p->p_sigmask = ucp->uc_sigmask;
	SIG_CANTMASK(p->p_sigmask);
	signotify(p);
	PROC_UNLOCK(p);
	return (EJUSTRETURN);
}

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Shutdown the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		__asm__ ("hlt");
}

/*
 * Hook to idle the CPU when possible.  In the SMP case we default to
 * off because a halted cpu will not currently pick up a new thread in the
 * run queue until the next timer tick.  If turned on this will result in
 * approximately a 4.2% loss in real time performance in buildworld tests
 * (but improves user and sys times oddly enough), and saves approximately
 * 5% in power consumption on an idle machine (tests w/2xCPU 1.1GHz P3).
 *
 * XXX we need to have a cpu mask of idle cpus and generate an IPI or
 * otherwise generate some sort of interrupt to wake up cpus sitting in HLT.
 * Then we can have our cake and eat it too.
 */
#ifdef SMP
static int cpu_idle_hlt = 0;
#else
static int cpu_idle_hlt = 1;
#endif
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
    &cpu_idle_hlt, 0, "Idle loop HLT enable");

/*
 * Note that we have to be careful here to avoid a race between checking
 * sched_runnable() and actually halting.  If we don't do this, we may waste
 * the time between calling hlt and the next interrupt even though there
 * is a runnable process.
 */
void
cpu_idle(void)
{
	if (cpu_idle_hlt) {
		disable_intr();
		if (sched_runnable()) {
			enable_intr();
		} else {
			/*
			 * we must absolutely guarantee that hlt is the
			 * absolute next instruction after sti or we
			 * introduce a timing window.
			 */
			__asm __volatile("sti; hlt");
		}
	}
}

/*
 * Clear registers on exec
 */
void
exec_setregs(td, entry, stack, ps_strings)
	struct thread *td;
	u_long entry;
	u_long stack;
	u_long ps_strings;
{
	struct trapframe *regs = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	/* Reset pcb->pcb_gs and %gs before possibly invalidating it. */
	pcb->pcb_gs = _udatasel;
	load_gs(_udatasel);

	if (td->td_proc->p_md.md_ldt)
		user_ldt_free(td);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = entry;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = ps_strings;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == PCPU_GET(curpcb)) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	td->td_pcb->pcb_flags &= ~FP_SOFTFP;

	/*
	 * Arrange to trap the next npx or `fwait' instruction (see npx.c
	 * for why fwait must be trapped at least if there is an npx or an
	 * emulator).  This is mainly to handle the case where npx0 is not
	 * configured, since the npx routines normally set up the trap
	 * otherwise.  It should be done only at boot time, but doing it
	 * here allows modifying `npx_exists' for testing the emulator on
	 * systems with an npx.
	 */
	load_cr0(rcr0() | CR0_MP | CR0_TS);

	/* Initialize the npx (if any) for the current process. */
	/*
	 * XXX the above load_cr0() also initializes it and is a layering
	 * violation if NPX is configured.  It drops the npx partially
	 * and this would be fatal if we were interrupted now, and decided
	 * to force the state to the pcb, and checked the invariant
	 * (CR0_TS clear) if and only if PCPU_GET(fpcurthread) != NULL).
	 * ALL of this can happen except the check.  The check used to
	 * happen and be fatal later when we didn't complete the drop
	 * before returning to user mode.  This should be fixed properly
	 * soon.
	 */
	fpstate_drop(td);

	/*
	 * XXX - Linux emulator
	 * Make sure edx is 0x0 on entry.  Linux binaries depend
	 * on it.
	 */
	td->td_retval[1] = 0;
}

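/*
 * cpu_setregs() establishes the baseline %cr0 state: CR0_MP/CR0_TS make
 * the first FPU use trap so npx state can be set up lazily, while CR0_WP
 * and CR0_AM enable kernel-mode write protection and alignment checking
 * on 486 and newer CPUs.
 */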
void
cpu_setregs(void)
{
	unsigned int cr0;

	cr0 = rcr0();
#ifdef SMP
	cr0 |= CR0_NE;			/* Done by npxinit() */
#endif
	cr0 |= CR0_MP | CR0_TS;		/* Done at every execve() too. */
#ifndef I386_CPU
	cr0 |= CR0_WP | CR0_AM;
#endif
	load_cr0(cr0);
	load_gs(_udatasel);
}

static int
sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS)
{
	int error;

	error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2,
	    req);
	if (!error && req->newptr)
		resettodr();
	return (error);
}

SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW,
    &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", "");

SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set,
    CTLFLAG_RW, &disable_rtc_set, 0, "");

SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo,
    CTLFLAG_RD, &bootinfo, bootinfo, "");

SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock,
    CTLFLAG_RW, &wall_cmos_clock, 0, "");

u_long bootdev;		/* not a dev_t - encoding is different */
SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev,
    CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in dev_t format)");

/*
 * Initialize 386 and configure to run kernel
 */

/*
 * Initialize segments & interrupt table
 */

int _default_ldt;
union descriptor gdt[NGDT * MAXCPU];	/* global descriptor table */
static struct gate_descriptor idt0[NIDT];
struct gate_descriptor *idt = &idt0[0];	/* interrupt descriptor table */
union descriptor ldt[NLDT];		/* local descriptor table */
#ifdef SMP
/* table descriptors - used to load tables by the CPU */
struct region_descriptor r_gdt, r_idt;
#endif

int private_tss;			/* flag indicating private tss */

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

static struct i386tss dblfault_tss;
static char dblfault_stack[PAGE_SIZE];

extern struct user *proc0uarea;
extern vm_offset_t proc0kstack;

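/*
 * Note that the entries below must stay in the order of the G*_SEL
 * selector indices defined in <machine/segments.h>.
 */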
/* software prototypes -- in more palatable form */
struct soft_segment_descriptor gdt_segs[] = {
/* GNULL_SEL	0 Null Descriptor */
{	0x0,			/* segment base address  */
	0x0,			/* length */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GCODE_SEL	1 Code Descriptor for kernel */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GDATA_SEL	2 Data Descriptor for kernel */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GPRIV_SEL	3 SMP Per-Processor Private Data Descriptor */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GPROC0_SEL	4 Proc 0 Tss Descriptor */
{
	0x0,			/* segment base address */
	sizeof(struct i386tss)-1,/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GLDT_SEL	5 LDT Descriptor */
{	(int) ldt,		/* segment base address  */
	sizeof(ldt)-1,		/* length - all address space */
	SDT_SYSLDT,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GUSERLDT_SEL	6 User LDT Descriptor per process */
{	(int) ldt,		/* segment base address  */
	(512 * sizeof(union descriptor)-1),	/* length */
	SDT_SYSLDT,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GTGATE_SEL	7 Null Descriptor - Placeholder */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */
{	0x400,			/* segment base address */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GPANIC_SEL	9 Panic Tss Descriptor */
{	(int) &dblfault_tss,	/* segment base address  */
	sizeof(struct i386tss)-1,/* length - all address space */
	SDT_SYS386TSS,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* unused - default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
/* GBIOSCODE32_SEL 10 BIOS 32-bit interface (32bit Code) */
{	0,			/* segment base address (overwritten)  */
	0xfffff,		/* length */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSCODE16_SEL 11 BIOS 32-bit interface (16bit Code) */
{	0,			/* segment base address (overwritten)  */
	0xfffff,		/* length */
	SDT_MEMERA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSDATA_SEL 12 BIOS 32-bit interface (Data) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSUTIL_SEL 13 BIOS 16-bit interface (Utility) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
/* GBIOSARGS_SEL 14 BIOS 16-bit interface (Arguments) */
{	0,			/* segment base address (overwritten) */
	0xfffff,		/* length */
	SDT_MEMRWA,		/* segment type */
	0,			/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
};

static struct soft_segment_descriptor ldt_segs[] = {
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Code Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMERA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
	/* Null Descriptor - overwritten by call gate */
{	0x0,			/* segment base address  */
	0x0,			/* length - all address space */
	0,			/* segment type */
	0,			/* segment descriptor priority level */
	0,			/* segment descriptor present */
	0, 0,
	0,			/* default 32 vs 16 bit size */
	0			/* limit granularity (byte/page units)*/ },
	/* Data Descriptor for user */
{	0x0,			/* segment base address  */
	0xfffff,		/* length - all address space */
	SDT_MEMRWA,		/* segment type */
	SEL_UPL,		/* segment descriptor priority level */
	1,			/* segment descriptor present */
	0, 0,
	1,			/* default 32 vs 16 bit size */
	1			/* limit granularity (byte/page units)*/ },
};

void
setidt(idx, func, typ, dpl, selec)
	int idx;
	inthand_t *func;
	int typ;
	int dpl;
	int selec;
{
	struct gate_descriptor *ip;

	ip = idt + idx;
	ip->gd_looffset = (int)func;
	ip->gd_selector = selec;
	ip->gd_stkcpy = 0;
	ip->gd_xx = 0;
	ip->gd_type = typ;
	ip->gd_dpl = dpl;
	ip->gd_p = 1;
	ip->gd_hioffset = ((int)func) >> 16;
}

#define	IDTVEC(name)	__CONCAT(X,name)

extern inthand_t
	IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl),
	IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm),
	IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot),
	IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align),
	IDTVEC(xmm), IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall);

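/*
 * Convert a hardware segment descriptor back into the "software
 * prototype" form used by the tables above.
 */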
void
sdtossd(sd, ssd)
	struct segment_descriptor *sd;
	struct soft_segment_descriptor *ssd;
{
	ssd->ssd_base  = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type  = sd->sd_type;
	ssd->ssd_dpl   = sd->sd_dpl;
	ssd->ssd_p     = sd->sd_p;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran  = sd->sd_gran;
}

#define PHYSMAP_SIZE	(2 * 8)

/*
 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * If we cannot accurately determine the physical memory map, then use
 * the value from the 0xE801 call, and failing that, the RTC.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 */
static void
getmemsize(int first)
{
	int i, physmap_idx, pa_indx;
	u_int basemem, extmem;
	struct vm86frame vmf;
	struct vm86context vmc;
	vm_offset_t pa, physmap[PHYSMAP_SIZE];
	pt_entry_t *pte;
	char *cp;
	struct bios_smap *smap;

	bzero(&vmf, sizeof(struct vm86frame));
	bzero(physmap, sizeof(physmap));
	basemem = 0;

	/*
	 * map page 1 R/W into the kernel page table so we can use it
	 * as a buffer.  The kernel will unmap this page later.
	 */
	pmap_kenter(KERNBASE + (1 << PAGE_SHIFT), 1);

	/*
	 * get memory map with INT 15:E820
	 */
	vmc.npages = 0;
	smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
	vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);

	physmap_idx = 0;
	vmf.vmf_ebx = 0;
	do {
		vmf.vmf_eax = 0xE820;
		vmf.vmf_edx = SMAP_SIG;
		vmf.vmf_ecx = sizeof(struct bios_smap);
		i = vm86_datacall(0x15, &vmf, &vmc);
		if (i || vmf.vmf_eax != SMAP_SIG)
			break;
		if (boothowto & RB_VERBOSE)
			printf("SMAP type=%02x base=%08x %08x len=%08x %08x\n",
			    smap->type,
			    *(u_int32_t *)((char *)&smap->base + 4),
			    (u_int32_t)smap->base,
			    *(u_int32_t *)((char *)&smap->length + 4),
			    (u_int32_t)smap->length);

		if (smap->type != 0x01)
			goto next_run;

		if (smap->length == 0)
			goto next_run;

		if (smap->base >= 0xffffffff) {
			printf("%uK of memory above 4GB ignored\n",
			    (u_int)(smap->length / 1024));
			goto next_run;
		}

		for (i = 0; i <= physmap_idx; i += 2) {
			if (smap->base < physmap[i + 1]) {
				if (boothowto & RB_VERBOSE)
					printf(
	"Overlapping or non-monotonic memory region, ignoring second region\n");
				goto next_run;
			}
		}

		if (smap->base == physmap[physmap_idx + 1]) {
			physmap[physmap_idx + 1] += smap->length;
			goto next_run;
		}

		physmap_idx += 2;
		if (physmap_idx == PHYSMAP_SIZE) {
			printf(
		"Too many segments in the physical address map, giving up\n");
			break;
		}
		physmap[physmap_idx] = smap->base;
		physmap[physmap_idx + 1] = smap->base + smap->length;
next_run: ;
	} while (vmf.vmf_ebx != 0);

	/*
	 * Perform "base memory" related probes & setup
	 */
	for (i = 0; i <= physmap_idx; i += 2) {
		if (physmap[i] == 0x00000000) {
			basemem = physmap[i + 1] / 1024;
			break;
		}
	}

	/* Fall back to the old compatibility function for base memory */
	if (basemem == 0) {
		vm86_intcall(0x12, &vmf);
		basemem = vmf.vmf_ax;
	}

printf("Preposterous BIOS basemem of %uK, truncating to 640K\n", 1604 basemem); 1605 basemem = 640; 1606 } 1607 1608 /* 1609 * XXX if biosbasemem is now < 640, there is a `hole' 1610 * between the end of base memory and the start of 1611 * ISA memory. The hole may be empty or it may 1612 * contain BIOS code or data. Map it read/write so 1613 * that the BIOS can write to it. (Memory from 0 to 1614 * the physical end of the kernel is mapped read-only 1615 * to begin with and then parts of it are remapped. 1616 * The parts that aren't remapped form holes that 1617 * remain read-only and are unused by the kernel. 1618 * The base memory area is below the physical end of 1619 * the kernel and right now forms a read-only hole. 1620 * The part of it from PAGE_SIZE to 1621 * (trunc_page(biosbasemem * 1024) - 1) will be 1622 * remapped and used by the kernel later.) 1623 * 1624 * This code is similar to the code used in 1625 * pmap_mapdev, but since no memory needs to be 1626 * allocated we simply change the mapping. 1627 */ 1628 for (pa = trunc_page(basemem * 1024); 1629 pa < ISA_HOLE_START; pa += PAGE_SIZE) 1630 pmap_kenter(KERNBASE + pa, pa); 1631 1632 /* 1633 * if basemem != 640, map pages r/w into vm86 page table so 1634 * that the bios can scribble on it. 1635 */ 1636 pte = (pt_entry_t *)vm86paddr; 1637 for (i = basemem / 4; i < 160; i++) 1638 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U; 1639 1640 if (physmap[1] != 0) 1641 goto physmap_done; 1642 1643 /* 1644 * If we failed above, try memory map with INT 15:E801 1645 */ 1646 vmf.vmf_ax = 0xE801; 1647 if (vm86_intcall(0x15, &vmf) == 0) { 1648 extmem = vmf.vmf_cx + vmf.vmf_dx * 64; 1649 } else { 1650#if 0 1651 vmf.vmf_ah = 0x88; 1652 vm86_intcall(0x15, &vmf); 1653 extmem = vmf.vmf_ax; 1654#else 1655 /* 1656 * Prefer the RTC value for extended memory. 1657 */ 1658 extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8); 1659#endif 1660 } 1661 1662 /* 1663 * Special hack for chipsets that still remap the 384k hole when 1664 * there's 16MB of memory - this really confuses people that 1665 * are trying to use bus mastering ISA controllers with the 1666 * "16MB limit"; they only have 16MB, but the remapping puts 1667 * them beyond the limit. 1668 * 1669 * If extended memory is between 15-16MB (16-17MB phys address range), 1670 * chop it to 15MB. 1671 */ 1672 if ((extmem > 15 * 1024) && (extmem < 16 * 1024)) 1673 extmem = 15 * 1024; 1674 1675 physmap[0] = 0; 1676 physmap[1] = basemem * 1024; 1677 physmap_idx = 2; 1678 physmap[physmap_idx] = 0x100000; 1679 physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024; 1680 1681physmap_done: 1682 /* 1683 * Now, physmap contains a map of physical memory. 1684 */ 1685 1686#ifdef SMP 1687 /* make hole for AP bootstrap code */ 1688 physmap[1] = mp_bootaddress(physmap[1] / 1024); 1689 1690 /* look for the MP hardware - needed for apic addresses */ 1691 i386_mp_probe(); 1692#endif 1693 1694 /* 1695 * Maxmem isn't the "maximum memory", it's one larger than the 1696 * highest page of the physical address space. It should be 1697 * called something like "Maxphyspage". We may adjust this 1698 * based on ``hw.physmem'' and the results of the memory test. 1699 */ 1700 Maxmem = atop(physmap[physmap_idx + 1]); 1701 1702#ifdef MAXMEM 1703 Maxmem = MAXMEM / 4; 1704#endif 1705 1706 /* 1707 * hw.physmem is a size in bytes; we also allow k, m, and g suffixes 1708 * for the appropriate modifiers. This overrides MAXMEM. 
	if ((cp = getenv("hw.physmem")) != NULL) {
		u_int64_t AllowMem, sanity;
		char *ep;

		sanity = AllowMem = strtouq(cp, &ep, 0);
		if ((ep != cp) && (*ep != 0)) {
			switch(*ep) {
			case 'g':
			case 'G':
				AllowMem <<= 10;
			case 'm':
			case 'M':
				AllowMem <<= 10;
			case 'k':
			case 'K':
				AllowMem <<= 10;
				break;
			default:
				AllowMem = sanity = 0;
			}
			if (AllowMem < sanity)
				AllowMem = 0;
		}
		if (AllowMem == 0)
			printf("Ignoring invalid memory size of '%s'\n", cp);
		else
			Maxmem = atop(AllowMem);
		freeenv(cp);
	}

	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
	    (boothowto & RB_VERBOSE))
		printf("Physical memory use set to %ldK\n", Maxmem * 4);

	/*
	 * If Maxmem has been increased beyond what the system has detected,
	 * extend the last memory segment to the new limit.
	 */
	if (atop(physmap[physmap_idx + 1]) < Maxmem)
		physmap[physmap_idx + 1] = ptoa(Maxmem);

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(first, 0);

	/*
	 * Size up each available chunk of physical memory.
	 */
	physmap[0] = PAGE_SIZE;		/* mask off page 0 */
	pa_indx = 0;
	phys_avail[pa_indx++] = physmap[0];
	phys_avail[pa_indx] = physmap[0];
	pte = CMAP1;

	/*
	 * physmap is in bytes, so when converting to page boundaries,
	 * round up the start address and round down the end address.
	 */
	for (i = 0; i <= physmap_idx; i += 2) {
		vm_offset_t end;

		end = ptoa(Maxmem);
		if (physmap[i + 1] < end)
			end = trunc_page(physmap[i + 1]);
		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
			int tmp, page_bad;
			int *ptr = (int *)CADDR1;

			/*
			 * block out kernel memory as not available.
			 */
			if (pa >= 0x100000 && pa < first)
				continue;

			page_bad = FALSE;

			/*
			 * map page into kernel: valid, read/write,non-cacheable
			 */
			*pte = pa | PG_V | PG_RW | PG_N;
			invltlb();

			tmp = *(int *)ptr;
			/*
			 * Test for alternating 1's and 0's
			 */
			*(volatile int *)ptr = 0xaaaaaaaa;
			if (*(volatile int *)ptr != 0xaaaaaaaa) {
				page_bad = TRUE;
			}
			/*
			 * Test for alternating 0's and 1's
			 */
			*(volatile int *)ptr = 0x55555555;
			if (*(volatile int *)ptr != 0x55555555) {
				page_bad = TRUE;
			}
			/*
			 * Test for all 1's
			 */
			*(volatile int *)ptr = 0xffffffff;
			if (*(volatile int *)ptr != 0xffffffff) {
				page_bad = TRUE;
			}
			/*
			 * Test for all 0's
			 */
			*(volatile int *)ptr = 0x0;
			if (*(volatile int *)ptr != 0x0) {
				page_bad = TRUE;
			}
			/*
			 * Restore original value.
			 */
			*(int *)ptr = tmp;

			/*
			 * Adjust array of valid/good pages.
			 */
			if (page_bad == TRUE) {
				continue;
			}
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer.  Otherwise start a new chunk.
			 * Note that "end" points one past the last valid
			 * address, making the range >= start and < end.
			 * If we're also doing a speculative memory
			 * test and we are at or past the end, bump up Maxmem
			 * so that we keep going.  The first bad page
			 * will terminate the loop.
			 */
			if (phys_avail[pa_indx] == pa) {
				phys_avail[pa_indx] += PAGE_SIZE;
			} else {
				pa_indx++;
				if (pa_indx == PHYS_AVAIL_ARRAY_END) {
					printf(
		"Too many holes in the physical address space, giving up\n");
					pa_indx--;
					break;
				}
				phys_avail[pa_indx++] = pa;	/* start */
				phys_avail[pa_indx] = pa + PAGE_SIZE;	/* end */
			}
			physmem++;
		}
	}
	*pte = 0;
	invltlb();

	/*
	 * XXX
	 * The last chunk must contain at least one page plus the message
	 * buffer to avoid complicating other code (message buffer address
	 * calculation, etc.).
	 */
	while (phys_avail[pa_indx - 1] + PAGE_SIZE +
	    round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) {
		physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]);
		phys_avail[pa_indx--] = 0;
		phys_avail[pa_indx--] = 0;
	}

	Maxmem = atop(phys_avail[pa_indx]);

	/* Trim off space for the message buffer. */
	phys_avail[pa_indx] -= round_page(MSGBUF_SIZE);

	avail_end = phys_avail[pa_indx];
}

void
init386(int first)
{
	struct gate_descriptor *gdp;
	int gsel_tss, metadata_missing, off, x;
#ifndef SMP
	/* table descriptors - used to load tables by the CPU */
	struct region_descriptor r_gdt, r_idt;
#endif
	struct pcpu *pc;

	proc0.p_uarea = proc0uarea;
	thread0.td_kstack = proc0kstack;
	thread0.td_pcb = (struct pcb *)
	    (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1;
	atdevbase = ISA_HOLE_START + KERNBASE;

	/*
	 * This may be done better later if it gets more high level
	 * components in it.  If so just link td->td_proc here.
	 */
	proc_linkup(&proc0, &ksegrp0, &kse0, &thread0);

	metadata_missing = 0;
	if (bootinfo.bi_modulep) {
		preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE;
		preload_bootstrap_relocate(KERNBASE);
	} else {
		metadata_missing = 1;
	}
	if (envmode == 1)
		kern_envp = static_env;
	else if (bootinfo.bi_envp)
		kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE;

	/* Init basic tunables, hz etc */
	init_param1();

	/*
	 * Make GDT memory segments.  The code segment goes up to the end
	 * of the page with etext in it; the data segment goes to the end
	 * of the address space.
	 */
	/*
	 * XXX text protection is temporarily (?) disabled.  The limit was
	 * i386_btop(round_page(etext)) - 1.
	 */
	gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1);
	gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1);
#ifdef SMP
	pc = &SMP_prvspace[0].pcpu;
	gdt_segs[GPRIV_SEL].ssd_limit =
	    atop(sizeof(struct privatespace) - 1);
#else
	pc = &__pcpu;
	gdt_segs[GPRIV_SEL].ssd_limit =
	    atop(sizeof(struct pcpu) - 1);
#endif
	gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
	gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;

	for (x = 0; x < NGDT; x++) {
#ifdef BDE_DEBUGGER
		/* avoid overwriting db entries with APM ones */
		if (x >= GAPMCODE32_SEL && x <= GAPMDATA_SEL)
			continue;
#endif
		ssdtosd(&gdt_segs[x], &gdt[x].sd);
	}

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (int) gdt;
	lgdt(&r_gdt);

	pcpu_init(pc, 0, sizeof(struct pcpu));
	PCPU_SET(prvspace, pc);
	PCPU_SET(curthread, &thread0);

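	/*
	 * From here on, PCPU_GET() and PCPU_SET() work on the boot CPU:
	 * they resolve to %fs-relative accesses through the GPRIV_SEL
	 * descriptor whose base was pointed at `pc' above.
	 */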
	/*
	 * Initialize mutexes.
	 *
	 * icu_lock: in order to allow an interrupt to occur in a critical
	 *	     section, to set pcpu->ipending (etc...) properly, we
	 *	     must be able to get the icu lock, so it can't be
	 *	     under witness.
	 */
	mutex_init();
	mtx_init(&clock_lock, "clk", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS);

	/* make ldt memory segments */
	/*
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  And it
	 * should be spelled ...MAX_USER...
	 */
	ldt_segs[LUCODE_SEL].ssd_limit = atop(VM_MAXUSER_ADDRESS - 1);
	ldt_segs[LUDATA_SEL].ssd_limit = atop(VM_MAXUSER_ADDRESS - 1);
	for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
		ssdtosd(&ldt_segs[x], &ldt[x].sd);

	_default_ldt = GSEL(GLDT_SEL, SEL_KPL);
	lldt(_default_ldt);
	PCPU_SET(currentldt, _default_ldt);

	/* exceptions */
	for (x = 0; x < NIDT; x++)
		setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
		    GSEL(GCODE_SEL, SEL_KPL));
	setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(1, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(3, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(8, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
	setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(14, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(15, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(17, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(18, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(19, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(0x80, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));

	r_idt.rd_limit = sizeof(idt0) - 1;
	r_idt.rd_base = (int) idt;
	lidt(&r_idt);

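	/*
	 * A note on the gate types above: SDT_SYS386IGT is an interrupt
	 * gate (the CPU clears PSL_I on entry), while SDT_SYS386TGT is a
	 * trap gate (interrupts stay enabled).  The page fault vector
	 * (14), for example, uses an interrupt gate so that %cr2 can be
	 * read before another fault can clobber it.
	 */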
	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

	if (metadata_missing)
		printf("WARNING: loader(8) metadata is missing!\n");

#ifdef DEV_ISA
	isa_defaultirq();
#endif

#ifdef DDB
	kdb_init();
	if (boothowto & RB_KDB)
		Debugger("Boot flags requested debugger");
#endif

	finishidentcpu();	/* Final stage of CPU initialization */
	setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	initializecpu();	/* Initialize CPU registers */

	/* make an initial tss so cpu can get interrupt stack on syscall! */
	/* Note: -16 is so we can grow the trapframe if we came from vm86 */
	PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
	    KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb) - 16);
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	private_tss = 0;
	PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
	ltr(gsel_tss);

	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
	    dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
	    dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_cr3 = (int)IdlePTD;
	dblfault_tss.tss_eip = (int)dblfault_handler;
	dblfault_tss.tss_eflags = PSL_KERNEL;
	dblfault_tss.tss_ds = dblfault_tss.tss_es =
	    dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
	dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);

	vm86_initialize();
	getmemsize(first);
	init_param2(physmem);

	/* Now running on new page tables, configured, and u/iom is accessible. */

	/* Map the message buffer. */
	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);

	msgbufinit(msgbufp, MSGBUF_SIZE);

	/* make a call gate to reenter kernel with */
	gdp = &ldt[LSYS5CALLS_SEL].gd;

	x = (int) &IDTVEC(lcall_syscall);
	gdp->gd_looffset = x;
	gdp->gd_selector = GSEL(GCODE_SEL, SEL_KPL);
	gdp->gd_stkcpy = 1;
	gdp->gd_type = SDT_SYS386CGT;
	gdp->gd_dpl = SEL_UPL;
	gdp->gd_p = 1;
	gdp->gd_hioffset = x >> 16;

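	/*
	 * Historical note: libc entered the kernel through this gate with
	 * "lcall $7,$0" -- selector 7 is LSEL(LSYS5CALLS_SEL, SEL_UPL),
	 * and the offset of a far call through a call gate is ignored;
	 * the target comes from gd_looffset/gd_hioffset set above.
	 */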
	/* XXX does this work? */
	ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
	ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];

	/* transfer to user mode */

	_ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
	_udatasel = LSEL(LUDATA_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	thread0.td_pcb->pcb_flags = 0;	/* XXXKSE */
	thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
	thread0.td_pcb->pcb_ext = 0;
	thread0.td_frame = &proc0_tf;
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
}

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
static void f00f_hack(void *unused);
SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);

static void
f00f_hack(void *unused)
{
	struct gate_descriptor *new_idt;
#ifndef SMP
	struct region_descriptor r_idt;
#endif
	vm_offset_t tmp;

	if (!has_f00f_bug)
		return;

	GIANT_REQUIRED;

	printf("Intel Pentium detected, installing workaround for F00F bug\n");

	r_idt.rd_limit = sizeof(idt0) - 1;

	tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
	if (tmp == 0)
		panic("kmem_alloc returned 0");
	if (((unsigned int)tmp & (PAGE_SIZE - 1)) != 0)
		panic("kmem_alloc returned non-page-aligned memory");
	/* Put the first seven entries in the lower page */
	new_idt = (struct gate_descriptor *)(tmp + PAGE_SIZE - (7 * 8));
	bcopy(idt, new_idt, sizeof(idt0));
	r_idt.rd_base = (int)new_idt;
	lidt(&r_idt);
	idt = new_idt;
	if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
	    VM_PROT_READ, FALSE) != KERN_SUCCESS)
		panic("vm_map_protect failed");
	return;
}
#endif /* defined(I586_CPU) && !defined(NO_F00F_HACK) */

int
ptrace_set_pc(struct thread *td, unsigned long addr)
{
	td->td_frame->tf_eip = addr;
	return (0);
}

int
ptrace_single_step(struct thread *td)
{
	td->td_frame->tf_eflags |= PSL_T;
	return (0);
}

int
fill_regs(struct thread *td, struct reg *regs)
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = td->td_frame;
	regs->r_fs = tp->tf_fs;
	regs->r_es = tp->tf_es;
	regs->r_ds = tp->tf_ds;
	regs->r_edi = tp->tf_edi;
	regs->r_esi = tp->tf_esi;
	regs->r_ebp = tp->tf_ebp;
	regs->r_ebx = tp->tf_ebx;
	regs->r_edx = tp->tf_edx;
	regs->r_ecx = tp->tf_ecx;
	regs->r_eax = tp->tf_eax;
	regs->r_eip = tp->tf_eip;
	regs->r_cs = tp->tf_cs;
	regs->r_eflags = tp->tf_eflags;
	regs->r_esp = tp->tf_esp;
	regs->r_ss = tp->tf_ss;
	pcb = td->td_pcb;
	regs->r_gs = pcb->pcb_gs;
	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = td->td_frame;
	if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
	    !CS_SECURE(regs->r_cs))
		return (EINVAL);
	tp->tf_fs = regs->r_fs;
	tp->tf_es = regs->r_es;
	tp->tf_ds = regs->r_ds;
	tp->tf_edi = regs->r_edi;
	tp->tf_esi = regs->r_esi;
	tp->tf_ebp = regs->r_ebp;
	tp->tf_ebx = regs->r_ebx;
	tp->tf_edx = regs->r_edx;
	tp->tf_ecx = regs->r_ecx;
	tp->tf_eax = regs->r_eax;
	tp->tf_eip = regs->r_eip;
	tp->tf_cs = regs->r_cs;
	tp->tf_eflags = regs->r_eflags;
	tp->tf_esp = regs->r_esp;
	tp->tf_ss = regs->r_ss;
	pcb = td->td_pcb;
	pcb->pcb_gs = regs->r_gs;
	return (0);
}

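/*
 * fill_regs() and set_regs() above back ptrace(2)'s PT_GETREGS and
 * PT_SETREGS; the EFL_SECURE()/CS_SECURE() checks keep a debugger from
 * granting its target kernel privileges via %eflags or %cs.
 */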
#ifdef CPU_ENABLE_SSE
static void
fill_fpregs_xmm(struct savexmm *sv_xmm, struct save87 *sv_87)
{
	struct env87 *penv_87 = &sv_87->sv_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	bzero(sv_87, sizeof(*sv_87));

	/* FPU control/status */
	penv_87->en_cw = penv_xmm->en_cw;
	penv_87->en_sw = penv_xmm->en_sw;
	penv_87->en_tw = penv_xmm->en_tw;
	penv_87->en_fip = penv_xmm->en_fip;
	penv_87->en_fcs = penv_xmm->en_fcs;
	penv_87->en_opcode = penv_xmm->en_opcode;
	penv_87->en_foo = penv_xmm->en_foo;
	penv_87->en_fos = penv_xmm->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
}

static void
set_fpregs_xmm(struct save87 *sv_87, struct savexmm *sv_xmm)
{
	struct env87 *penv_87 = &sv_87->sv_env;
	struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* FPU control/status */
	penv_xmm->en_cw = penv_87->en_cw;
	penv_xmm->en_sw = penv_87->en_sw;
	penv_xmm->en_tw = penv_87->en_tw;
	penv_xmm->en_fip = penv_87->en_fip;
	penv_xmm->en_fcs = penv_87->en_fcs;
	penv_xmm->en_opcode = penv_87->en_opcode;
	penv_xmm->en_foo = penv_87->en_foo;
	penv_xmm->en_fos = penv_87->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
}
#endif /* CPU_ENABLE_SSE */

int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr) {
		fill_fpregs_xmm(&td->td_pcb->pcb_save.sv_xmm,
		    (struct save87 *)fpregs);
		return (0);
	}
#endif /* CPU_ENABLE_SSE */
	bcopy(&td->td_pcb->pcb_save.sv_87, fpregs, sizeof *fpregs);
	return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr) {
		set_fpregs_xmm((struct save87 *)fpregs,
		    &td->td_pcb->pcb_save.sv_xmm);
		return (0);
	}
#endif /* CPU_ENABLE_SSE */
	bcopy(fpregs, &td->td_pcb->pcb_save.sv_87, sizeof *fpregs);
	return (0);
}

/*
 * Get machine context.
 */
void
get_mcontext(struct thread *td, mcontext_t *mcp)
{
	struct trapframe *tp;

	tp = td->td_frame;

	mcp->mc_onstack = sigonstack(tp->tf_esp);
	mcp->mc_gs = td->td_pcb->pcb_gs;
	mcp->mc_fs = tp->tf_fs;
	mcp->mc_es = tp->tf_es;
	mcp->mc_ds = tp->tf_ds;
	mcp->mc_edi = tp->tf_edi;
	mcp->mc_esi = tp->tf_esi;
	mcp->mc_ebp = tp->tf_ebp;
	mcp->mc_isp = tp->tf_isp;
	mcp->mc_ebx = tp->tf_ebx;
	mcp->mc_edx = tp->tf_edx;
	mcp->mc_ecx = tp->tf_ecx;
	mcp->mc_eax = tp->tf_eax;
	mcp->mc_eip = tp->tf_eip;
	mcp->mc_cs = tp->tf_cs;
	mcp->mc_eflags = tp->tf_eflags;
	mcp->mc_esp = tp->tf_esp;
	mcp->mc_ss = tp->tf_ss;
	mcp->mc_len = sizeof(*mcp);
	get_fpcontext(td, mcp);
}

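/*
 * mc_len doubles as a sanity check: set_mcontext() below rejects any
 * context whose mc_len does not match sizeof(mcontext_t), e.g. one
 * assembled against a different ABI revision.
 */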
/*
 * Set machine context.
 *
 * We don't set any but the user-modifiable flags, and we won't touch
 * the cs selector.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mcp)
{
	struct trapframe *tp;
	int eflags, ret;

	tp = td->td_frame;
	if (mcp->mc_len != sizeof(*mcp))
		return (EINVAL);
	eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
	    (tp->tf_eflags & ~PSL_USERCHANGE);
	if ((ret = set_fpcontext(td, mcp)) == 0) {
		tp->tf_fs = mcp->mc_fs;
		tp->tf_es = mcp->mc_es;
		tp->tf_ds = mcp->mc_ds;
		tp->tf_edi = mcp->mc_edi;
		tp->tf_esi = mcp->mc_esi;
		tp->tf_ebp = mcp->mc_ebp;
		tp->tf_ebx = mcp->mc_ebx;
		tp->tf_edx = mcp->mc_edx;
		tp->tf_ecx = mcp->mc_ecx;
		tp->tf_eax = mcp->mc_eax;
		tp->tf_eip = mcp->mc_eip;
		tp->tf_eflags = eflags;
		tp->tf_esp = mcp->mc_esp;
		tp->tf_ss = mcp->mc_ss;
		td->td_pcb->pcb_gs = mcp->mc_gs;
		ret = 0;
	}
	return (ret);
}

static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifndef DEV_NPX
	mcp->mc_fpformat = _MC_FPFMT_NODEV;
	mcp->mc_ownedfp = _MC_FPOWNED_NONE;
#else
	union savefpu *addr;

	/*
	 * XXX mc_fpstate might be misaligned, since its declaration is not
	 * unportabilized using __attribute__((aligned(16))) like the
	 * declaration of struct savemm, and anyway, alignment doesn't work
	 * for auto variables since we don't use gcc's pessimal stack
	 * alignment.  Work around this by abusing the spare fields after
	 * mcp->mc_fpstate.
	 *
	 * XXX unpessimize most cases by only aligning when fxsave might be
	 * called, although this requires knowing too much about
	 * npxgetregs()'s internals.
	 */
	addr = (union savefpu *)&mcp->mc_fpstate;
	if (td == PCPU_GET(fpcurthread) &&
#ifdef CPU_ENABLE_SSE
	    cpu_fxsr &&
#endif
	    ((uintptr_t)(void *)addr & 0xF)) {
		do
			addr = (void *)((char *)addr + 4);
		while ((uintptr_t)(void *)addr & 0xF);
	}
	mcp->mc_ownedfp = npxgetregs(td, addr);
	if (addr != (union savefpu *)&mcp->mc_fpstate) {
		bcopy(addr, &mcp->mc_fpstate, sizeof(mcp->mc_fpstate));
		bzero(&mcp->mc_spare2, sizeof(mcp->mc_spare2));
	}
	mcp->mc_fpformat = npxformat();
#endif
}

static int
set_fpcontext(struct thread *td, const mcontext_t *mcp)
{
	union savefpu *addr;

	if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
		return (0);
	else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
	    mcp->mc_fpformat != _MC_FPFMT_XMM)
		return (EINVAL);
	else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
		/* We don't care what state is left in the FPU or PCB. */
		fpstate_drop(td);
	else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
	    mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
		/* XXX align as above. */
		addr = (union savefpu *)&mcp->mc_fpstate;
		if (td == PCPU_GET(fpcurthread) &&
#ifdef CPU_ENABLE_SSE
		    cpu_fxsr &&
#endif
		    ((uintptr_t)(void *)addr & 0xF)) {
			do
				addr = (void *)((char *)addr + 4);
			while ((uintptr_t)(void *)addr & 0xF);
			bcopy(&mcp->mc_fpstate, addr, sizeof(mcp->mc_fpstate));
		}
#ifdef DEV_NPX
		/*
		 * XXX we violate the dubious requirement that npxsetregs()
		 * be called with interrupts disabled.
		 */
		npxsetregs(td, addr);
#endif
		/*
		 * Don't bother putting things back where they were in the
		 * misaligned case, since we know that the caller won't use
		 * them again.
		 */
	} else
		return (EINVAL);
	return (0);
}

static void
fpstate_drop(struct thread *td)
{
	register_t s;

	s = intr_disable();
#ifdef DEV_NPX
	if (PCPU_GET(fpcurthread) == td)
		npxdrop();
#endif
	/*
	 * XXX force a full drop of the npx.  The above only drops it if we
	 * owned it.  npxgetregs() has the same bug in the !cpu_fxsr case.
	 *
	 * XXX I don't much like npxgetregs()'s semantics of doing a full
	 * drop.  Dropping only to the pcb matches fnsave's behaviour.
	 * We only need to drop to !PCB_INITDONE in sendsig().  But
	 * sendsig() is the only caller of npxgetregs()... perhaps we just
	 * have too many layers.
	 */
	curthread->td_pcb->pcb_flags &= ~PCB_NPXINITDONE;
	intr_restore(s);
}

int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;

	if (td == NULL) {
		dbregs->dr[0] = rdr0();
		dbregs->dr[1] = rdr1();
		dbregs->dr[2] = rdr2();
		dbregs->dr[3] = rdr3();
		dbregs->dr[4] = rdr4();
		dbregs->dr[5] = rdr5();
		dbregs->dr[6] = rdr6();
		dbregs->dr[7] = rdr7();
	} else {
		pcb = td->td_pcb;
		dbregs->dr[0] = pcb->pcb_dr0;
		dbregs->dr[1] = pcb->pcb_dr1;
		dbregs->dr[2] = pcb->pcb_dr2;
		dbregs->dr[3] = pcb->pcb_dr3;
		dbregs->dr[4] = 0;
		dbregs->dr[5] = 0;
		dbregs->dr[6] = pcb->pcb_dr6;
		dbregs->dr[7] = pcb->pcb_dr7;
	}
	return (0);
}

int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;
	int i;
	u_int32_t mask1, mask2;

	if (td == NULL) {
		load_dr0(dbregs->dr[0]);
		load_dr1(dbregs->dr[1]);
		load_dr2(dbregs->dr[2]);
		load_dr3(dbregs->dr[3]);
		load_dr4(dbregs->dr[4]);
		load_dr5(dbregs->dr[5]);
		load_dr6(dbregs->dr[6]);
		load_dr7(dbregs->dr[7]);
	} else {
		/*
		 * Don't let an illegal value for dr7 get set.  Specifically,
		 * check for undefined settings.  Setting these bit patterns
		 * results in undefined behaviour and can lead to an
		 * unexpected TRCTRAP.
		 */
		for (i = 0, mask1 = 0x3 << 16, mask2 = 0x2 << 16; i < 8;
		    i++, mask1 <<= 2, mask2 <<= 2)
			if ((dbregs->dr[7] & mask1) == mask2)
				return (EINVAL);

		pcb = td->td_pcb;

		/*
		 * Don't let a process set a breakpoint that is not within
		 * the process's address space.  If a process could do this,
		 * it could halt the system by setting a breakpoint in the
		 * kernel (if ddb was enabled).  Thus, we need to check to
		 * make sure that no breakpoints are being enabled for
		 * addresses outside the process's address space, unless,
		 * perhaps, we were called by uid 0.
		 *
		 * XXX - what about when the watched area of the user's
		 * address space is written into from within the kernel?
		 * Wouldn't that still cause a breakpoint to be generated
		 * from within kernel mode?
		 */
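		/*
		 * For reference: dr7's low byte holds the local/global
		 * enable pairs for dr0-dr3, so (dr7 & 0x3) tests dr0,
		 * (0x3 << 2) tests dr1, and so on; bits 16 and up hold a
		 * 2-bit R/W type and a 2-bit length for each breakpoint.
		 * The mask1/mask2 loop above rejects the undefined R/W
		 * pattern 10b.
		 */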
2571 */ 2572 2573 if (suser(td) != 0) { 2574 if (dbregs->dr[7] & 0x3) { 2575 /* dr0 is enabled */ 2576 if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS) 2577 return (EINVAL); 2578 } 2579 2580 if (dbregs->dr[7] & (0x3<<2)) { 2581 /* dr1 is enabled */ 2582 if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS) 2583 return (EINVAL); 2584 } 2585 2586 if (dbregs->dr[7] & (0x3<<4)) { 2587 /* dr2 is enabled */ 2588 if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS) 2589 return (EINVAL); 2590 } 2591 2592 if (dbregs->dr[7] & (0x3<<6)) { 2593 /* dr3 is enabled */ 2594 if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS) 2595 return (EINVAL); 2596 } 2597 } 2598 2599 pcb->pcb_dr0 = dbregs->dr[0]; 2600 pcb->pcb_dr1 = dbregs->dr[1]; 2601 pcb->pcb_dr2 = dbregs->dr[2]; 2602 pcb->pcb_dr3 = dbregs->dr[3]; 2603 pcb->pcb_dr6 = dbregs->dr[6]; 2604 pcb->pcb_dr7 = dbregs->dr[7]; 2605 2606 pcb->pcb_flags |= PCB_DBREGS; 2607 } 2608 2609 return (0); 2610} 2611 2612/* 2613 * Return > 0 if a hardware breakpoint has been hit, and the 2614 * breakpoint was in user space. Return 0, otherwise. 2615 */ 2616int 2617user_dbreg_trap(void) 2618{ 2619 u_int32_t dr7, dr6; /* debug registers dr6 and dr7 */ 2620 u_int32_t bp; /* breakpoint bits extracted from dr6 */ 2621 int nbp; /* number of breakpoints that triggered */ 2622 caddr_t addr[4]; /* breakpoint addresses */ 2623 int i; 2624 2625 dr7 = rdr7(); 2626 if ((dr7 & 0x000000ff) == 0) { 2627 /* 2628 * all GE and LE bits in the dr7 register are zero, 2629 * thus the trap couldn't have been caused by the 2630 * hardware debug registers 2631 */ 2632 return 0; 2633 } 2634 2635 nbp = 0; 2636 dr6 = rdr6(); 2637 bp = dr6 & 0x0000000f; 2638 2639 if (!bp) { 2640 /* 2641 * None of the breakpoint bits are set meaning this 2642 * trap was not caused by any of the debug registers 2643 */ 2644 return 0; 2645 } 2646 2647 /* 2648 * at least one of the breakpoints were hit, check to see 2649 * which ones and if any of them are user space addresses 2650 */ 2651 2652 if (bp & 0x01) { 2653 addr[nbp++] = (caddr_t)rdr0(); 2654 } 2655 if (bp & 0x02) { 2656 addr[nbp++] = (caddr_t)rdr1(); 2657 } 2658 if (bp & 0x04) { 2659 addr[nbp++] = (caddr_t)rdr2(); 2660 } 2661 if (bp & 0x08) { 2662 addr[nbp++] = (caddr_t)rdr3(); 2663 } 2664 2665 for (i=0; i<nbp; i++) { 2666 if (addr[i] < 2667 (caddr_t)VM_MAXUSER_ADDRESS) { 2668 /* 2669 * addr[i] is in user space 2670 */ 2671 return nbp; 2672 } 2673 } 2674 2675 /* 2676 * None of the breakpoints are in user space. 2677 */ 2678 return 0; 2679} 2680 2681 2682#ifndef DDB 2683void 2684Debugger(const char *msg) 2685{ 2686 printf("Debugger(\"%s\") called.\n", msg); 2687} 2688#endif /* no DDB */ 2689 2690#ifdef DDB 2691 2692/* 2693 * Provide inb() and outb() as functions. They are normally only 2694 * available as macros calling inlined functions, thus cannot be 2695 * called inside DDB. 2696 * 2697 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined. 2698 */ 2699 2700#undef inb 2701#undef outb 2702 2703/* silence compiler warnings */ 2704u_char inb(u_int); 2705void outb(u_int, u_char); 2706 2707u_char 2708inb(u_int port) 2709{ 2710 u_char data; 2711 /* 2712 * We use %%dx and not %1 here because i/o is done at %dx and not at 2713 * %edx, while gcc generates inferior code (movw instead of movl) 2714 * if we tell it to load (u_short) port. 2715 */ 2716 __asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port)); 2717 return (data); 2718} 2719 2720void 2721outb(u_int port, u_char data) 2722{ 2723 u_char al; 2724 /* 2725 * Use an unnecessary assignment to help gcc's register allocator. 
void
outb(u_int port, u_char data)
{
	u_char al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

#endif /* DDB */