machdep.c revision 106605
1/*- 2 * Copyright (c) 1992 Terrence R. Lambert. 3 * Copyright (c) 1982, 1987, 1990 The Regents of the University of California. 4 * All rights reserved. 5 * 6 * This code is derived from software contributed to Berkeley by 7 * William Jolitz. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by the University of 20 * California, Berkeley and its contributors. 21 * 4. Neither the name of the University nor the names of its contributors 22 * may be used to endorse or promote products derived from this software 23 * without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 
36 * 37 * from: @(#)machdep.c 7.4 (Berkeley) 6/3/91 38 * $FreeBSD: head/sys/amd64/amd64/machdep.c 106605 2002-11-07 23:57:17Z tmm $ 39 */ 40 41#include "opt_atalk.h" 42#include "opt_compat.h" 43#include "opt_cpu.h" 44#include "opt_ddb.h" 45#include "opt_inet.h" 46#include "opt_ipx.h" 47#include "opt_isa.h" 48#include "opt_maxmem.h" 49#include "opt_msgbuf.h" 50#include "opt_npx.h" 51#include "opt_perfmon.h" 52#include "opt_kstack_pages.h" 53 54#include <sys/param.h> 55#include <sys/systm.h> 56#include <sys/sysproto.h> 57#include <sys/signalvar.h> 58#include <sys/imgact.h> 59#include <sys/kernel.h> 60#include <sys/ktr.h> 61#include <sys/linker.h> 62#include <sys/lock.h> 63#include <sys/malloc.h> 64#include <sys/mutex.h> 65#include <sys/pcpu.h> 66#include <sys/proc.h> 67#include <sys/bio.h> 68#include <sys/buf.h> 69#include <sys/reboot.h> 70#include <sys/callout.h> 71#include <sys/msgbuf.h> 72#include <sys/sched.h> 73#include <sys/sysent.h> 74#include <sys/sysctl.h> 75#include <sys/ucontext.h> 76#include <sys/vmmeter.h> 77#include <sys/bus.h> 78#include <sys/eventhandler.h> 79 80#include <vm/vm.h> 81#include <vm/vm_param.h> 82#include <vm/vm_kern.h> 83#include <vm/vm_object.h> 84#include <vm/vm_page.h> 85#include <vm/vm_map.h> 86#include <vm/vm_pager.h> 87#include <vm/vm_extern.h> 88 89#include <sys/user.h> 90#include <sys/exec.h> 91#include <sys/cons.h> 92 93#include <ddb/ddb.h> 94 95#include <net/netisr.h> 96 97#include <machine/cpu.h> 98#include <machine/cputypes.h> 99#include <machine/reg.h> 100#include <machine/clock.h> 101#include <machine/specialreg.h> 102#include <machine/bootinfo.h> 103#include <machine/md_var.h> 104#include <machine/pc/bios.h> 105#include <machine/pcb_ext.h> /* pcb.h included via sys/user.h */ 106#include <machine/proc.h> 107#ifdef PERFMON 108#include <machine/perfmon.h> 109#endif 110#ifdef SMP 111#include <machine/privatespace.h> 112#include <machine/smp.h> 113#endif 114 115#include <i386/isa/icu.h> 116#include <i386/isa/intr_machdep.h> 117#include <isa/rtc.h> 118#include <machine/vm86.h> 119#include <sys/ptrace.h> 120#include <machine/sigframe.h> 121 122extern void init386(int first); 123extern void dblfault_handler(void); 124 125extern void printcpuinfo(void); /* XXX header file */ 126extern void earlysetcpuclass(void); /* same header file */ 127extern void finishidentcpu(void); 128extern void panicifcpuunsupported(void); 129extern void initializecpu(void); 130 131#define CS_SECURE(cs) (ISPL(cs) == SEL_UPL) 132#define EFL_SECURE(ef, oef) ((((ef) ^ (oef)) & ~PSL_USERCHANGE) == 0) 133 134#if !defined(CPU_ENABLE_SSE) && defined(I686_CPU) 135#define CPU_ENABLE_SSE 136#endif 137#if defined(CPU_DISABLE_SSE) 138#undef CPU_ENABLE_SSE 139#endif 140 141static void cpu_startup(void *); 142static void fpstate_drop(struct thread *td); 143static void get_fpcontext(struct thread *td, mcontext_t *mcp); 144static int set_fpcontext(struct thread *td, const mcontext_t *mcp); 145#ifdef CPU_ENABLE_SSE 146static void set_fpregs_xmm(struct save87 *, struct savexmm *); 147static void fill_fpregs_xmm(struct savexmm *, struct save87 *); 148#endif /* CPU_ENABLE_SSE */ 149SYSINIT(cpu, SI_SUB_CPU, SI_ORDER_FIRST, cpu_startup, NULL) 150 151int _udatasel, _ucodesel; 152u_int atdevbase; 153 154#if defined(SWTCH_OPTIM_STATS) 155extern int swtch_optim_stats; 156SYSCTL_INT(_debug, OID_AUTO, swtch_optim_stats, 157 CTLFLAG_RD, &swtch_optim_stats, 0, ""); 158SYSCTL_INT(_debug, OID_AUTO, tlb_flush_count, 159 CTLFLAG_RD, &tlb_flush_count, 0, ""); 160#endif 161 162int cold = 1; 163 164#ifdef COMPAT_43 
165static void osendsig(sig_t catcher, int sig, sigset_t *mask, u_long code); 166#endif 167#ifdef COMPAT_FREEBSD4 168static void freebsd4_sendsig(sig_t catcher, int sig, sigset_t *mask, 169 u_long code); 170#endif 171 172long Maxmem = 0; 173 174vm_offset_t phys_avail[10]; 175 176/* must be 2 less so 0 0 can signal end of chunks */ 177#define PHYS_AVAIL_ARRAY_END ((sizeof(phys_avail) / sizeof(vm_offset_t)) - 2) 178 179struct kva_md_info kmi; 180 181static struct trapframe proc0_tf; 182#ifndef SMP 183static struct pcpu __pcpu; 184#endif 185 186struct mtx icu_lock; 187 188static void 189cpu_startup(dummy) 190 void *dummy; 191{ 192 /* 193 * Good {morning,afternoon,evening,night}. 194 */ 195 earlysetcpuclass(); 196 startrtclock(); 197 printcpuinfo(); 198 panicifcpuunsupported(); 199#ifdef PERFMON 200 perfmon_init(); 201#endif 202 printf("real memory = %u (%uK bytes)\n", ptoa(Maxmem), 203 ptoa(Maxmem) / 1024); 204 /* 205 * Display any holes after the first chunk of extended memory. 206 */ 207 if (bootverbose) { 208 int indx; 209 210 printf("Physical memory chunk(s):\n"); 211 for (indx = 0; phys_avail[indx + 1] != 0; indx += 2) { 212 unsigned int size1; 213 214 size1 = phys_avail[indx + 1] - phys_avail[indx]; 215 printf("0x%08x - 0x%08x, %u bytes (%u pages)\n", 216 phys_avail[indx], phys_avail[indx + 1] - 1, size1, 217 size1 / PAGE_SIZE); 218 } 219 } 220 221 vm_ksubmap_init(&kmi); 222 223 printf("avail memory = %u (%uK bytes)\n", ptoa(cnt.v_free_count), 224 ptoa(cnt.v_free_count) / 1024); 225 226 /* 227 * Set up buffers, so they can be used to read disk labels. 228 */ 229 bufinit(); 230 vm_pager_bufferinit(); 231 232#ifndef SMP 233 /* For SMP, we delay the cpu_setregs() until after SMP startup. */ 234 cpu_setregs(); 235#endif 236} 237 238/* 239 * Send an interrupt to process. 240 * 241 * Stack is set up to allow sigcode stored 242 * at top to call routine, followed by kcall 243 * to sigreturn routine below. After sigreturn 244 * resets the signal mask, the stack, and the 245 * frame pointer, it returns to the user 246 * specified pc, psl. 247 */ 248#ifdef COMPAT_43 249static void 250osendsig(catcher, sig, mask, code) 251 sig_t catcher; 252 int sig; 253 sigset_t *mask; 254 u_long code; 255{ 256 struct osigframe sf, *fp; 257 struct proc *p; 258 struct thread *td; 259 struct sigacts *psp; 260 struct trapframe *regs; 261 int oonstack; 262 263 td = curthread; 264 p = td->td_proc; 265 PROC_LOCK_ASSERT(p, MA_OWNED); 266 psp = p->p_sigacts; 267 regs = td->td_frame; 268 oonstack = sigonstack(regs->tf_esp); 269 270 /* Allocate space for the signal handler context. */ 271 if ((p->p_flag & P_ALTSTACK) && !oonstack && 272 SIGISMEMBER(psp->ps_sigonstack, sig)) { 273 fp = (struct osigframe *)(p->p_sigstk.ss_sp + 274 p->p_sigstk.ss_size - sizeof(struct osigframe)); 275#if defined(COMPAT_43) || defined(COMPAT_SUNOS) 276 p->p_sigstk.ss_flags |= SS_ONSTACK; 277#endif 278 } else 279 fp = (struct osigframe *)regs->tf_esp - 1; 280 PROC_UNLOCK(p); 281 282 /* Translate the signal if appropriate. */ 283 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize) 284 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; 285 286 /* Build the argument list for the signal handler. */ 287 sf.sf_signum = sig; 288 sf.sf_scp = (register_t)&fp->sf_siginfo.si_sc; 289 PROC_LOCK(p); 290 if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) { 291 /* Signal handler installed with SA_SIGINFO. 
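		 * The osigframe built below is what the osigcode
		 * trampoline finds on the user stack: roughly, sf_signum,
		 * sf_arg2 and sf_scp become the handler's (signal, code or
		 * siginfo pointer, context pointer) arguments, and the
		 * saved context is handed back to osigreturn() once the
		 * handler returns.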
*/ 292 sf.sf_arg2 = (register_t)&fp->sf_siginfo; 293 sf.sf_siginfo.si_signo = sig; 294 sf.sf_siginfo.si_code = code; 295 sf.sf_ahu.sf_action = (__osiginfohandler_t *)catcher; 296 } else { 297 /* Old FreeBSD-style arguments. */ 298 sf.sf_arg2 = code; 299 sf.sf_addr = regs->tf_err; 300 sf.sf_ahu.sf_handler = catcher; 301 } 302 PROC_UNLOCK(p); 303 304 /* Save most if not all of trap frame. */ 305 sf.sf_siginfo.si_sc.sc_eax = regs->tf_eax; 306 sf.sf_siginfo.si_sc.sc_ebx = regs->tf_ebx; 307 sf.sf_siginfo.si_sc.sc_ecx = regs->tf_ecx; 308 sf.sf_siginfo.si_sc.sc_edx = regs->tf_edx; 309 sf.sf_siginfo.si_sc.sc_esi = regs->tf_esi; 310 sf.sf_siginfo.si_sc.sc_edi = regs->tf_edi; 311 sf.sf_siginfo.si_sc.sc_cs = regs->tf_cs; 312 sf.sf_siginfo.si_sc.sc_ds = regs->tf_ds; 313 sf.sf_siginfo.si_sc.sc_ss = regs->tf_ss; 314 sf.sf_siginfo.si_sc.sc_es = regs->tf_es; 315 sf.sf_siginfo.si_sc.sc_fs = regs->tf_fs; 316 sf.sf_siginfo.si_sc.sc_gs = rgs(); 317 sf.sf_siginfo.si_sc.sc_isp = regs->tf_isp; 318 319 /* Build the signal context to be used by osigreturn(). */ 320 sf.sf_siginfo.si_sc.sc_onstack = (oonstack) ? 1 : 0; 321 SIG2OSIG(*mask, sf.sf_siginfo.si_sc.sc_mask); 322 sf.sf_siginfo.si_sc.sc_sp = regs->tf_esp; 323 sf.sf_siginfo.si_sc.sc_fp = regs->tf_ebp; 324 sf.sf_siginfo.si_sc.sc_pc = regs->tf_eip; 325 sf.sf_siginfo.si_sc.sc_ps = regs->tf_eflags; 326 sf.sf_siginfo.si_sc.sc_trapno = regs->tf_trapno; 327 sf.sf_siginfo.si_sc.sc_err = regs->tf_err; 328 329 /* 330 * If we're a vm86 process, we want to save the segment registers. 331 * We also change eflags to be our emulated eflags, not the actual 332 * eflags. 333 */ 334 if (regs->tf_eflags & PSL_VM) { 335 /* XXX confusing names: `tf' isn't a trapframe; `regs' is. */ 336 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 337 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86; 338 339 sf.sf_siginfo.si_sc.sc_gs = tf->tf_vm86_gs; 340 sf.sf_siginfo.si_sc.sc_fs = tf->tf_vm86_fs; 341 sf.sf_siginfo.si_sc.sc_es = tf->tf_vm86_es; 342 sf.sf_siginfo.si_sc.sc_ds = tf->tf_vm86_ds; 343 344 if (vm86->vm86_has_vme == 0) 345 sf.sf_siginfo.si_sc.sc_ps = 346 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) | 347 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); 348 349 /* See sendsig() for comments. */ 350 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP); 351 } 352 353 /* 354 * Copy the sigframe out to the user's stack. 355 */ 356 if (copyout(&sf, fp, sizeof(*fp)) != 0) { 357#ifdef DEBUG 358 printf("process %ld has trashed its stack\n", (long)p->p_pid); 359#endif 360 PROC_LOCK(p); 361 sigexit(td, SIGILL); 362 } 363 364 regs->tf_esp = (int)fp; 365 regs->tf_eip = PS_STRINGS - szosigcode; 366 regs->tf_eflags &= ~PSL_T; 367 regs->tf_cs = _ucodesel; 368 regs->tf_ds = _udatasel; 369 regs->tf_es = _udatasel; 370 regs->tf_fs = _udatasel; 371 load_gs(_udatasel); 372 regs->tf_ss = _udatasel; 373 PROC_LOCK(p); 374} 375#endif /* COMPAT_43 */ 376 377#ifdef COMPAT_FREEBSD4 378static void 379freebsd4_sendsig(catcher, sig, mask, code) 380 sig_t catcher; 381 int sig; 382 sigset_t *mask; 383 u_long code; 384{ 385 struct sigframe4 sf, *sfp; 386 struct proc *p; 387 struct thread *td; 388 struct sigacts *psp; 389 struct trapframe *regs; 390 int oonstack; 391 392 td = curthread; 393 p = td->td_proc; 394 PROC_LOCK_ASSERT(p, MA_OWNED); 395 psp = p->p_sigacts; 396 regs = td->td_frame; 397 oonstack = sigonstack(regs->tf_esp); 398 399 /* Save user context. 
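	 * The general-purpose registers are captured with a single
	 * bcopy() of the trapframe into the mcontext; this relies on
	 * the mc_fs..mc_ss fields mirroring the layout of struct
	 * trapframe, which is why the copy below starts at mc_fs.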
*/ 400 bzero(&sf, sizeof(sf)); 401 sf.sf_uc.uc_sigmask = *mask; 402 sf.sf_uc.uc_stack = p->p_sigstk; 403 sf.sf_uc.uc_stack.ss_flags = (p->p_flag & P_ALTSTACK) 404 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE; 405 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0; 406 sf.sf_uc.uc_mcontext.mc_gs = rgs(); 407 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs)); 408 409 /* Allocate space for the signal handler context. */ 410 if ((p->p_flag & P_ALTSTACK) != 0 && !oonstack && 411 SIGISMEMBER(psp->ps_sigonstack, sig)) { 412 sfp = (struct sigframe4 *)(p->p_sigstk.ss_sp + 413 p->p_sigstk.ss_size - sizeof(struct sigframe4)); 414#if defined(COMPAT_43) || defined(COMPAT_SUNOS) 415 p->p_sigstk.ss_flags |= SS_ONSTACK; 416#endif 417 } else 418 sfp = (struct sigframe4 *)regs->tf_esp - 1; 419 PROC_UNLOCK(p); 420 421 /* Translate the signal if appropriate. */ 422 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize) 423 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; 424 425 /* Build the argument list for the signal handler. */ 426 sf.sf_signum = sig; 427 sf.sf_ucontext = (register_t)&sfp->sf_uc; 428 PROC_LOCK(p); 429 if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) { 430 /* Signal handler installed with SA_SIGINFO. */ 431 sf.sf_siginfo = (register_t)&sfp->sf_si; 432 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; 433 434 /* Fill in POSIX parts */ 435 sf.sf_si.si_signo = sig; 436 sf.sf_si.si_code = code; 437 sf.sf_si.si_addr = (void *)regs->tf_err; 438 } else { 439 /* Old FreeBSD-style arguments. */ 440 sf.sf_siginfo = code; 441 sf.sf_addr = regs->tf_err; 442 sf.sf_ahu.sf_handler = catcher; 443 } 444 PROC_UNLOCK(p); 445 446 /* 447 * If we're a vm86 process, we want to save the segment registers. 448 * We also change eflags to be our emulated eflags, not the actual 449 * eflags. 450 */ 451 if (regs->tf_eflags & PSL_VM) { 452 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 453 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86; 454 455 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs; 456 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs; 457 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es; 458 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds; 459 460 if (vm86->vm86_has_vme == 0) 461 sf.sf_uc.uc_mcontext.mc_eflags = 462 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) | 463 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); 464 465 /* 466 * Clear PSL_NT to inhibit T_TSSFLT faults on return from 467 * syscalls made by the signal handler. This just avoids 468 * wasting time for our lazy fixup of such faults. PSL_NT 469 * does nothing in vm86 mode, but vm86 programs can set it 470 * almost legitimately in probes for old cpu types. 471 */ 472 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP); 473 } 474 475 /* 476 * Copy the sigframe out to the user's stack. 
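	 *
	 * If the copyout() fails the user stack is unusable, so the
	 * process is killed via sigexit(SIGILL) rather than being given
	 * a chance to catch another signal.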
477 */ 478 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) { 479#ifdef DEBUG 480 printf("process %ld has trashed its stack\n", (long)p->p_pid); 481#endif 482 PROC_LOCK(p); 483 sigexit(td, SIGILL); 484 } 485 486 regs->tf_esp = (int)sfp; 487 regs->tf_eip = PS_STRINGS - szfreebsd4_sigcode; 488 regs->tf_eflags &= ~PSL_T; 489 regs->tf_cs = _ucodesel; 490 regs->tf_ds = _udatasel; 491 regs->tf_es = _udatasel; 492 regs->tf_fs = _udatasel; 493 regs->tf_ss = _udatasel; 494 PROC_LOCK(p); 495} 496#endif /* COMPAT_FREEBSD4 */ 497 498void 499sendsig(catcher, sig, mask, code) 500 sig_t catcher; 501 int sig; 502 sigset_t *mask; 503 u_long code; 504{ 505 struct sigframe sf, *sfp; 506 struct proc *p; 507 struct thread *td; 508 struct sigacts *psp; 509 struct trapframe *regs; 510 int oonstack; 511 512 td = curthread; 513 p = td->td_proc; 514 PROC_LOCK_ASSERT(p, MA_OWNED); 515 psp = p->p_sigacts; 516#ifdef COMPAT_FREEBSD4 517 if (SIGISMEMBER(psp->ps_freebsd4, sig)) { 518 freebsd4_sendsig(catcher, sig, mask, code); 519 return; 520 } 521#endif 522#ifdef COMPAT_43 523 if (SIGISMEMBER(psp->ps_osigset, sig)) { 524 osendsig(catcher, sig, mask, code); 525 return; 526 } 527#endif 528 regs = td->td_frame; 529 oonstack = sigonstack(regs->tf_esp); 530 531 /* Save user context. */ 532 bzero(&sf, sizeof(sf)); 533 sf.sf_uc.uc_sigmask = *mask; 534 sf.sf_uc.uc_stack = p->p_sigstk; 535 sf.sf_uc.uc_stack.ss_flags = (p->p_flag & P_ALTSTACK) 536 ? ((oonstack) ? SS_ONSTACK : 0) : SS_DISABLE; 537 sf.sf_uc.uc_mcontext.mc_onstack = (oonstack) ? 1 : 0; 538 sf.sf_uc.uc_mcontext.mc_gs = rgs(); 539 bcopy(regs, &sf.sf_uc.uc_mcontext.mc_fs, sizeof(*regs)); 540 sf.sf_uc.uc_mcontext.mc_len = sizeof(sf.sf_uc.uc_mcontext); /* magic */ 541 get_fpcontext(td, &sf.sf_uc.uc_mcontext); 542 fpstate_drop(td); 543 544 /* Allocate space for the signal handler context. */ 545 if ((p->p_flag & P_ALTSTACK) != 0 && !oonstack && 546 SIGISMEMBER(psp->ps_sigonstack, sig)) { 547 sfp = (struct sigframe *)(p->p_sigstk.ss_sp + 548 p->p_sigstk.ss_size - sizeof(struct sigframe)); 549#if defined(COMPAT_43) || defined(COMPAT_SUNOS) 550 p->p_sigstk.ss_flags |= SS_ONSTACK; 551#endif 552 } else 553 sfp = (struct sigframe *)regs->tf_esp - 1; 554 PROC_UNLOCK(p); 555 556 /* Translate the signal if appropriate. */ 557 if (p->p_sysent->sv_sigtbl && sig <= p->p_sysent->sv_sigsize) 558 sig = p->p_sysent->sv_sigtbl[_SIG_IDX(sig)]; 559 560 /* Build the argument list for the signal handler. */ 561 sf.sf_signum = sig; 562 sf.sf_ucontext = (register_t)&sfp->sf_uc; 563 PROC_LOCK(p); 564 if (SIGISMEMBER(p->p_sigacts->ps_siginfo, sig)) { 565 /* Signal handler installed with SA_SIGINFO. */ 566 sf.sf_siginfo = (register_t)&sfp->sf_si; 567 sf.sf_ahu.sf_action = (__siginfohandler_t *)catcher; 568 569 /* Fill in POSIX parts */ 570 sf.sf_si.si_signo = sig; 571 sf.sf_si.si_code = code; 572 sf.sf_si.si_addr = (void *)regs->tf_err; 573 } else { 574 /* Old FreeBSD-style arguments. */ 575 sf.sf_siginfo = code; 576 sf.sf_addr = regs->tf_err; 577 sf.sf_ahu.sf_handler = catcher; 578 } 579 PROC_UNLOCK(p); 580 581 /* 582 * If we're a vm86 process, we want to save the segment registers. 583 * We also change eflags to be our emulated eflags, not the actual 584 * eflags. 
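	 *
	 * On CPUs without the VME extension the virtual interrupt bits
	 * (PSL_VIF/PSL_VIP) are emulated in software and kept in
	 * vm86_eflags, so they are merged into the saved eflags here.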
585 */ 586 if (regs->tf_eflags & PSL_VM) { 587 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 588 struct vm86_kernel *vm86 = &td->td_pcb->pcb_ext->ext_vm86; 589 590 sf.sf_uc.uc_mcontext.mc_gs = tf->tf_vm86_gs; 591 sf.sf_uc.uc_mcontext.mc_fs = tf->tf_vm86_fs; 592 sf.sf_uc.uc_mcontext.mc_es = tf->tf_vm86_es; 593 sf.sf_uc.uc_mcontext.mc_ds = tf->tf_vm86_ds; 594 595 if (vm86->vm86_has_vme == 0) 596 sf.sf_uc.uc_mcontext.mc_eflags = 597 (tf->tf_eflags & ~(PSL_VIF | PSL_VIP)) | 598 (vm86->vm86_eflags & (PSL_VIF | PSL_VIP)); 599 600 /* 601 * Clear PSL_NT to inhibit T_TSSFLT faults on return from 602 * syscalls made by the signal handler. This just avoids 603 * wasting time for our lazy fixup of such faults. PSL_NT 604 * does nothing in vm86 mode, but vm86 programs can set it 605 * almost legitimately in probes for old cpu types. 606 */ 607 tf->tf_eflags &= ~(PSL_VM | PSL_NT | PSL_VIF | PSL_VIP); 608 } 609 610 /* 611 * Copy the sigframe out to the user's stack. 612 */ 613 if (copyout(&sf, sfp, sizeof(*sfp)) != 0) { 614#ifdef DEBUG 615 printf("process %ld has trashed its stack\n", (long)p->p_pid); 616#endif 617 PROC_LOCK(p); 618 sigexit(td, SIGILL); 619 } 620 621 regs->tf_esp = (int)sfp; 622 regs->tf_eip = PS_STRINGS - *(p->p_sysent->sv_szsigcode); 623 regs->tf_eflags &= ~PSL_T; 624 regs->tf_cs = _ucodesel; 625 regs->tf_ds = _udatasel; 626 regs->tf_es = _udatasel; 627 regs->tf_fs = _udatasel; 628 regs->tf_ss = _udatasel; 629 PROC_LOCK(p); 630} 631 632/* 633 * System call to cleanup state after a signal 634 * has been taken. Reset signal mask and 635 * stack state from context left by sendsig (above). 636 * Return to previous pc and psl as specified by 637 * context left by sendsig. Check carefully to 638 * make sure that the user has not modified the 639 * state to gain improper privileges. 640 * 641 * MPSAFE 642 */ 643#ifdef COMPAT_43 644int 645osigreturn(td, uap) 646 struct thread *td; 647 struct osigreturn_args /* { 648 struct osigcontext *sigcntxp; 649 } */ *uap; 650{ 651 struct osigcontext sc; 652 struct trapframe *regs; 653 struct osigcontext *scp; 654 struct proc *p = td->td_proc; 655 int eflags, error; 656 657 regs = td->td_frame; 658 error = copyin(uap->sigcntxp, &sc, sizeof(sc)); 659 if (error != 0) 660 return (error); 661 scp = ≻ 662 eflags = scp->sc_ps; 663 if (eflags & PSL_VM) { 664 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 665 struct vm86_kernel *vm86; 666 667 /* 668 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't 669 * set up the vm86 area, and we can't enter vm86 mode. 670 */ 671 if (td->td_pcb->pcb_ext == 0) 672 return (EINVAL); 673 vm86 = &td->td_pcb->pcb_ext->ext_vm86; 674 if (vm86->vm86_inited == 0) 675 return (EINVAL); 676 677 /* Go back to user mode if both flags are set. */ 678 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) 679 trapsignal(p, SIGBUS, 0); 680 681 if (vm86->vm86_has_vme) { 682 eflags = (tf->tf_eflags & ~VME_USERCHANGE) | 683 (eflags & VME_USERCHANGE) | PSL_VM; 684 } else { 685 vm86->vm86_eflags = eflags; /* save VIF, VIP */ 686 eflags = (tf->tf_eflags & ~VM_USERCHANGE) | 687 (eflags & VM_USERCHANGE) | PSL_VM; 688 } 689 tf->tf_vm86_ds = scp->sc_ds; 690 tf->tf_vm86_es = scp->sc_es; 691 tf->tf_vm86_fs = scp->sc_fs; 692 tf->tf_vm86_gs = scp->sc_gs; 693 tf->tf_ds = _udatasel; 694 tf->tf_es = _udatasel; 695 tf->tf_fs = _udatasel; 696 } else { 697 /* 698 * Don't allow users to change privileged or reserved flags. 699 */ 700 /* 701 * XXX do allow users to change the privileged flag PSL_RF. 
702 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers 703 * should sometimes set it there too. tf_eflags is kept in 704 * the signal context during signal handling and there is no 705 * other place to remember it, so the PSL_RF bit may be 706 * corrupted by the signal handler without us knowing. 707 * Corruption of the PSL_RF bit at worst causes one more or 708 * one less debugger trap, so allowing it is fairly harmless. 709 */ 710 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) { 711 return (EINVAL); 712 } 713 714 /* 715 * Don't allow users to load a valid privileged %cs. Let the 716 * hardware check for invalid selectors, excess privilege in 717 * other selectors, invalid %eip's and invalid %esp's. 718 */ 719 if (!CS_SECURE(scp->sc_cs)) { 720 trapsignal(p, SIGBUS, T_PROTFLT); 721 return (EINVAL); 722 } 723 regs->tf_ds = scp->sc_ds; 724 regs->tf_es = scp->sc_es; 725 regs->tf_fs = scp->sc_fs; 726 } 727 728 /* Restore remaining registers. */ 729 regs->tf_eax = scp->sc_eax; 730 regs->tf_ebx = scp->sc_ebx; 731 regs->tf_ecx = scp->sc_ecx; 732 regs->tf_edx = scp->sc_edx; 733 regs->tf_esi = scp->sc_esi; 734 regs->tf_edi = scp->sc_edi; 735 regs->tf_cs = scp->sc_cs; 736 regs->tf_ss = scp->sc_ss; 737 regs->tf_isp = scp->sc_isp; 738 regs->tf_ebp = scp->sc_fp; 739 regs->tf_esp = scp->sc_sp; 740 regs->tf_eip = scp->sc_pc; 741 regs->tf_eflags = eflags; 742 743 PROC_LOCK(p); 744#if defined(COMPAT_43) || defined(COMPAT_SUNOS) 745 if (scp->sc_onstack & 1) 746 p->p_sigstk.ss_flags |= SS_ONSTACK; 747 else 748 p->p_sigstk.ss_flags &= ~SS_ONSTACK; 749#endif 750 SIGSETOLD(p->p_sigmask, scp->sc_mask); 751 SIG_CANTMASK(p->p_sigmask); 752 signotify(p); 753 PROC_UNLOCK(p); 754 return (EJUSTRETURN); 755} 756#endif /* COMPAT_43 */ 757 758#ifdef COMPAT_FREEBSD4 759/* 760 * MPSAFE 761 */ 762int 763freebsd4_sigreturn(td, uap) 764 struct thread *td; 765 struct freebsd4_sigreturn_args /* { 766 const ucontext4 *sigcntxp; 767 } */ *uap; 768{ 769 struct ucontext4 uc; 770 struct proc *p = td->td_proc; 771 struct trapframe *regs; 772 const struct ucontext4 *ucp; 773 int cs, eflags, error; 774 775 error = copyin(uap->sigcntxp, &uc, sizeof(uc)); 776 if (error != 0) 777 return (error); 778 ucp = &uc; 779 regs = td->td_frame; 780 eflags = ucp->uc_mcontext.mc_eflags; 781 if (eflags & PSL_VM) { 782 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 783 struct vm86_kernel *vm86; 784 785 /* 786 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't 787 * set up the vm86 area, and we can't enter vm86 mode. 788 */ 789 if (td->td_pcb->pcb_ext == 0) 790 return (EINVAL); 791 vm86 = &td->td_pcb->pcb_ext->ext_vm86; 792 if (vm86->vm86_inited == 0) 793 return (EINVAL); 794 795 /* Go back to user mode if both flags are set. */ 796 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) 797 trapsignal(p, SIGBUS, 0); 798 799 if (vm86->vm86_has_vme) { 800 eflags = (tf->tf_eflags & ~VME_USERCHANGE) | 801 (eflags & VME_USERCHANGE) | PSL_VM; 802 } else { 803 vm86->vm86_eflags = eflags; /* save VIF, VIP */ 804 eflags = (tf->tf_eflags & ~VM_USERCHANGE) | 805 (eflags & VM_USERCHANGE) | PSL_VM; 806 } 807 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe)); 808 tf->tf_eflags = eflags; 809 tf->tf_vm86_ds = tf->tf_ds; 810 tf->tf_vm86_es = tf->tf_es; 811 tf->tf_vm86_fs = tf->tf_fs; 812 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs; 813 tf->tf_ds = _udatasel; 814 tf->tf_es = _udatasel; 815 tf->tf_fs = _udatasel; 816 } else { 817 /* 818 * Don't allow users to change privileged or reserved flags. 
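		 * EFL_SECURE() only accepts the new eflags if every bit
		 * outside PSL_USERCHANGE matches the current trapframe
		 * value, so IOPL and the other system bits cannot be
		 * forged from user mode.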
819 */ 820 /* 821 * XXX do allow users to change the privileged flag PSL_RF. 822 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers 823 * should sometimes set it there too. tf_eflags is kept in 824 * the signal context during signal handling and there is no 825 * other place to remember it, so the PSL_RF bit may be 826 * corrupted by the signal handler without us knowing. 827 * Corruption of the PSL_RF bit at worst causes one more or 828 * one less debugger trap, so allowing it is fairly harmless. 829 */ 830 if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) { 831 printf("freebsd4_sigreturn: eflags = 0x%x\n", eflags); 832 return (EINVAL); 833 } 834 835 /* 836 * Don't allow users to load a valid privileged %cs. Let the 837 * hardware check for invalid selectors, excess privilege in 838 * other selectors, invalid %eip's and invalid %esp's. 839 */ 840 cs = ucp->uc_mcontext.mc_cs; 841 if (!CS_SECURE(cs)) { 842 printf("freebsd4_sigreturn: cs = 0x%x\n", cs); 843 trapsignal(p, SIGBUS, T_PROTFLT); 844 return (EINVAL); 845 } 846 847 bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs)); 848 } 849 850 PROC_LOCK(p); 851#if defined(COMPAT_43) || defined(COMPAT_SUNOS) 852 if (ucp->uc_mcontext.mc_onstack & 1) 853 p->p_sigstk.ss_flags |= SS_ONSTACK; 854 else 855 p->p_sigstk.ss_flags &= ~SS_ONSTACK; 856#endif 857 858 p->p_sigmask = ucp->uc_sigmask; 859 SIG_CANTMASK(p->p_sigmask); 860 signotify(p); 861 PROC_UNLOCK(p); 862 return (EJUSTRETURN); 863} 864#endif /* COMPAT_FREEBSD4 */ 865 866/* 867 * MPSAFE 868 */ 869int 870sigreturn(td, uap) 871 struct thread *td; 872 struct sigreturn_args /* { 873 const __ucontext *sigcntxp; 874 } */ *uap; 875{ 876 ucontext_t uc; 877 struct proc *p = td->td_proc; 878 struct trapframe *regs; 879 const ucontext_t *ucp; 880 int cs, eflags, error, ret; 881 882 error = copyin(uap->sigcntxp, &uc, sizeof(uc)); 883 if (error != 0) 884 return (error); 885 ucp = &uc; 886 regs = td->td_frame; 887 eflags = ucp->uc_mcontext.mc_eflags; 888 if (eflags & PSL_VM) { 889 struct trapframe_vm86 *tf = (struct trapframe_vm86 *)regs; 890 struct vm86_kernel *vm86; 891 892 /* 893 * if pcb_ext == 0 or vm86_inited == 0, the user hasn't 894 * set up the vm86 area, and we can't enter vm86 mode. 895 */ 896 if (td->td_pcb->pcb_ext == 0) 897 return (EINVAL); 898 vm86 = &td->td_pcb->pcb_ext->ext_vm86; 899 if (vm86->vm86_inited == 0) 900 return (EINVAL); 901 902 /* Go back to user mode if both flags are set. */ 903 if ((eflags & PSL_VIP) && (eflags & PSL_VIF)) 904 trapsignal(p, SIGBUS, 0); 905 906 if (vm86->vm86_has_vme) { 907 eflags = (tf->tf_eflags & ~VME_USERCHANGE) | 908 (eflags & VME_USERCHANGE) | PSL_VM; 909 } else { 910 vm86->vm86_eflags = eflags; /* save VIF, VIP */ 911 eflags = (tf->tf_eflags & ~VM_USERCHANGE) | 912 (eflags & VM_USERCHANGE) | PSL_VM; 913 } 914 bcopy(&ucp->uc_mcontext.mc_fs, tf, sizeof(struct trapframe)); 915 tf->tf_eflags = eflags; 916 tf->tf_vm86_ds = tf->tf_ds; 917 tf->tf_vm86_es = tf->tf_es; 918 tf->tf_vm86_fs = tf->tf_fs; 919 tf->tf_vm86_gs = ucp->uc_mcontext.mc_gs; 920 tf->tf_ds = _udatasel; 921 tf->tf_es = _udatasel; 922 tf->tf_fs = _udatasel; 923 } else { 924 /* 925 * Don't allow users to change privileged or reserved flags. 926 */ 927 /* 928 * XXX do allow users to change the privileged flag PSL_RF. 929 * The cpu sets PSL_RF in tf_eflags for faults. Debuggers 930 * should sometimes set it there too. 
tf_eflags is kept in
		 * the signal context during signal handling and there is no
		 * other place to remember it, so the PSL_RF bit may be
		 * corrupted by the signal handler without us knowing.
		 * Corruption of the PSL_RF bit at worst causes one more or
		 * one less debugger trap, so allowing it is fairly harmless.
		 */
		if (!EFL_SECURE(eflags & ~PSL_RF, regs->tf_eflags & ~PSL_RF)) {
			printf("sigreturn: eflags = 0x%x\n", eflags);
			return (EINVAL);
		}

		/*
		 * Don't allow users to load a valid privileged %cs.  Let the
		 * hardware check for invalid selectors, excess privilege in
		 * other selectors, invalid %eip's and invalid %esp's.
		 */
		cs = ucp->uc_mcontext.mc_cs;
		if (!CS_SECURE(cs)) {
			printf("sigreturn: cs = 0x%x\n", cs);
			trapsignal(p, SIGBUS, T_PROTFLT);
			return (EINVAL);
		}

		ret = set_fpcontext(td, &ucp->uc_mcontext);
		if (ret != 0)
			return (ret);
		bcopy(&ucp->uc_mcontext.mc_fs, regs, sizeof(*regs));
	}

	PROC_LOCK(p);
#if defined(COMPAT_43) || defined(COMPAT_SUNOS)
	if (ucp->uc_mcontext.mc_onstack & 1)
		p->p_sigstk.ss_flags |= SS_ONSTACK;
	else
		p->p_sigstk.ss_flags &= ~SS_ONSTACK;
#endif

	p->p_sigmask = ucp->uc_sigmask;
	SIG_CANTMASK(p->p_sigmask);
	signotify(p);
	PROC_UNLOCK(p);
	return (EJUSTRETURN);
}

/*
 * Machine dependent boot() routine
 *
 * I haven't seen anything to put here yet
 * Possibly some stuff might be grafted back here from boot()
 */
void
cpu_boot(int howto)
{
}

/*
 * Shut down the CPU as much as possible
 */
void
cpu_halt(void)
{
	for (;;)
		__asm__ ("hlt");
}

/*
 * Hook to idle the CPU when possible.  In the SMP case we default to
 * off because a halted cpu will not currently pick up a new thread in the
 * run queue until the next timer tick.  If turned on this will result in
 * approximately a 4.2% loss in real time performance in buildworld tests
 * (but improves user and sys times oddly enough), and saves approximately
 * 5% in power consumption on an idle machine (tests w/2xCPU 1.1GHz P3).
 *
 * XXX we need to have a cpu mask of idle cpus and generate an IPI or
 * otherwise generate some sort of interrupt to wake up cpus sitting in HLT.
 * Then we can have our cake and eat it too.
 */
#ifdef SMP
static int cpu_idle_hlt = 0;
#else
static int cpu_idle_hlt = 1;
#endif
SYSCTL_INT(_machdep, OID_AUTO, cpu_idle_hlt, CTLFLAG_RW,
    &cpu_idle_hlt, 0, "Idle loop HLT enable");

/*
 * Note that we have to be careful here to avoid a race between checking
 * sched_runnable() and actually halting.  If we don't do this, we may waste
 * the time between calling hlt and the next interrupt even though there
 * is a runnable process.
 */
void
cpu_idle(void)
{
	if (cpu_idle_hlt) {
		disable_intr();
		if (sched_runnable()) {
			enable_intr();
		} else {
			/*
			 * we must absolutely guarantee that hlt is the
			 * absolute next instruction after sti or we
			 * introduce a timing window.
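			 *
			 * ("sti" only enables interrupts after the
			 * instruction that follows it has executed, so the
			 * back-to-back "sti; hlt" pair below cannot be
			 * split by an interrupt; that is what closes the
			 * window described above.)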
			 */
			__asm __volatile("sti; hlt");
		}
	}
}

/*
 * Clear registers on exec
 */
void
exec_setregs(td, entry, stack, ps_strings)
	struct thread *td;
	u_long entry;
	u_long stack;
	u_long ps_strings;
{
	struct trapframe *regs = td->td_frame;
	struct pcb *pcb = td->td_pcb;

	/* Reset pcb->pcb_gs and %gs before possibly invalidating it. */
	pcb->pcb_gs = _udatasel;
	load_gs(_udatasel);

	if (td->td_proc->p_md.md_ldt)
		user_ldt_free(td);

	bzero((char *)regs, sizeof(struct trapframe));
	regs->tf_eip = entry;
	regs->tf_esp = stack;
	regs->tf_eflags = PSL_USER | (regs->tf_eflags & PSL_T);
	regs->tf_ss = _udatasel;
	regs->tf_ds = _udatasel;
	regs->tf_es = _udatasel;
	regs->tf_fs = _udatasel;
	regs->tf_cs = _ucodesel;

	/* PS_STRINGS value for BSD/OS binaries.  It is 0 for non-BSD/OS. */
	regs->tf_ebx = ps_strings;

	/*
	 * Reset the hardware debug registers if they were in use.
	 * They won't have any meaning for the newly exec'd process.
	 */
	if (pcb->pcb_flags & PCB_DBREGS) {
		pcb->pcb_dr0 = 0;
		pcb->pcb_dr1 = 0;
		pcb->pcb_dr2 = 0;
		pcb->pcb_dr3 = 0;
		pcb->pcb_dr6 = 0;
		pcb->pcb_dr7 = 0;
		if (pcb == PCPU_GET(curpcb)) {
			/*
			 * Clear the debug registers on the running
			 * CPU, otherwise they will end up affecting
			 * the next process we switch to.
			 */
			reset_dbregs();
		}
		pcb->pcb_flags &= ~PCB_DBREGS;
	}

	/*
	 * Initialize the math emulator (if any) for the current process.
	 * Actually, just clear the bit that says that the emulator has
	 * been initialized.  Initialization is delayed until the process
	 * traps to the emulator (if it is done at all) mainly because
	 * emulators don't provide an entry point for initialization.
	 */
	td->td_pcb->pcb_flags &= ~FP_SOFTFP;

	/*
	 * Arrange to trap the next npx or `fwait' instruction (see npx.c
	 * for why fwait must be trapped at least if there is an npx or an
	 * emulator).  This is mainly to handle the case where npx0 is not
	 * configured, since the npx routines normally set up the trap
	 * otherwise.  It should be done only at boot time, but doing it
	 * here allows modifying `npx_exists' for testing the emulator on
	 * systems with an npx.
	 */
	load_cr0(rcr0() | CR0_MP | CR0_TS);

	/* Initialize the npx (if any) for the current process. */
	/*
	 * XXX the above load_cr0() also initializes it and is a layering
	 * violation if NPX is configured.  It drops the npx partially
	 * and this would be fatal if we were interrupted now, and decided
	 * to force the state to the pcb, and checked the invariant
	 * (CR0_TS clear) if and only if PCPU_GET(fpcurthread) != NULL).
	 * ALL of this can happen except the check.  The check used to
	 * happen and be fatal later when we didn't complete the drop
	 * before returning to user mode.  This should be fixed properly
	 * soon.
	 */
	fpstate_drop(td);

	/*
	 * XXX - Linux emulator
	 * Make sure edx is 0x0 on entry.  Linux binaries depend
	 * on it.
	 */
	td->td_retval[1] = 0;
}

void
cpu_setregs(void)
{
	unsigned int cr0;

	cr0 = rcr0();
#ifdef SMP
	cr0 |= CR0_NE;			/* Done by npxinit() */
#endif
	cr0 |= CR0_MP | CR0_TS;		/* Done at every execve() too.
*/ 1147#ifndef I386_CPU 1148 cr0 |= CR0_WP | CR0_AM; 1149#endif 1150 load_cr0(cr0); 1151 load_gs(_udatasel); 1152} 1153 1154static int 1155sysctl_machdep_adjkerntz(SYSCTL_HANDLER_ARGS) 1156{ 1157 int error; 1158 error = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, 1159 req); 1160 if (!error && req->newptr) 1161 resettodr(); 1162 return (error); 1163} 1164 1165SYSCTL_PROC(_machdep, CPU_ADJKERNTZ, adjkerntz, CTLTYPE_INT|CTLFLAG_RW, 1166 &adjkerntz, 0, sysctl_machdep_adjkerntz, "I", ""); 1167 1168SYSCTL_INT(_machdep, CPU_DISRTCSET, disable_rtc_set, 1169 CTLFLAG_RW, &disable_rtc_set, 0, ""); 1170 1171SYSCTL_STRUCT(_machdep, CPU_BOOTINFO, bootinfo, 1172 CTLFLAG_RD, &bootinfo, bootinfo, ""); 1173 1174SYSCTL_INT(_machdep, CPU_WALLCLOCK, wall_cmos_clock, 1175 CTLFLAG_RW, &wall_cmos_clock, 0, ""); 1176 1177u_long bootdev; /* not a dev_t - encoding is different */ 1178SYSCTL_ULONG(_machdep, OID_AUTO, guessed_bootdev, 1179 CTLFLAG_RD, &bootdev, 0, "Maybe the Boot device (not in dev_t format)"); 1180 1181/* 1182 * Initialize 386 and configure to run kernel 1183 */ 1184 1185/* 1186 * Initialize segments & interrupt table 1187 */ 1188 1189int _default_ldt; 1190union descriptor gdt[NGDT * MAXCPU]; /* global descriptor table */ 1191static struct gate_descriptor idt0[NIDT]; 1192struct gate_descriptor *idt = &idt0[0]; /* interrupt descriptor table */ 1193union descriptor ldt[NLDT]; /* local descriptor table */ 1194#ifdef SMP 1195/* table descriptors - used to load tables by microp */ 1196struct region_descriptor r_gdt, r_idt; 1197#endif 1198 1199int private_tss; /* flag indicating private tss */ 1200 1201#if defined(I586_CPU) && !defined(NO_F00F_HACK) 1202extern int has_f00f_bug; 1203#endif 1204 1205static struct i386tss dblfault_tss; 1206static char dblfault_stack[PAGE_SIZE]; 1207 1208extern struct user *proc0uarea; 1209extern vm_offset_t proc0kstack; 1210 1211 1212/* software prototypes -- in more palatable form */ 1213struct soft_segment_descriptor gdt_segs[] = { 1214/* GNULL_SEL 0 Null Descriptor */ 1215{ 0x0, /* segment base address */ 1216 0x0, /* length */ 1217 0, /* segment type */ 1218 0, /* segment descriptor priority level */ 1219 0, /* segment descriptor present */ 1220 0, 0, 1221 0, /* default 32 vs 16 bit size */ 1222 0 /* limit granularity (byte/page units)*/ }, 1223/* GCODE_SEL 1 Code Descriptor for kernel */ 1224{ 0x0, /* segment base address */ 1225 0xfffff, /* length - all address space */ 1226 SDT_MEMERA, /* segment type */ 1227 0, /* segment descriptor priority level */ 1228 1, /* segment descriptor present */ 1229 0, 0, 1230 1, /* default 32 vs 16 bit size */ 1231 1 /* limit granularity (byte/page units)*/ }, 1232/* GDATA_SEL 2 Data Descriptor for kernel */ 1233{ 0x0, /* segment base address */ 1234 0xfffff, /* length - all address space */ 1235 SDT_MEMRWA, /* segment type */ 1236 0, /* segment descriptor priority level */ 1237 1, /* segment descriptor present */ 1238 0, 0, 1239 1, /* default 32 vs 16 bit size */ 1240 1 /* limit granularity (byte/page units)*/ }, 1241/* GPRIV_SEL 3 SMP Per-Processor Private Data Descriptor */ 1242{ 0x0, /* segment base address */ 1243 0xfffff, /* length - all address space */ 1244 SDT_MEMRWA, /* segment type */ 1245 0, /* segment descriptor priority level */ 1246 1, /* segment descriptor present */ 1247 0, 0, 1248 1, /* default 32 vs 16 bit size */ 1249 1 /* limit granularity (byte/page units)*/ }, 1250/* GPROC0_SEL 4 Proc 0 Tss Descriptor */ 1251{ 1252 0x0, /* segment base address */ 1253 sizeof(struct i386tss)-1,/* length - all address 
space */ 1254 SDT_SYS386TSS, /* segment type */ 1255 0, /* segment descriptor priority level */ 1256 1, /* segment descriptor present */ 1257 0, 0, 1258 0, /* unused - default 32 vs 16 bit size */ 1259 0 /* limit granularity (byte/page units)*/ }, 1260/* GLDT_SEL 5 LDT Descriptor */ 1261{ (int) ldt, /* segment base address */ 1262 sizeof(ldt)-1, /* length - all address space */ 1263 SDT_SYSLDT, /* segment type */ 1264 SEL_UPL, /* segment descriptor priority level */ 1265 1, /* segment descriptor present */ 1266 0, 0, 1267 0, /* unused - default 32 vs 16 bit size */ 1268 0 /* limit granularity (byte/page units)*/ }, 1269/* GUSERLDT_SEL 6 User LDT Descriptor per process */ 1270{ (int) ldt, /* segment base address */ 1271 (512 * sizeof(union descriptor)-1), /* length */ 1272 SDT_SYSLDT, /* segment type */ 1273 0, /* segment descriptor priority level */ 1274 1, /* segment descriptor present */ 1275 0, 0, 1276 0, /* unused - default 32 vs 16 bit size */ 1277 0 /* limit granularity (byte/page units)*/ }, 1278/* GTGATE_SEL 7 Null Descriptor - Placeholder */ 1279{ 0x0, /* segment base address */ 1280 0x0, /* length - all address space */ 1281 0, /* segment type */ 1282 0, /* segment descriptor priority level */ 1283 0, /* segment descriptor present */ 1284 0, 0, 1285 0, /* default 32 vs 16 bit size */ 1286 0 /* limit granularity (byte/page units)*/ }, 1287/* GBIOSLOWMEM_SEL 8 BIOS access to realmode segment 0x40, must be #8 in GDT */ 1288{ 0x400, /* segment base address */ 1289 0xfffff, /* length */ 1290 SDT_MEMRWA, /* segment type */ 1291 0, /* segment descriptor priority level */ 1292 1, /* segment descriptor present */ 1293 0, 0, 1294 1, /* default 32 vs 16 bit size */ 1295 1 /* limit granularity (byte/page units)*/ }, 1296/* GPANIC_SEL 9 Panic Tss Descriptor */ 1297{ (int) &dblfault_tss, /* segment base address */ 1298 sizeof(struct i386tss)-1,/* length - all address space */ 1299 SDT_SYS386TSS, /* segment type */ 1300 0, /* segment descriptor priority level */ 1301 1, /* segment descriptor present */ 1302 0, 0, 1303 0, /* unused - default 32 vs 16 bit size */ 1304 0 /* limit granularity (byte/page units)*/ }, 1305/* GBIOSCODE32_SEL 10 BIOS 32-bit interface (32bit Code) */ 1306{ 0, /* segment base address (overwritten) */ 1307 0xfffff, /* length */ 1308 SDT_MEMERA, /* segment type */ 1309 0, /* segment descriptor priority level */ 1310 1, /* segment descriptor present */ 1311 0, 0, 1312 0, /* default 32 vs 16 bit size */ 1313 1 /* limit granularity (byte/page units)*/ }, 1314/* GBIOSCODE16_SEL 11 BIOS 32-bit interface (16bit Code) */ 1315{ 0, /* segment base address (overwritten) */ 1316 0xfffff, /* length */ 1317 SDT_MEMERA, /* segment type */ 1318 0, /* segment descriptor priority level */ 1319 1, /* segment descriptor present */ 1320 0, 0, 1321 0, /* default 32 vs 16 bit size */ 1322 1 /* limit granularity (byte/page units)*/ }, 1323/* GBIOSDATA_SEL 12 BIOS 32-bit interface (Data) */ 1324{ 0, /* segment base address (overwritten) */ 1325 0xfffff, /* length */ 1326 SDT_MEMRWA, /* segment type */ 1327 0, /* segment descriptor priority level */ 1328 1, /* segment descriptor present */ 1329 0, 0, 1330 1, /* default 32 vs 16 bit size */ 1331 1 /* limit granularity (byte/page units)*/ }, 1332/* GBIOSUTIL_SEL 13 BIOS 16-bit interface (Utility) */ 1333{ 0, /* segment base address (overwritten) */ 1334 0xfffff, /* length */ 1335 SDT_MEMRWA, /* segment type */ 1336 0, /* segment descriptor priority level */ 1337 1, /* segment descriptor present */ 1338 0, 0, 1339 0, /* default 32 vs 16 bit size */ 
1340 1 /* limit granularity (byte/page units)*/ }, 1341/* GBIOSARGS_SEL 14 BIOS 16-bit interface (Arguments) */ 1342{ 0, /* segment base address (overwritten) */ 1343 0xfffff, /* length */ 1344 SDT_MEMRWA, /* segment type */ 1345 0, /* segment descriptor priority level */ 1346 1, /* segment descriptor present */ 1347 0, 0, 1348 0, /* default 32 vs 16 bit size */ 1349 1 /* limit granularity (byte/page units)*/ }, 1350}; 1351 1352static struct soft_segment_descriptor ldt_segs[] = { 1353 /* Null Descriptor - overwritten by call gate */ 1354{ 0x0, /* segment base address */ 1355 0x0, /* length - all address space */ 1356 0, /* segment type */ 1357 0, /* segment descriptor priority level */ 1358 0, /* segment descriptor present */ 1359 0, 0, 1360 0, /* default 32 vs 16 bit size */ 1361 0 /* limit granularity (byte/page units)*/ }, 1362 /* Null Descriptor - overwritten by call gate */ 1363{ 0x0, /* segment base address */ 1364 0x0, /* length - all address space */ 1365 0, /* segment type */ 1366 0, /* segment descriptor priority level */ 1367 0, /* segment descriptor present */ 1368 0, 0, 1369 0, /* default 32 vs 16 bit size */ 1370 0 /* limit granularity (byte/page units)*/ }, 1371 /* Null Descriptor - overwritten by call gate */ 1372{ 0x0, /* segment base address */ 1373 0x0, /* length - all address space */ 1374 0, /* segment type */ 1375 0, /* segment descriptor priority level */ 1376 0, /* segment descriptor present */ 1377 0, 0, 1378 0, /* default 32 vs 16 bit size */ 1379 0 /* limit granularity (byte/page units)*/ }, 1380 /* Code Descriptor for user */ 1381{ 0x0, /* segment base address */ 1382 0xfffff, /* length - all address space */ 1383 SDT_MEMERA, /* segment type */ 1384 SEL_UPL, /* segment descriptor priority level */ 1385 1, /* segment descriptor present */ 1386 0, 0, 1387 1, /* default 32 vs 16 bit size */ 1388 1 /* limit granularity (byte/page units)*/ }, 1389 /* Null Descriptor - overwritten by call gate */ 1390{ 0x0, /* segment base address */ 1391 0x0, /* length - all address space */ 1392 0, /* segment type */ 1393 0, /* segment descriptor priority level */ 1394 0, /* segment descriptor present */ 1395 0, 0, 1396 0, /* default 32 vs 16 bit size */ 1397 0 /* limit granularity (byte/page units)*/ }, 1398 /* Data Descriptor for user */ 1399{ 0x0, /* segment base address */ 1400 0xfffff, /* length - all address space */ 1401 SDT_MEMRWA, /* segment type */ 1402 SEL_UPL, /* segment descriptor priority level */ 1403 1, /* segment descriptor present */ 1404 0, 0, 1405 1, /* default 32 vs 16 bit size */ 1406 1 /* limit granularity (byte/page units)*/ }, 1407}; 1408 1409void 1410setidt(idx, func, typ, dpl, selec) 1411 int idx; 1412 inthand_t *func; 1413 int typ; 1414 int dpl; 1415 int selec; 1416{ 1417 struct gate_descriptor *ip; 1418 1419 ip = idt + idx; 1420 ip->gd_looffset = (int)func; 1421 ip->gd_selector = selec; 1422 ip->gd_stkcpy = 0; 1423 ip->gd_xx = 0; 1424 ip->gd_type = typ; 1425 ip->gd_dpl = dpl; 1426 ip->gd_p = 1; 1427 ip->gd_hioffset = ((int)func)>>16 ; 1428} 1429 1430#define IDTVEC(name) __CONCAT(X,name) 1431 1432extern inthand_t 1433 IDTVEC(div), IDTVEC(dbg), IDTVEC(nmi), IDTVEC(bpt), IDTVEC(ofl), 1434 IDTVEC(bnd), IDTVEC(ill), IDTVEC(dna), IDTVEC(fpusegm), 1435 IDTVEC(tss), IDTVEC(missing), IDTVEC(stk), IDTVEC(prot), 1436 IDTVEC(page), IDTVEC(mchk), IDTVEC(rsvd), IDTVEC(fpu), IDTVEC(align), 1437 IDTVEC(xmm), IDTVEC(lcall_syscall), IDTVEC(int0x80_syscall); 1438 1439void 1440sdtossd(sd, ssd) 1441 struct segment_descriptor *sd; 1442 struct soft_segment_descriptor *ssd; 
{
	ssd->ssd_base = (sd->sd_hibase << 24) | sd->sd_lobase;
	ssd->ssd_limit = (sd->sd_hilimit << 16) | sd->sd_lolimit;
	ssd->ssd_type = sd->sd_type;
	ssd->ssd_dpl = sd->sd_dpl;
	ssd->ssd_p = sd->sd_p;
	ssd->ssd_def32 = sd->sd_def32;
	ssd->ssd_gran = sd->sd_gran;
}

#define PHYSMAP_SIZE	(2 * 8)

/*
 * Populate the (physmap) array with base/bound pairs describing the
 * available physical memory in the system, then test this memory and
 * build the phys_avail array describing the actually-available memory.
 *
 * If we cannot accurately determine the physical memory map, then use
 * the value from the 0xE801 call, and failing that, the RTC.
 *
 * Total memory size may be set by the kernel environment variable
 * hw.physmem or the compile-time define MAXMEM.
 */
static void
getmemsize(int first)
{
	int i, physmap_idx, pa_indx;
	u_int basemem, extmem;
	struct vm86frame vmf;
	struct vm86context vmc;
	vm_offset_t pa, physmap[PHYSMAP_SIZE];
	pt_entry_t *pte;
	char *cp;
	struct bios_smap *smap;

	bzero(&vmf, sizeof(struct vm86frame));
	bzero(physmap, sizeof(physmap));
	basemem = 0;

	/*
	 * map page 1 R/W into the kernel page table so we can use it
	 * as a buffer.  The kernel will unmap this page later.
	 */
	pmap_kenter(KERNBASE + (1 << PAGE_SHIFT), 1);

	/*
	 * get memory map with INT 15:E820
	 */
	vmc.npages = 0;
	smap = (void *)vm86_addpage(&vmc, 1, KERNBASE + (1 << PAGE_SHIFT));
	vm86_getptr(&vmc, (vm_offset_t)smap, &vmf.vmf_es, &vmf.vmf_di);

	physmap_idx = 0;
	vmf.vmf_ebx = 0;
	do {
		vmf.vmf_eax = 0xE820;
		vmf.vmf_edx = SMAP_SIG;
		vmf.vmf_ecx = sizeof(struct bios_smap);
		i = vm86_datacall(0x15, &vmf, &vmc);
		if (i || vmf.vmf_eax != SMAP_SIG)
			break;
		if (boothowto & RB_VERBOSE)
			printf("SMAP type=%02x base=%08x %08x len=%08x %08x\n",
			    smap->type,
			    *(u_int32_t *)((char *)&smap->base + 4),
			    (u_int32_t)smap->base,
			    *(u_int32_t *)((char *)&smap->length + 4),
			    (u_int32_t)smap->length);

		if (smap->type != 0x01)
			goto next_run;

		if (smap->length == 0)
			goto next_run;

		if (smap->base >= 0xffffffff) {
			printf("%uK of memory above 4GB ignored\n",
			    (u_int)(smap->length / 1024));
			goto next_run;
		}

		for (i = 0; i <= physmap_idx; i += 2) {
			if (smap->base < physmap[i + 1]) {
				if (boothowto & RB_VERBOSE)
					printf(
	"Overlapping or non-monotonic memory region, ignoring second region\n");
				goto next_run;
			}
		}

		if (smap->base == physmap[physmap_idx + 1]) {
			physmap[physmap_idx + 1] += smap->length;
			goto next_run;
		}

		physmap_idx += 2;
		if (physmap_idx == PHYSMAP_SIZE) {
			printf(
		"Too many segments in the physical address map, giving up\n");
			break;
		}
		physmap[physmap_idx] = smap->base;
		physmap[physmap_idx + 1] = smap->base + smap->length;
next_run: ;
	} while (vmf.vmf_ebx != 0);

	/*
	 * Perform "base memory" related probes & setup
	 */
	for (i = 0; i <= physmap_idx; i += 2) {
		if (physmap[i] == 0x00000000) {
			basemem = physmap[i + 1] / 1024;
			break;
		}
	}

	/* Fall back to the old compatibility function for base memory */
	if (basemem == 0) {
		vm86_intcall(0x12, &vmf);
		basemem = vmf.vmf_ax;
	}

	if (basemem > 640) {
printf("Preposterous BIOS basemem of %uK, truncating to 640K\n", 1567 basemem); 1568 basemem = 640; 1569 } 1570 1571 /* 1572 * XXX if biosbasemem is now < 640, there is a `hole' 1573 * between the end of base memory and the start of 1574 * ISA memory. The hole may be empty or it may 1575 * contain BIOS code or data. Map it read/write so 1576 * that the BIOS can write to it. (Memory from 0 to 1577 * the physical end of the kernel is mapped read-only 1578 * to begin with and then parts of it are remapped. 1579 * The parts that aren't remapped form holes that 1580 * remain read-only and are unused by the kernel. 1581 * The base memory area is below the physical end of 1582 * the kernel and right now forms a read-only hole. 1583 * The part of it from PAGE_SIZE to 1584 * (trunc_page(biosbasemem * 1024) - 1) will be 1585 * remapped and used by the kernel later.) 1586 * 1587 * This code is similar to the code used in 1588 * pmap_mapdev, but since no memory needs to be 1589 * allocated we simply change the mapping. 1590 */ 1591 for (pa = trunc_page(basemem * 1024); 1592 pa < ISA_HOLE_START; pa += PAGE_SIZE) 1593 pmap_kenter(KERNBASE + pa, pa); 1594 1595 /* 1596 * if basemem != 640, map pages r/w into vm86 page table so 1597 * that the bios can scribble on it. 1598 */ 1599 pte = (pt_entry_t *)vm86paddr; 1600 for (i = basemem / 4; i < 160; i++) 1601 pte[i] = (i << PAGE_SHIFT) | PG_V | PG_RW | PG_U; 1602 1603 if (physmap[1] != 0) 1604 goto physmap_done; 1605 1606 /* 1607 * If we failed above, try memory map with INT 15:E801 1608 */ 1609 vmf.vmf_ax = 0xE801; 1610 if (vm86_intcall(0x15, &vmf) == 0) { 1611 extmem = vmf.vmf_cx + vmf.vmf_dx * 64; 1612 } else { 1613#if 0 1614 vmf.vmf_ah = 0x88; 1615 vm86_intcall(0x15, &vmf); 1616 extmem = vmf.vmf_ax; 1617#else 1618 /* 1619 * Prefer the RTC value for extended memory. 1620 */ 1621 extmem = rtcin(RTC_EXTLO) + (rtcin(RTC_EXTHI) << 8); 1622#endif 1623 } 1624 1625 /* 1626 * Special hack for chipsets that still remap the 384k hole when 1627 * there's 16MB of memory - this really confuses people that 1628 * are trying to use bus mastering ISA controllers with the 1629 * "16MB limit"; they only have 16MB, but the remapping puts 1630 * them beyond the limit. 1631 * 1632 * If extended memory is between 15-16MB (16-17MB phys address range), 1633 * chop it to 15MB. 1634 */ 1635 if ((extmem > 15 * 1024) && (extmem < 16 * 1024)) 1636 extmem = 15 * 1024; 1637 1638 physmap[0] = 0; 1639 physmap[1] = basemem * 1024; 1640 physmap_idx = 2; 1641 physmap[physmap_idx] = 0x100000; 1642 physmap[physmap_idx + 1] = physmap[physmap_idx] + extmem * 1024; 1643 1644physmap_done: 1645 /* 1646 * Now, physmap contains a map of physical memory. 1647 */ 1648 1649#ifdef SMP 1650 /* make hole for AP bootstrap code */ 1651 physmap[1] = mp_bootaddress(physmap[1] / 1024); 1652 1653 /* look for the MP hardware - needed for apic addresses */ 1654 i386_mp_probe(); 1655#endif 1656 1657 /* 1658 * Maxmem isn't the "maximum memory", it's one larger than the 1659 * highest page of the physical address space. It should be 1660 * called something like "Maxphyspage". We may adjust this 1661 * based on ``hw.physmem'' and the results of the memory test. 1662 */ 1663 Maxmem = atop(physmap[physmap_idx + 1]); 1664 1665#ifdef MAXMEM 1666 Maxmem = MAXMEM / 4; 1667#endif 1668 1669 /* 1670 * hw.physmem is a size in bytes; we also allow k, m, and g suffixes 1671 * for the appropriate modifiers. This overrides MAXMEM. 
	 */
	if ((cp = getenv("hw.physmem")) != NULL) {
		u_int64_t AllowMem, sanity;
		char *ep;

		sanity = AllowMem = strtouq(cp, &ep, 0);
		if ((ep != cp) && (*ep != 0)) {
			switch(*ep) {
			case 'g':
			case 'G':
				AllowMem <<= 10;
				/* FALLTHROUGH */
			case 'm':
			case 'M':
				AllowMem <<= 10;
				/* FALLTHROUGH */
			case 'k':
			case 'K':
				AllowMem <<= 10;
				break;
			default:
				AllowMem = sanity = 0;
			}
			if (AllowMem < sanity)
				AllowMem = 0;
		}
		if (AllowMem == 0)
			printf("Ignoring invalid memory size of '%s'\n", cp);
		else
			Maxmem = atop(AllowMem);
		freeenv(cp);
	}

	if (atop(physmap[physmap_idx + 1]) != Maxmem &&
	    (boothowto & RB_VERBOSE))
		printf("Physical memory use set to %ldK\n", Maxmem * 4);

	/*
	 * If Maxmem has been increased beyond what the system has detected,
	 * extend the last memory segment to the new limit.
	 */
	if (atop(physmap[physmap_idx + 1]) < Maxmem)
		physmap[physmap_idx + 1] = ptoa(Maxmem);

	/* call pmap initialization to make new kernel address space */
	pmap_bootstrap(first, 0);

	/*
	 * Size up each available chunk of physical memory.
	 */
	physmap[0] = PAGE_SIZE;		/* mask off page 0 */
	pa_indx = 0;
	phys_avail[pa_indx++] = physmap[0];
	phys_avail[pa_indx] = physmap[0];
	pte = CMAP1;

	/*
	 * physmap is in bytes, so when converting to page boundaries,
	 * round up the start address and round down the end address.
	 */
	for (i = 0; i <= physmap_idx; i += 2) {
		vm_offset_t end;

		end = ptoa(Maxmem);
		if (physmap[i + 1] < end)
			end = trunc_page(physmap[i + 1]);
		for (pa = round_page(physmap[i]); pa < end; pa += PAGE_SIZE) {
			int tmp, page_bad;
			int *ptr = (int *)CADDR1;

			/*
			 * block out kernel memory as not available.
			 */
			if (pa >= 0x100000 && pa < first)
				continue;

			page_bad = FALSE;

			/*
			 * map page into kernel: valid, read/write,non-cacheable
			 */
			*pte = pa | PG_V | PG_RW | PG_N;
			invltlb();

			tmp = *(int *)ptr;
			/*
			 * Test for alternating 1's and 0's
			 */
			*(volatile int *)ptr = 0xaaaaaaaa;
			if (*(volatile int *)ptr != 0xaaaaaaaa) {
				page_bad = TRUE;
			}
			/*
			 * Test for alternating 0's and 1's
			 */
			*(volatile int *)ptr = 0x55555555;
			if (*(volatile int *)ptr != 0x55555555) {
				page_bad = TRUE;
			}
			/*
			 * Test for all 1's
			 */
			*(volatile int *)ptr = 0xffffffff;
			if (*(volatile int *)ptr != 0xffffffff) {
				page_bad = TRUE;
			}
			/*
			 * Test for all 0's
			 */
			*(volatile int *)ptr = 0x0;
			if (*(volatile int *)ptr != 0x0) {
				page_bad = TRUE;
			}
			/*
			 * Restore original value.
			 */
			*(int *)ptr = tmp;

			/*
			 * Adjust array of valid/good pages.
			 */
			if (page_bad == TRUE) {
				continue;
			}
			/*
			 * If this good page is a continuation of the
			 * previous set of good pages, then just increase
			 * the end pointer.  Otherwise start a new chunk.
			 * Note that "end" points one past the last valid
			 * address, making the range >= start and < end.
			 * If we're also doing a speculative memory
			 * test and we're at or past the end, bump up Maxmem
			 * so that we keep going.  The first bad page
			 * will terminate the loop.
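			 *
			 * (The finished phys_avail[] is consumed elsewhere
			 * as start/end pairs terminated by a 0/0 entry,
			 * e.g. cpu_startup() above walks it with
			 * "for (indx = 0; phys_avail[indx + 1] != 0; indx += 2)".)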
1804 */ 1805 if (phys_avail[pa_indx] == pa) { 1806 phys_avail[pa_indx] += PAGE_SIZE; 1807 } else { 1808 pa_indx++; 1809 if (pa_indx == PHYS_AVAIL_ARRAY_END) { 1810 printf( 1811 "Too many holes in the physical address space, giving up\n"); 1812 pa_indx--; 1813 break; 1814 } 1815 phys_avail[pa_indx++] = pa; /* start */ 1816 phys_avail[pa_indx] = pa + PAGE_SIZE; /* end */ 1817 } 1818 physmem++; 1819 } 1820 } 1821 *pte = 0; 1822 invltlb(); 1823 1824 /* 1825 * XXX 1826 * The last chunk must contain at least one page plus the message 1827 * buffer to avoid complicating other code (message buffer address 1828 * calculation, etc.). 1829 */ 1830 while (phys_avail[pa_indx - 1] + PAGE_SIZE + 1831 round_page(MSGBUF_SIZE) >= phys_avail[pa_indx]) { 1832 physmem -= atop(phys_avail[pa_indx] - phys_avail[pa_indx - 1]); 1833 phys_avail[pa_indx--] = 0; 1834 phys_avail[pa_indx--] = 0; 1835 } 1836 1837 Maxmem = atop(phys_avail[pa_indx]); 1838 1839 /* Trim off space for the message buffer. */ 1840 phys_avail[pa_indx] -= round_page(MSGBUF_SIZE); 1841 1842 avail_end = phys_avail[pa_indx]; 1843} 1844 1845void 1846init386(first) 1847 int first; 1848{ 1849 struct gate_descriptor *gdp; 1850 int gsel_tss, metadata_missing, off, x; 1851#ifndef SMP 1852 /* table descriptors - used to load tables by microp */ 1853 struct region_descriptor r_gdt, r_idt; 1854#endif 1855 struct pcpu *pc; 1856 1857 proc0.p_uarea = proc0uarea; 1858 thread0.td_kstack = proc0kstack; 1859 thread0.td_pcb = (struct pcb *) 1860 (thread0.td_kstack + KSTACK_PAGES * PAGE_SIZE) - 1; 1861 atdevbase = ISA_HOLE_START + KERNBASE; 1862 1863 /* 1864 * This may be done better later if it gets more high level 1865 * components in it. If so just link td->td_proc here. 1866 */ 1867 proc_linkup(&proc0, &ksegrp0, &kse0, &thread0); 1868 1869 metadata_missing = 0; 1870 if (bootinfo.bi_modulep) { 1871 preload_metadata = (caddr_t)bootinfo.bi_modulep + KERNBASE; 1872 preload_bootstrap_relocate(KERNBASE); 1873 } else { 1874 metadata_missing = 1; 1875 } 1876 if (envmode == 1) 1877 kern_envp = static_env; 1878 else if (bootinfo.bi_envp) 1879 kern_envp = (caddr_t)bootinfo.bi_envp + KERNBASE; 1880 1881 /* Init basic tunables, hz etc */ 1882 init_param1(); 1883 1884 /* 1885 * make gdt memory segments, the code segment goes up to end of the 1886 * page with etext in it, the data segment goes to the end of 1887 * the address space 1888 */ 1889 /* 1890 * XXX text protection is temporarily (?) disabled. The limit was 1891 * i386_btop(round_page(etext)) - 1. 1892 */ 1893 gdt_segs[GCODE_SEL].ssd_limit = atop(0 - 1); 1894 gdt_segs[GDATA_SEL].ssd_limit = atop(0 - 1); 1895#ifdef SMP 1896 pc = &SMP_prvspace[0].pcpu; 1897 gdt_segs[GPRIV_SEL].ssd_limit = 1898 atop(sizeof(struct privatespace) - 1); 1899#else 1900 pc = &__pcpu; 1901 gdt_segs[GPRIV_SEL].ssd_limit = 1902 atop(sizeof(struct pcpu) - 1); 1903#endif 1904 gdt_segs[GPRIV_SEL].ssd_base = (int) pc; 1905 gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss; 1906 1907 for (x = 0; x < NGDT; x++) { 1908#ifdef BDE_DEBUGGER 1909 /* avoid overwriting db entries with APM ones */ 1910 if (x >= GAPMCODE32_SEL && x <= GAPMDATA_SEL) 1911 continue; 1912#endif 1913 ssdtosd(&gdt_segs[x], &gdt[x].sd); 1914 } 1915 1916 r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1; 1917 r_gdt.rd_base = (int) gdt; 1918 lgdt(&r_gdt); 1919 1920 pcpu_init(pc, 0, sizeof(struct pcpu)); 1921 PCPU_SET(prvspace, pc); 1922 PCPU_SET(curthread, &thread0); 1923 1924 /* 1925 * Initialize mutexes. 
	 *
	 * icu_lock: in order to allow an interrupt to occur in a critical
	 *           section, to set pcpu->ipending (etc...) properly, we
	 *           must be able to get the icu lock, so it can't be
	 *           under witness.
	 */
	mutex_init();
	mtx_init(&clock_lock, "clk", NULL, MTX_SPIN | MTX_RECURSE);
	mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS);

	/* make ldt memory segments */
	/*
	 * XXX - VM_MAXUSER_ADDRESS is an end address, not a max.  And it
	 * should be spelled ...MAX_USER...
	 */
	ldt_segs[LUCODE_SEL].ssd_limit = atop(VM_MAXUSER_ADDRESS - 1);
	ldt_segs[LUDATA_SEL].ssd_limit = atop(VM_MAXUSER_ADDRESS - 1);
	for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
		ssdtosd(&ldt_segs[x], &ldt[x].sd);

	_default_ldt = GSEL(GLDT_SEL, SEL_KPL);
	lldt(_default_ldt);
	PCPU_SET(currentldt, _default_ldt);

	/* exceptions */
	for (x = 0; x < NIDT; x++)
		setidt(x, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
		    GSEL(GCODE_SEL, SEL_KPL));
	setidt(0, &IDTVEC(div), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(1, &IDTVEC(dbg), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(2, &IDTVEC(nmi), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(3, &IDTVEC(bpt), SDT_SYS386IGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(4, &IDTVEC(ofl), SDT_SYS386TGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(5, &IDTVEC(bnd), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(7, &IDTVEC(dna), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(8, 0, SDT_SYSTASKGT, SEL_KPL, GSEL(GPANIC_SEL, SEL_KPL));
	setidt(9, &IDTVEC(fpusegm), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(10, &IDTVEC(tss), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(11, &IDTVEC(missing), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(12, &IDTVEC(stk), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(14, &IDTVEC(page), SDT_SYS386IGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(15, &IDTVEC(rsvd), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(16, &IDTVEC(fpu), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(17, &IDTVEC(align), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(18, &IDTVEC(mchk), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(19, &IDTVEC(xmm), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(0x80, &IDTVEC(int0x80_syscall), SDT_SYS386TGT, SEL_UPL,
	    GSEL(GCODE_SEL, SEL_KPL));

	r_idt.rd_limit = sizeof(idt0) - 1;
	r_idt.rd_base = (int) idt;
	lidt(&r_idt);

	/*
	 * Initialize the console before we print anything out.
	 */
	cninit();

	if (metadata_missing)
		printf("WARNING: loader(8) metadata is missing!\n");

#ifdef DEV_ISA
	isa_defaultirq();
#endif

#ifdef DDB
	kdb_init();
	if (boothowto & RB_KDB)
		Debugger("Boot flags requested debugger");
#endif

	finishidentcpu();	/* Final stage of CPU initialization */
	setidt(6, &IDTVEC(ill), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	setidt(13, &IDTVEC(prot), SDT_SYS386TGT, SEL_KPL,
	    GSEL(GCODE_SEL, SEL_KPL));
	initializecpu();	/* Initialize CPU registers */

	/* make an initial TSS so the CPU can get an interrupt stack on syscall! */
	/* Note: -16 is so we can grow the trapframe if we came from vm86 */
	PCPU_SET(common_tss.tss_esp0, thread0.td_kstack +
	    KSTACK_PAGES * PAGE_SIZE - sizeof(struct pcb) - 16);
	PCPU_SET(common_tss.tss_ss0, GSEL(GDATA_SEL, SEL_KPL));
	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	private_tss = 0;
	PCPU_SET(tss_gdt, &gdt[GPROC0_SEL].sd);
	PCPU_SET(common_tssd, *PCPU_GET(tss_gdt));
	PCPU_SET(common_tss.tss_ioopt, (sizeof (struct i386tss)) << 16);
	ltr(gsel_tss);

	dblfault_tss.tss_esp = dblfault_tss.tss_esp0 = dblfault_tss.tss_esp1 =
	    dblfault_tss.tss_esp2 = (int)&dblfault_stack[sizeof(dblfault_stack)];
	dblfault_tss.tss_ss = dblfault_tss.tss_ss0 = dblfault_tss.tss_ss1 =
	    dblfault_tss.tss_ss2 = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_cr3 = (int)IdlePTD;
	dblfault_tss.tss_eip = (int)dblfault_handler;
	dblfault_tss.tss_eflags = PSL_KERNEL;
	dblfault_tss.tss_ds = dblfault_tss.tss_es =
	    dblfault_tss.tss_gs = GSEL(GDATA_SEL, SEL_KPL);
	dblfault_tss.tss_fs = GSEL(GPRIV_SEL, SEL_KPL);
	dblfault_tss.tss_cs = GSEL(GCODE_SEL, SEL_KPL);
	dblfault_tss.tss_ldt = GSEL(GLDT_SEL, SEL_KPL);

	vm86_initialize();
	getmemsize(first);
	init_param2(physmem);

	/* now running on new page tables, configured, and u/iom is accessible */

	/* Map the message buffer. */
	for (off = 0; off < round_page(MSGBUF_SIZE); off += PAGE_SIZE)
		pmap_kenter((vm_offset_t)msgbufp + off, avail_end + off);

	msgbufinit(msgbufp, MSGBUF_SIZE);

	/* make a call gate to reenter the kernel with */
	gdp = &ldt[LSYS5CALLS_SEL].gd;

	x = (int) &IDTVEC(lcall_syscall);
	gdp->gd_looffset = x;
	gdp->gd_selector = GSEL(GCODE_SEL, SEL_KPL);
	gdp->gd_stkcpy = 1;
	gdp->gd_type = SDT_SYS386CGT;
	gdp->gd_dpl = SEL_UPL;
	gdp->gd_p = 1;
	gdp->gd_hioffset = x >> 16;

	/* XXX does this work?
	 */
	ldt[LBSDICALLS_SEL] = ldt[LSYS5CALLS_SEL];
	ldt[LSOL26CALLS_SEL] = ldt[LSYS5CALLS_SEL];

	/* transfer to user mode */

	_ucodesel = LSEL(LUCODE_SEL, SEL_UPL);
	_udatasel = LSEL(LUDATA_SEL, SEL_UPL);

	/* setup proc 0's pcb */
	thread0.td_pcb->pcb_flags = 0;	/* XXXKSE */
	thread0.td_pcb->pcb_cr3 = (int)IdlePTD;
	thread0.td_pcb->pcb_ext = 0;
	thread0.td_frame = &proc0_tf;
}

void
cpu_pcpu_init(struct pcpu *pcpu, int cpuid, size_t size)
{
}

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
static void f00f_hack(void *unused);
SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL);

static void
f00f_hack(void *unused)
{
	struct gate_descriptor *new_idt;
#ifndef SMP
	struct region_descriptor r_idt;
#endif
	vm_offset_t tmp;

	if (!has_f00f_bug)
		return;

	GIANT_REQUIRED;

	printf("Intel Pentium detected, installing workaround for F00F bug\n");

	r_idt.rd_limit = sizeof(idt0) - 1;

	tmp = kmem_alloc(kernel_map, PAGE_SIZE * 2);
	if (tmp == 0)
		panic("kmem_alloc returned 0");
	if (((unsigned int)tmp & (PAGE_SIZE-1)) != 0)
		panic("kmem_alloc returned non-page-aligned memory");
	/* Put the first seven entries in the lower page */
	new_idt = (struct gate_descriptor*)(tmp + PAGE_SIZE - (7*8));
	bcopy(idt, new_idt, sizeof(idt0));
	r_idt.rd_base = (int)new_idt;
	lidt(&r_idt);
	idt = new_idt;
	if (vm_map_protect(kernel_map, tmp, tmp + PAGE_SIZE,
	    VM_PROT_READ, FALSE) != KERN_SUCCESS)
		panic("vm_map_protect failed");
	return;
}
#endif /* defined(I586_CPU) && !NO_F00F_HACK */

int
ptrace_set_pc(struct thread *td, unsigned long addr)
{
	td->td_frame->tf_eip = addr;
	return (0);
}

int
ptrace_single_step(struct thread *td)
{
	td->td_frame->tf_eflags |= PSL_T;
	return (0);
}

int
fill_regs(struct thread *td, struct reg *regs)
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = td->td_frame;
	regs->r_fs = tp->tf_fs;
	regs->r_es = tp->tf_es;
	regs->r_ds = tp->tf_ds;
	regs->r_edi = tp->tf_edi;
	regs->r_esi = tp->tf_esi;
	regs->r_ebp = tp->tf_ebp;
	regs->r_ebx = tp->tf_ebx;
	regs->r_edx = tp->tf_edx;
	regs->r_ecx = tp->tf_ecx;
	regs->r_eax = tp->tf_eax;
	regs->r_eip = tp->tf_eip;
	regs->r_cs = tp->tf_cs;
	regs->r_eflags = tp->tf_eflags;
	regs->r_esp = tp->tf_esp;
	regs->r_ss = tp->tf_ss;
	pcb = td->td_pcb;
	regs->r_gs = pcb->pcb_gs;
	return (0);
}

int
set_regs(struct thread *td, struct reg *regs)
{
	struct pcb *pcb;
	struct trapframe *tp;

	tp = td->td_frame;
	if (!EFL_SECURE(regs->r_eflags, tp->tf_eflags) ||
	    !CS_SECURE(regs->r_cs))
		return (EINVAL);
	tp->tf_fs = regs->r_fs;
	tp->tf_es = regs->r_es;
	tp->tf_ds = regs->r_ds;
	tp->tf_edi = regs->r_edi;
	tp->tf_esi = regs->r_esi;
	tp->tf_ebp = regs->r_ebp;
	tp->tf_ebx = regs->r_ebx;
	tp->tf_edx = regs->r_edx;
	tp->tf_ecx = regs->r_ecx;
	tp->tf_eax = regs->r_eax;
	tp->tf_eip = regs->r_eip;
	tp->tf_cs = regs->r_cs;
	tp->tf_eflags = regs->r_eflags;
	tp->tf_esp = regs->r_esp;
	tp->tf_ss = regs->r_ss;
	pcb = td->td_pcb;
	pcb->pcb_gs = regs->r_gs;
	return (0);
}

#ifdef CPU_ENABLE_SSE
static void
fill_fpregs_xmm(sv_xmm, sv_87)
	struct savexmm *sv_xmm;
	struct save87 *sv_87;
{
	register struct env87 *penv_87 = &sv_87->sv_env;
	register struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	bzero(sv_87, sizeof(*sv_87));

	/* FPU control/status */
	penv_87->en_cw = penv_xmm->en_cw;
	penv_87->en_sw = penv_xmm->en_sw;
	penv_87->en_tw = penv_xmm->en_tw;
	penv_87->en_fip = penv_xmm->en_fip;
	penv_87->en_fcs = penv_xmm->en_fcs;
	penv_87->en_opcode = penv_xmm->en_opcode;
	penv_87->en_foo = penv_xmm->en_foo;
	penv_87->en_fos = penv_xmm->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_87->sv_ac[i] = sv_xmm->sv_fp[i].fp_acc;
}

static void
set_fpregs_xmm(sv_87, sv_xmm)
	struct save87 *sv_87;
	struct savexmm *sv_xmm;
{
	register struct env87 *penv_87 = &sv_87->sv_env;
	register struct envxmm *penv_xmm = &sv_xmm->sv_env;
	int i;

	/* FPU control/status */
	penv_xmm->en_cw = penv_87->en_cw;
	penv_xmm->en_sw = penv_87->en_sw;
	penv_xmm->en_tw = penv_87->en_tw;
	penv_xmm->en_fip = penv_87->en_fip;
	penv_xmm->en_fcs = penv_87->en_fcs;
	penv_xmm->en_opcode = penv_87->en_opcode;
	penv_xmm->en_foo = penv_87->en_foo;
	penv_xmm->en_fos = penv_87->en_fos;

	/* FPU registers */
	for (i = 0; i < 8; ++i)
		sv_xmm->sv_fp[i].fp_acc = sv_87->sv_ac[i];
}
#endif /* CPU_ENABLE_SSE */

int
fill_fpregs(struct thread *td, struct fpreg *fpregs)
{
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr) {
		fill_fpregs_xmm(&td->td_pcb->pcb_save.sv_xmm,
		    (struct save87 *)fpregs);
		return (0);
	}
#endif /* CPU_ENABLE_SSE */
	bcopy(&td->td_pcb->pcb_save.sv_87, fpregs, sizeof *fpregs);
	return (0);
}

int
set_fpregs(struct thread *td, struct fpreg *fpregs)
{
#ifdef CPU_ENABLE_SSE
	if (cpu_fxsr) {
		set_fpregs_xmm((struct save87 *)fpregs,
		    &td->td_pcb->pcb_save.sv_xmm);
		return (0);
	}
#endif /* CPU_ENABLE_SSE */
	bcopy(fpregs, &td->td_pcb->pcb_save.sv_87, sizeof *fpregs);
	return (0);
}

/*
 * Get machine context.
 */
void
get_mcontext(struct thread *td, mcontext_t *mcp)
{
	struct trapframe *tp;

	tp = td->td_frame;

	mcp->mc_onstack = sigonstack(tp->tf_esp);
	mcp->mc_gs = td->td_pcb->pcb_gs;
	mcp->mc_fs = tp->tf_fs;
	mcp->mc_es = tp->tf_es;
	mcp->mc_ds = tp->tf_ds;
	mcp->mc_edi = tp->tf_edi;
	mcp->mc_esi = tp->tf_esi;
	mcp->mc_ebp = tp->tf_ebp;
	mcp->mc_isp = tp->tf_isp;
	mcp->mc_ebx = tp->tf_ebx;
	mcp->mc_edx = tp->tf_edx;
	mcp->mc_ecx = tp->tf_ecx;
	mcp->mc_eax = tp->tf_eax;
	mcp->mc_eip = tp->tf_eip;
	mcp->mc_cs = tp->tf_cs;
	mcp->mc_eflags = tp->tf_eflags;
	mcp->mc_esp = tp->tf_esp;
	mcp->mc_ss = tp->tf_ss;
	mcp->mc_len = sizeof(*mcp);
	get_fpcontext(td, mcp);
}

/*
 * Set machine context.
 *
 * However, we don't set any but the user modifiable flags, and we won't
 * touch the cs selector.
 */
int
set_mcontext(struct thread *td, const mcontext_t *mcp)
{
	struct trapframe *tp;
	int eflags, ret;

	tp = td->td_frame;
	if (mcp->mc_len != sizeof(*mcp))
		return (EINVAL);
	eflags = (mcp->mc_eflags & PSL_USERCHANGE) |
	    (tp->tf_eflags & ~PSL_USERCHANGE);
	if ((ret = set_fpcontext(td, mcp)) == 0) {
		tp->tf_fs = mcp->mc_fs;
		tp->tf_es = mcp->mc_es;
		tp->tf_ds = mcp->mc_ds;
		tp->tf_edi = mcp->mc_edi;
		tp->tf_esi = mcp->mc_esi;
		tp->tf_ebp = mcp->mc_ebp;
		tp->tf_ebx = mcp->mc_ebx;
		tp->tf_edx = mcp->mc_edx;
		tp->tf_ecx = mcp->mc_ecx;
		tp->tf_eax = mcp->mc_eax;
		tp->tf_eip = mcp->mc_eip;
		tp->tf_eflags = eflags;
		tp->tf_esp = mcp->mc_esp;
		tp->tf_ss = mcp->mc_ss;
		td->td_pcb->pcb_gs = mcp->mc_gs;
		ret = 0;
	}
	return (ret);
}

static void
get_fpcontext(struct thread *td, mcontext_t *mcp)
{
#ifndef DEV_NPX
	mcp->mc_fpformat = _MC_FPFMT_NODEV;
	mcp->mc_ownedfp = _MC_FPOWNED_NONE;
#else
	union savefpu *addr;

	/*
	 * XXX mc_fpstate might be misaligned, since its declaration is not
	 * unportabilized using __attribute__((aligned(16))) like the
	 * declaration of struct savexmm, and anyway, alignment doesn't work
	 * for auto variables since we don't use gcc's pessimal stack
	 * alignment.  Work around this by abusing the spare fields after
	 * mcp->mc_fpstate.
	 *
	 * XXX unpessimize most cases by only aligning when fxsave might be
	 * called, although this requires knowing too much about
	 * npxgetregs()'s internals.
	 */
	addr = (union savefpu *)&mcp->mc_fpstate;
	if (td == PCPU_GET(fpcurthread) &&
#ifdef CPU_ENABLE_SSE
	    cpu_fxsr &&
#endif
	    ((uintptr_t)(void *)addr & 0xF)) {
		do
			addr = (void *)((char *)addr + 4);
		while ((uintptr_t)(void *)addr & 0xF);
	}
	mcp->mc_ownedfp = npxgetregs(td, addr);
	if (addr != (union savefpu *)&mcp->mc_fpstate) {
		bcopy(addr, &mcp->mc_fpstate, sizeof(mcp->mc_fpstate));
		bzero(&mcp->mc_spare2, sizeof(mcp->mc_spare2));
	}
	mcp->mc_fpformat = npxformat();
#endif
}

static int
set_fpcontext(struct thread *td, const mcontext_t *mcp)
{
	union savefpu *addr;

	if (mcp->mc_fpformat == _MC_FPFMT_NODEV)
		return (0);
	else if (mcp->mc_fpformat != _MC_FPFMT_387 &&
	    mcp->mc_fpformat != _MC_FPFMT_XMM)
		return (EINVAL);
	else if (mcp->mc_ownedfp == _MC_FPOWNED_NONE)
		/* We don't care what state is left in the FPU or PCB. */
		fpstate_drop(td);
	else if (mcp->mc_ownedfp == _MC_FPOWNED_FPU ||
	    mcp->mc_ownedfp == _MC_FPOWNED_PCB) {
		/* XXX align as above. */
		addr = (union savefpu *)&mcp->mc_fpstate;
		if (td == PCPU_GET(fpcurthread) &&
#ifdef CPU_ENABLE_SSE
		    cpu_fxsr &&
#endif
		    ((uintptr_t)(void *)addr & 0xF)) {
			do
				addr = (void *)((char *)addr + 4);
			while ((uintptr_t)(void *)addr & 0xF);
			bcopy(&mcp->mc_fpstate, addr, sizeof(mcp->mc_fpstate));
		}
#ifdef DEV_NPX
		/*
		 * XXX we violate the dubious requirement that npxsetregs()
		 * be called with interrupts disabled.
		 */
		npxsetregs(td, addr);
#endif
		/*
		 * Don't bother putting things back where they were in the
		 * misaligned case, since we know that the caller won't use
		 * them again.
		 */
	} else
		return (EINVAL);
	return (0);
}

static void
fpstate_drop(struct thread *td)
{
	register_t s;

	s = intr_disable();
#ifdef DEV_NPX
	if (PCPU_GET(fpcurthread) == td)
		npxdrop();
#endif
	/*
	 * XXX force a full drop of the npx.  The above only drops it if we
	 * owned it.  npxgetregs() has the same bug in the !cpu_fxsr case.
	 *
	 * XXX I don't much like npxgetregs()'s semantics of doing a full
	 * drop.  Dropping only to the pcb matches fnsave's behaviour.
	 * We only need to drop to !PCB_INITDONE in sendsig().  But
	 * sendsig() is the only caller of npxgetregs()... perhaps we just
	 * have too many layers.
	 */
	curthread->td_pcb->pcb_flags &= ~PCB_NPXINITDONE;
	intr_restore(s);
}

int
fill_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;

	if (td == NULL) {
		dbregs->dr[0] = rdr0();
		dbregs->dr[1] = rdr1();
		dbregs->dr[2] = rdr2();
		dbregs->dr[3] = rdr3();
		dbregs->dr[4] = rdr4();
		dbregs->dr[5] = rdr5();
		dbregs->dr[6] = rdr6();
		dbregs->dr[7] = rdr7();
	} else {
		pcb = td->td_pcb;
		dbregs->dr[0] = pcb->pcb_dr0;
		dbregs->dr[1] = pcb->pcb_dr1;
		dbregs->dr[2] = pcb->pcb_dr2;
		dbregs->dr[3] = pcb->pcb_dr3;
		dbregs->dr[4] = 0;
		dbregs->dr[5] = 0;
		dbregs->dr[6] = pcb->pcb_dr6;
		dbregs->dr[7] = pcb->pcb_dr7;
	}
	return (0);
}

int
set_dbregs(struct thread *td, struct dbreg *dbregs)
{
	struct pcb *pcb;
	int i;
	u_int32_t mask1, mask2;

	if (td == NULL) {
		load_dr0(dbregs->dr[0]);
		load_dr1(dbregs->dr[1]);
		load_dr2(dbregs->dr[2]);
		load_dr3(dbregs->dr[3]);
		load_dr4(dbregs->dr[4]);
		load_dr5(dbregs->dr[5]);
		load_dr6(dbregs->dr[6]);
		load_dr7(dbregs->dr[7]);
	} else {
		/*
		 * Don't let an illegal value for dr7 get set.  Specifically,
		 * check for undefined settings.  Setting these bit patterns
		 * results in undefined behaviour and can lead to an
		 * unexpected TRCTRAP.
		 */
		for (i = 0, mask1 = 0x3<<16, mask2 = 0x2<<16; i < 8;
		     i++, mask1 <<= 2, mask2 <<= 2)
			if ((dbregs->dr[7] & mask1) == mask2)
				return (EINVAL);

		pcb = td->td_pcb;

		/*
		 * Don't let a process set a breakpoint that is not within the
		 * process's address space.  If a process could do this, it
		 * could halt the system by setting a breakpoint in the kernel
		 * (if ddb was enabled).  Thus, we need to check to make sure
		 * that no breakpoints are being enabled for addresses outside
		 * the process's address space, unless, perhaps, we were
		 * called by uid 0.
		 *
		 * XXX - what about when the watched area of the user's
		 * address space is written into from within the kernel
		 * ... wouldn't that still cause a breakpoint to be generated
		 * from within kernel mode?
		 */

		if (suser(td) != 0) {
			if (dbregs->dr[7] & 0x3) {
				/* dr0 is enabled */
				if (dbregs->dr[0] >= VM_MAXUSER_ADDRESS)
					return (EINVAL);
			}

			if (dbregs->dr[7] & (0x3<<2)) {
				/* dr1 is enabled */
				if (dbregs->dr[1] >= VM_MAXUSER_ADDRESS)
					return (EINVAL);
			}

			if (dbregs->dr[7] & (0x3<<4)) {
				/* dr2 is enabled */
				if (dbregs->dr[2] >= VM_MAXUSER_ADDRESS)
					return (EINVAL);
			}

			if (dbregs->dr[7] & (0x3<<6)) {
				/* dr3 is enabled */
				if (dbregs->dr[3] >= VM_MAXUSER_ADDRESS)
					return (EINVAL);
			}
		}

		pcb->pcb_dr0 = dbregs->dr[0];
		pcb->pcb_dr1 = dbregs->dr[1];
		pcb->pcb_dr2 = dbregs->dr[2];
		pcb->pcb_dr3 = dbregs->dr[3];
		pcb->pcb_dr6 = dbregs->dr[6];
		pcb->pcb_dr7 = dbregs->dr[7];

		pcb->pcb_flags |= PCB_DBREGS;
	}

	return (0);
}

/*
 * Return > 0 if a hardware breakpoint has been hit, and the
 * breakpoint was in user space.  Return 0, otherwise.
 */
int
user_dbreg_trap(void)
{
	u_int32_t dr7, dr6;	/* debug registers dr6 and dr7 */
	u_int32_t bp;		/* breakpoint bits extracted from dr6 */
	int nbp;		/* number of breakpoints that triggered */
	caddr_t addr[4];	/* breakpoint addresses */
	int i;

	dr7 = rdr7();
	if ((dr7 & 0x000000ff) == 0) {
		/*
		 * all local and global enable bits in the dr7 register are
		 * zero, thus the trap couldn't have been caused by the
		 * hardware debug registers
		 */
		return 0;
	}

	nbp = 0;
	dr6 = rdr6();
	bp = dr6 & 0x0000000f;

	if (!bp) {
		/*
		 * None of the breakpoint bits are set, meaning this
		 * trap was not caused by any of the debug registers
		 */
		return 0;
	}

	/*
	 * At least one of the breakpoints was hit; check to see
	 * which ones, and whether any of them are user space addresses
	 */

	if (bp & 0x01) {
		addr[nbp++] = (caddr_t)rdr0();
	}
	if (bp & 0x02) {
		addr[nbp++] = (caddr_t)rdr1();
	}
	if (bp & 0x04) {
		addr[nbp++] = (caddr_t)rdr2();
	}
	if (bp & 0x08) {
		addr[nbp++] = (caddr_t)rdr3();
	}

	for (i = 0; i < nbp; i++) {
		if (addr[i] < (caddr_t)VM_MAXUSER_ADDRESS) {
			/*
			 * addr[i] is in user space
			 */
			return nbp;
		}
	}

	/*
	 * None of the breakpoints are in user space.
	 */
	return 0;
}

#ifndef DDB
void
Debugger(const char *msg)
{
	printf("Debugger(\"%s\") called.\n", msg);
}
#endif /* no DDB */

#ifdef DDB

/*
 * Provide inb() and outb() as functions.  They are normally only
 * available as macros calling inlined functions, thus cannot be
 * called inside DDB.
 *
 * The actual code is stolen from <machine/cpufunc.h>, and de-inlined.
 */

#undef inb
#undef outb

/* silence compiler warnings */
u_char inb(u_int);
void outb(u_int, u_char);

u_char
inb(u_int port)
{
	u_char data;
	/*
	 * We use %%dx and not %1 here because i/o is done at %dx and not at
	 * %edx, while gcc generates inferior code (movw instead of movl)
	 * if we tell it to load (u_short) port.
	 */
	__asm __volatile("inb %%dx,%0" : "=a" (data) : "d" (port));
	return (data);
}

void
outb(u_int port, u_char data)
{
	u_char al;
	/*
	 * Use an unnecessary assignment to help gcc's register allocator.
	 * This makes a large difference for gcc-1.40 and a tiny difference
	 * for gcc-2.6.0.  For gcc-1.40, al had to be ``asm("ax")'' for
	 * best results.  gcc-2.6.0 can't handle this.
	 */
	al = data;
	__asm __volatile("outb %0,%%dx" : : "a" (al), "d" (port));
}

#endif /* DDB */