subr_syscall.c revision 66716

/*-
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: head/sys/kern/subr_trap.c 66716 2000-10-06 02:20:21Z jhb $
 */

/*
 * 386 Trap and System call handling
 */

#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_ktrace.h"
#include "opt_clock.h"
#include "opt_trap.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/ipl.h>
#include <machine/md_var.h>
#include <machine/mutex.h>
#include <machine/pcb.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/tss.h>

#include <i386/isa/icu.h>
#include <i386/isa/intr_machdep.h>

#ifdef POWERFAIL_NMI
#include <sys/syslog.h>
#include <machine/clock.h>
#endif

#include <machine/vm86.h>

#include <ddb/ddb.h>

#include "isa.h"
#include "npx.h"

int	(*pmath_emulate) __P((struct trapframe *));

extern void trap __P((struct trapframe frame));
extern int trapwrite __P((unsigned addr));
extern void syscall2 __P((struct trapframe frame));
extern void ast __P((struct trapframe frame));

static int trap_pfault __P((struct trapframe *, int, vm_offset_t));
static void trap_fatal __P((struct trapframe *, vm_offset_t));
void dblfault_handler __P((void));

extern inthand_t IDTVEC(syscall);

#define MAX_TRAP_MSG		28
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
};

static __inline int userret __P((struct proc *p, struct trapframe *frame,
				 u_quad_t oticks, int have_giant));

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

#ifdef DDB
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
#endif
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");

static __inline int
userret(p, frame, oticks, have_giant)
	struct proc *p;
	struct trapframe *frame;
	u_quad_t oticks;
	int have_giant;
{
	int sig, s;

	while ((sig = CURSIG(p)) != 0) {
		if (have_giant == 0) {
			mtx_enter(&Giant, MTX_DEF);
			have_giant = 1;
		}
		postsig(sig);
	}

	p->p_priority = p->p_usrpri;
	if (resched_wanted()) {
		/*
		 * Since we are curproc, a clock interrupt will normally
		 * just change our priority without moving us from one
		 * queue to another (since the running process is not on
		 * a queue).  If that happened after we setrunqueue
		 * ourselves but before we mi_switch()'ed, we might not
		 * be on the queue indicated by our priority.
		 */
		s = splhigh();
		mtx_enter(&sched_lock, MTX_SPIN);
		setrunqueue(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		mtx_exit(&sched_lock, MTX_SPIN);
		splx(s);
		while ((sig = CURSIG(p)) != 0) {
			if (have_giant == 0) {
				mtx_enter(&Giant, MTX_DEF);
				have_giant = 1;
			}
			postsig(sig);
		}
	}
	/*
	 * Charge system time if profiling.
	 */
	if (p->p_flag & P_PROFIL) {
		if (have_giant == 0) {
			mtx_enter(&Giant, MTX_DEF);
			have_giant = 1;
		}
		addupc_task(p, frame->tf_eip,
			    (u_int)(p->p_sticks - oticks) * psratio);
	}
	curpriority = p->p_priority;
	return(have_giant);
}
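
/*
 * The have_giant argument implements a hand-off protocol for the Giant
 * mutex: a caller that already holds Giant passes 1, and userret()
 * returns 1 if Giant ended up held on the caller's behalf, so the
 * caller knows to release it.  A minimal sketch of a caller, assuming
 * a hypothetical handler_example() entry point (illustrative only, not
 * part of this file):
 */
#if 0
static void
handler_example(struct trapframe frame)
{
	struct proc *p = curproc;
	int have_giant = 0;		/* Giant is not held on entry */

	/* ... handler work that may or may not have taken Giant ... */

	/* userret() reports whether Giant was acquired along the way. */
	have_giant = userret(p, &frame, p->p_sticks, have_giant);
	if (have_giant)
		mtx_exit(&Giant, MTX_DEF);
}
#endif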

/*
 * Exception, fault, and trap interface to the FreeBSD kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 */

void
trap(frame)
	struct trapframe frame;
{
	struct proc *p = curproc;
	u_quad_t sticks = 0;
	int i = 0, ucode = 0, type, code;
	vm_offset_t eva;
#ifdef POWERFAIL_NMI
	static int lastalert = 0;
#endif

	atomic_add_int(&cnt.v_trap, 1);

	if ((frame.tf_eflags & PSL_I) == 0) {
		/*
		 * Buggy application or kernel code has disabled
		 * interrupts and then trapped.  Enabling interrupts
		 * now is wrong, but it is better than running with
		 * interrupts disabled until they are accidentally
		 * enabled later.  XXX Consider whether this is still
		 * correct.
		 */
		type = frame.tf_trapno;
		if (ISPL(frame.tf_cs) == SEL_UPL || (frame.tf_eflags & PSL_VM))
			printf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		else if (type != T_BPTFLT && type != T_TRCTRAP)
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			printf("kernel trap %d with interrupts disabled\n",
			    type);
		enable_intr();
	}

	eva = 0;
	if (frame.tf_trapno == T_PAGEFLT) {
		/*
		 * For some Cyrix CPUs, %cr2 is clobbered by
		 * interrupts.  This problem is worked around by using
		 * an interrupt gate for the page fault handler.  We
		 * are finally ready to read %cr2 and then must
		 * reenable interrupts.
		 */
		eva = rcr2();
		enable_intr();
	}

	mtx_enter(&Giant, MTX_DEF);

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
restart:
#endif

	type = frame.tf_trapno;
	code = frame.tf_err;

	if ((ISPL(frame.tf_cs) == SEL_UPL) ||
	    ((frame.tf_eflags & PSL_VM) && !in_vm86call)) {
		/* user trap */

		sticks = p->p_sticks;
		p->p_md.md_regs = &frame;

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			ucode = type;
			i = SIGILL;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame.tf_eflags &= ~PSL_T;
			i = SIGTRAP;
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = code;
			i = SIGFPE;
			break;

			/*
			 * The following two traps can happen in
			 * vm86 mode, and, if so, we want to handle
			 * them specially.
			 */
		case T_PROTFLT:		/* general protection fault */
		case T_STKFLT:		/* stack fault */
			if (frame.tf_eflags & PSL_VM) {
				i = vm86_emulate((struct vm86frame *)&frame);
				if (i == 0)
					goto user;
				break;
			}
			/* FALL THROUGH */

		case T_SEGNPFLT:	/* segment not present fault */
		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
		default:
			ucode = code + BUS_SEGM_FAULT;
			i = SIGBUS;
			break;

		case T_PAGEFLT:		/* page fault */
			i = trap_pfault(&frame, TRUE, eva);
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
			if (i == -2) {
				/*
				 * The f00f hack workaround has triggered,
				 * so treat the fault as an illegal
				 * instruction rather than a page fault.
				 */
				frame.tf_trapno = T_PRIVINFLT;
				goto restart;
			}
#endif
			if (i == -1)
				goto out;
			if (i == 0)
				goto user;

			ucode = T_PAGEFLT;
			break;
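
		/*
		 * The -2 case above exists for the Pentium "f00f" bug:
		 * the workaround relocates the IDT so that delivering
		 * the invalid-opcode exception for the lockup-prone
		 * instruction page-faults while the CPU fetches the
		 * gate descriptor, leaving the faulting address equal
		 * to &idt[6].  trap_pfault() recognizes that address
		 * and returns -2, and the fault is recast as
		 * T_PRIVINFLT and redispatched via the restart label.
		 */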

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV;
			i = SIGFPE;
			break;

#if NISA > 0
		case T_NMI:
#ifdef POWERFAIL_NMI
#ifndef TIMER_FREQ
# define TIMER_FREQ	1193182
#endif
			if (time_second - lastalert > 10) {
				log(LOG_WARNING, "NMI: power fail\n");
				sysbeep(TIMER_FREQ/880, hz);
				lastalert = time_second;
			}
			goto out;
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					printf ("NMI ... going to debugger\n");
					kdb_trap (type, 0, &frame);
				}
#endif /* DDB */
				goto out;
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_FLTSUB;
			i = SIGFPE;
			break;

		case T_DNA:
#if NNPX > 0
			/* transparent fault (due to context switch "late") */
			if (npxdna())
				goto out;
#endif
			if (!pmath_emulate) {
				i = SIGFPE;
				ucode = FPE_FPU_NP_TRAP;
				break;
			}
			i = (*pmath_emulate)(&frame);
			if (i == 0) {
				if (!(frame.tf_eflags & PSL_T))
					goto out;
				frame.tf_eflags &= ~PSL_T;
				i = SIGTRAP;
			}
			/* else ucode = emulator_only_knows() XXX */
			break;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = T_FPOPFLT;
			i = SIGILL;
			break;
		}
	} else {
		/* kernel trap */

		switch (type) {
		case T_PAGEFLT:		/* page fault */
			(void) trap_pfault(&frame, FALSE, eva);
			goto out;

		case T_DNA:
#if NNPX > 0
			/*
			 * The kernel is apparently using npx for copying.
			 * XXX this should be fatal unless the kernel has
			 * registered such use.
			 */
			if (npxdna())
				goto out;
#endif
			break;

			/*
			 * The following two traps can happen in
			 * vm86 mode, and, if so, we want to handle
			 * them specially.
			 */
		case T_PROTFLT:		/* general protection fault */
		case T_STKFLT:		/* stack fault */
			if (frame.tf_eflags & PSL_VM) {
				i = vm86_emulate((struct vm86frame *)&frame);
				if (i != 0)
					/*
					 * returns to original process
					 */
					vm86_trap((struct vm86frame *)&frame);
				goto out;
			}
			if (type == T_STKFLT)
				break;

			/* FALL THROUGH */

		case T_SEGNPFLT:	/* segment not present fault */
			if (in_vm86call)
				break;

			if (intr_nesting_level != 0)
				break;

			/*
			 * Invalid %fs's and %gs's can be created using
			 * procfs or PT_SETREGS or by invalidating the
			 * underlying LDT entry.  This causes a fault
			 * in kernel mode when the kernel attempts to
			 * switch contexts.  Lose the bad context
			 * (XXX) so that we can continue, and generate
			 * a signal.
			 */
			if (frame.tf_eip == (int)cpu_switch_load_gs) {
				curpcb->pcb_gs = 0;
				psignal(p, SIGBUS);
				goto out;
			}

			/*
			 * Invalid segment selectors and out of bounds
			 * %eip's and %esp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
			if (frame.tf_eip == (int)doreti_iret) {
				frame.tf_eip = (int)doreti_iret_fault;
				goto out;
			}
			if (frame.tf_eip == (int)doreti_popl_ds) {
				frame.tf_eip = (int)doreti_popl_ds_fault;
				goto out;
			}
			if (frame.tf_eip == (int)doreti_popl_es) {
				frame.tf_eip = (int)doreti_popl_es_fault;
				goto out;
			}
			if (frame.tf_eip == (int)doreti_popl_fs) {
				frame.tf_eip = (int)doreti_popl_fs_fault;
				goto out;
			}
			if (curpcb && curpcb->pcb_onfault) {
				frame.tf_eip = (int)curpcb->pcb_onfault;
				goto out;
			}
			break;
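
		/*
		 * pcb_onfault is the kernel's generic recovery hook for
		 * routines that touch user memory: such a routine stores
		 * a landing address in curpcb->pcb_onfault, and the check
		 * above redirects %eip there if the access faults.  A
		 * minimal sketch of the pattern (illustrative only; the
		 * real copyin()/fubyte() live in i386/i386/support.s):
		 *
		 *	curpcb->pcb_onfault = (caddr_t)recovery_label;
		 *	... dereference the user address, which may fault ...
		 *	curpcb->pcb_onfault = NULL;
		 *	return (0);
		 *
		 * On a fault, execution resumes at recovery_label, which
		 * clears pcb_onfault and returns an error instead.
		 */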

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame.tf_eflags & PSL_NT) {
				frame.tf_eflags &= ~PSL_NT;
				goto out;
			}
			break;

		case T_TRCTRAP:	 /* trace trap */
			if (frame.tf_eip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
				goto out;
			}
			if (frame.tf_eip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame.tf_eflags &= ~PSL_T;
				goto out;
			}
			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap() && !in_vm86call) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't.
				 */
				load_dr6(rdr6() & 0xfffffff0);
				goto out;
			}
			/*
			 * Fall through (TRCTRAP kernel mode, kernel address).
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
#ifdef DDB
			if (kdb_trap (type, 0, &frame))
				goto out;
#endif
			break;

#if NISA > 0
		case T_NMI:
#ifdef POWERFAIL_NMI
			if (time_second - lastalert > 10) {
				log(LOG_WARNING, "NMI: power fail\n");
				sysbeep(TIMER_FREQ/880, hz);
				lastalert = time_second;
			}
			goto out;
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					printf ("NMI ... going to debugger\n");
					kdb_trap (type, 0, &frame);
				}
#endif /* DDB */
				goto out;
			} else if (panic_on_nmi == 0)
				goto out;
			/* FALL THROUGH */
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */
		}

		trap_fatal(&frame, eva);
		goto out;
	}
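
	/*
	 * sv_transtrap is a per-ABI hook in struct sysentvec that lets an
	 * emulation layer (the Linux ABI module, for instance) rewrite the
	 * signal chosen above into whatever the foreign ABI expects for
	 * this trap type.  A minimal sketch of such a hook, assuming a
	 * hypothetical ABI that reports every page fault as SIGSEGV
	 * (illustrative only):
	 *
	 *	static int
	 *	example_transtrap(int signal, int type)
	 *	{
	 *		return (type == T_PAGEFLT ? SIGSEGV : signal);
	 *	}
	 */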

	/* Translate fault for emulators (e.g. Linux) */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	trapsignal(p, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);
		uprintf("\n");
	}
#endif

user:
	userret(p, &frame, sticks, 1);
out:
	mtx_exit(&Giant, MTX_DEF);
}

#ifdef notyet
/*
 * This version doesn't allow a page fault to user space while
 * in the kernel.  The rest of the kernel needs to be made "safe"
 * before this can be used.  I think the only things remaining
 * to be made safe are the iBCS2 code and the process tracing/
 * debugging code.
 */
static int
trap_pfault(frame, usermode, eva)
	struct trapframe *frame;
	int usermode;
	vm_offset_t eva;
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	vm_prot_t ftype;
	struct proc *p = curproc;

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	va = trunc_page(eva);
	if (va < VM_MIN_KERNEL_ADDRESS) {
		vm_offset_t v;
		vm_page_t mpte;

		if (p == NULL ||
		    (!usermode && va < VM_MAXUSER_ADDRESS &&
		     (intr_nesting_level != 0 || curpcb == NULL ||
		      curpcb->pcb_onfault == NULL))) {
			trap_fatal(frame, eva);
			return (-1);
		}

		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL.  If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		vm = p->p_vmspace;
		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;

		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		++p->p_lock;

		/*
		 * Grow the stack if necessary.
		 * grow_stack() returns false only if va falls into a
		 * growable stack region and the stack growth fails.
		 * It returns true if va was not within a growable
		 * stack region, or if the stack growth succeeded.
		 */
		if (!grow_stack (p, va)) {
			rv = KERN_FAILURE;
			--p->p_lock;
			goto nogo;
		}

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
			      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
						      : VM_FAULT_NORMAL);

		--p->p_lock;
	} else {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode)
			goto nogo;

		/*
		 * Since we know that kernel virtual addresses always
		 * have pte pages mapped, we just have to fault the page.
		 */
		rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (intr_nesting_level == 0 && curpcb && curpcb->pcb_onfault) {
			frame->tf_eip = (int)curpcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, eva);
		return (-1);
	}

	/* kludge to pass faulting virtual address to sendsig */
	frame->tf_err = eva;

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
#endif
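
/*
 * Handle a page fault.  Returns 0 if the fault was resolved (or was
 * recovered via pcb_onfault), -1 if it was fatal and trap_fatal() was
 * called, -2 if the Pentium f00f workaround fired, and otherwise the
 * signal (SIGBUS or SIGSEGV) to deliver to the process.
 */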
int
trap_pfault(frame, usermode, eva)
	struct trapframe *frame;
	int usermode;
	vm_offset_t eva;
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	vm_prot_t ftype;
	struct proc *p = curproc;

	va = trunc_page(eva);
	if (va >= KERNBASE) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 * An exception: if the faulting address is the invalid
		 * instruction entry in the IDT, then the Intel Pentium
		 * F00F bug workaround was triggered, and we need to
		 * treat it as an illegal instruction, and not a page
		 * fault.
		 */
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
		if ((eva == (unsigned int)&idt[6]) && has_f00f_bug)
			return -2;
#endif
		if (usermode)
			goto nogo;

		map = kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL.  If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (p != NULL)
			vm = p->p_vmspace;

		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;
	}

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	if (map != kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		++p->p_lock;

		/*
		 * Grow the stack if necessary.
		 * grow_stack() returns false only if va falls into a
		 * growable stack region and the stack growth fails.
		 * It returns true if va was not within a growable
		 * stack region, or if the stack growth succeeded.
		 */
		if (!grow_stack (p, va)) {
			rv = KERN_FAILURE;
			--p->p_lock;
			goto nogo;
		}

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
			      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
						      : VM_FAULT_NORMAL);

		--p->p_lock;
	} else {
		/*
		 * Don't have to worry about process locking or stacks
		 * in the kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (intr_nesting_level == 0 && curpcb && curpcb->pcb_onfault) {
			frame->tf_eip = (int)curpcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, eva);
		return (-1);
	}

	/* kludge to pass faulting virtual address to sendsig */
	frame->tf_err = eva;

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
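
/*
 * Pretty-print a fatal trap and panic.  The page fault error code
 * decoded below is the hardware-defined bit vector pushed by the CPU:
 * PGEX_P set means the fault was a protection violation on a present
 * page (clear means the page was not present), PGEX_W means the access
 * was a write, and PGEX_U means it originated in user mode.
 */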
"protection violation" : "page not present"); 891 } 892 printf("instruction pointer = 0x%x:0x%x\n", 893 frame->tf_cs & 0xffff, frame->tf_eip); 894 if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) { 895 ss = frame->tf_ss & 0xffff; 896 esp = frame->tf_esp; 897 } else { 898 ss = GSEL(GDATA_SEL, SEL_KPL); 899 esp = (int)&frame->tf_esp; 900 } 901 printf("stack pointer = 0x%x:0x%x\n", ss, esp); 902 printf("frame pointer = 0x%x:0x%x\n", ss, frame->tf_ebp); 903 printf("code segment = base 0x%x, limit 0x%x, type 0x%x\n", 904 softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type); 905 printf(" = DPL %d, pres %d, def32 %d, gran %d\n", 906 softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32, 907 softseg.ssd_gran); 908 printf("processor eflags = "); 909 if (frame->tf_eflags & PSL_T) 910 printf("trace trap, "); 911 if (frame->tf_eflags & PSL_I) 912 printf("interrupt enabled, "); 913 if (frame->tf_eflags & PSL_NT) 914 printf("nested task, "); 915 if (frame->tf_eflags & PSL_RF) 916 printf("resume, "); 917 if (frame->tf_eflags & PSL_VM) 918 printf("vm86, "); 919 printf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12); 920 printf("current process = "); 921 if (curproc) { 922 printf("%lu (%s)\n", 923 (u_long)curproc->p_pid, curproc->p_comm ? 924 curproc->p_comm : ""); 925 } else { 926 printf("Idle\n"); 927 } 928 929#ifdef KDB 930 if (kdb_trap(&psl)) 931 return; 932#endif 933#ifdef DDB 934 if ((debugger_on_panic || db_active) && kdb_trap(type, 0, frame)) 935 return; 936#endif 937 printf("trap number = %d\n", type); 938 if (type <= MAX_TRAP_MSG) 939 panic(trap_msg[type]); 940 else 941 panic("unknown/reserved trap"); 942} 943 944/* 945 * Double fault handler. Called when a fault occurs while writing 946 * a frame for a trap/exception onto the stack. This usually occurs 947 * when the stack overflows (such is the case with infinite recursion, 948 * for example). 949 * 950 * XXX Note that the current PTD gets replaced by IdlePTD when the 951 * task switch occurs. This means that the stack that was active at 952 * the time of the double fault is not available at <kstack> unless 953 * the machine was idle when the double fault occurred. The downside 954 * of this is that "trace <ebp>" in ddb won't work. 955 */ 956void 957dblfault_handler() 958{ 959 printf("\nFatal double fault:\n"); 960 printf("eip = 0x%x\n", common_tss.tss_eip); 961 printf("esp = 0x%x\n", common_tss.tss_esp); 962 printf("ebp = 0x%x\n", common_tss.tss_ebp); 963#ifdef SMP 964 /* two seperate prints in case of a trap on an unmapped page */ 965 printf("cpuid = %d; ", cpuid); 966 printf("lapic.id = %08x\n", lapic.id); 967#endif 968 panic("double fault"); 969} 970 971/* 972 * Compensate for 386 brain damage (missing URKR). 973 * This is a little simpler than the pagefault handler in trap() because 974 * it the page tables have already been faulted in and high addresses 975 * are thrown out early for other reasons. 976 */ 977int trapwrite(addr) 978 unsigned addr; 979{ 980 struct proc *p; 981 vm_offset_t va; 982 struct vmspace *vm; 983 int rv; 984 985 va = trunc_page((vm_offset_t)addr); 986 /* 987 * XXX - MAX is END. Changed > to >= for temp. fix. 

/*
 * Compensate for 386 brain damage (missing URKR).
 * This is a little simpler than the page fault handler in trap()
 * because the page tables have already been faulted in and high
 * addresses are thrown out early for other reasons.
 */
int trapwrite(addr)
	unsigned addr;
{
	struct proc *p;
	vm_offset_t va;
	struct vmspace *vm;
	int rv;

	va = trunc_page((vm_offset_t)addr);
	/*
	 * XXX - MAX is END.  Changed > to >= for temp. fix.
	 */
	if (va >= VM_MAXUSER_ADDRESS)
		return (1);

	p = curproc;
	vm = p->p_vmspace;

	++p->p_lock;

	if (!grow_stack (p, va)) {
		--p->p_lock;
		return (1);
	}

	/*
	 * fault the data page
	 */
	rv = vm_fault(&vm->vm_map, va, VM_PROT_WRITE, VM_FAULT_DIRTY);

	--p->p_lock;

	if (rv != KERN_SUCCESS)
		return 1;

	return (0);
}

/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * In general, only simple access and manipulation of curproc and
 * the current stack is allowed without having to hold MP lock.
 */
void
syscall2(frame)
	struct trapframe frame;
{
	caddr_t params;
	int i;
	struct sysent *callp;
	struct proc *p = curproc;
	u_quad_t sticks;
	int error;
	int narg;
	int args[8];
	int have_giant = 0;
	u_int code;

	atomic_add_int(&cnt.v_syscall, 1);

#ifdef DIAGNOSTIC
	if (ISPL(frame.tf_cs) != SEL_UPL) {
		mtx_enter(&Giant, MTX_DEF);
		panic("syscall");
		/* NOT REACHED */
	}
#endif

	/*
	 * Handle atomicity of p_sticks by looping, since interrupts
	 * are enabled and the MP lock is not held.
	 */
	sticks = ((volatile struct proc *)p)->p_sticks;
	while (sticks != ((volatile struct proc *)p)->p_sticks)
		sticks = ((volatile struct proc *)p)->p_sticks;

	p->p_md.md_regs = &frame;
	params = (caddr_t)frame.tf_esp + sizeof(int);
	code = frame.tf_eax;
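
	/*
	 * The arguments live on the user stack just above the return
	 * address pushed by the libc stub, hence tf_esp + sizeof(int)
	 * above.  A rough sketch of the convention (the authoritative
	 * stubs live in libc's i386 system call glue):
	 *
	 *	movl	$SYS_write, %eax	# syscall number
	 *	int	$0x80			# args already on the stack
	 *
	 * SYS_syscall and SYS___syscall, handled below, are indirect
	 * forms in which the number itself is the first stack argument.
	 */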

	if (p->p_sysent->sv_prepsyscall) {
		/*
		 * The prep code is not MP aware.
		 */
		mtx_enter(&Giant, MTX_DEF);
		(*p->p_sysent->sv_prepsyscall)(&frame, args, &code, &params);
		mtx_exit(&Giant, MTX_DEF);
	} else {
		/*
		 * Need to check if this is a 32 bit or 64 bit syscall.
		 * fuword is MP aware.
		 */
		if (code == SYS_syscall) {
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = fuword(params);
			params += sizeof(int);
		} else if (code == SYS___syscall) {
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = fuword(params);
			params += sizeof(quad_t);
		}
	}

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

	/*
	 * copyin is MP aware, but the tracing code is not.
	 */
	if (params && (i = narg * sizeof(int)) &&
	    (error = copyin(params, (caddr_t)args, (u_int)i))) {
		mtx_enter(&Giant, MTX_DEF);
		have_giant = 1;
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, narg, args);
#endif
		goto bad;
	}

	/*
	 * Try to run the syscall without the MP lock if the syscall
	 * is MP safe.  We have to obtain the MP lock no matter what
	 * if we are ktracing.
	 */
	if ((callp->sy_narg & SYF_MPSAFE) == 0) {
		mtx_enter(&Giant, MTX_DEF);
		have_giant = 1;
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSCALL)) {
		if (have_giant == 0) {
			mtx_enter(&Giant, MTX_DEF);
			have_giant = 1;
		}
		ktrsyscall(p->p_tracep, code, narg, args);
	}
#endif
	p->p_retval[0] = 0;
	p->p_retval[1] = frame.tf_edx;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	error = (*callp->sy_call)(p, args);

	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		frame.tf_eax = p->p_retval[0];
		frame.tf_edx = p->p_retval[1];
		frame.tf_eflags &= ~PSL_C;
		break;

	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
		 * int 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		frame.tf_eip -= frame.tf_err;
		break;

	case EJUSTRETURN:
		break;

	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame.tf_eax = error;
		frame.tf_eflags |= PSL_C;
		break;
	}

	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if ((frame.tf_eflags & PSL_T) && !(frame.tf_eflags & PSL_VM)) {
		if (have_giant == 0) {
			mtx_enter(&Giant, MTX_DEF);
			have_giant = 1;
		}
		frame.tf_eflags &= ~PSL_T;
		trapsignal(p, SIGTRAP, 0);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues.
	 */
	have_giant = userret(p, &frame, sticks, have_giant);

#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSRET)) {
		if (have_giant == 0) {
			mtx_enter(&Giant, MTX_DEF);
			have_giant = 1;
		}
		ktrsysret(p->p_tracep, code, error, p->p_retval[0]);
	}
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	/*
	 * Release the MP lock if we had to get it.
	 */
	if (have_giant)
		mtx_exit(&Giant, MTX_DEF);

	mtx_assert(&sched_lock, MA_NOTOWNED);
	mtx_assert(&Giant, MA_NOTOWNED);
}
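
/*
 * ast() handles "asynchronous system traps": work such as signal
 * delivery, deferred profiling charges, and reschedule requests that
 * other parts of the kernel flag (via aston() and friends) while a
 * process is running, and which must be processed on the way back out
 * to user mode.  The low-level return path checks the pending-AST flag
 * and vectors here before the final iret.
 */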
void
ast(frame)
	struct trapframe frame;
{
	struct proc *p = CURPROC;
	u_quad_t sticks;

	/*
	 * Handle atomicity of p_sticks by looping, since interrupts
	 * are enabled and the MP lock is not held.
	 */
	sticks = ((volatile struct proc *)p)->p_sticks;
	while (sticks != ((volatile struct proc *)p)->p_sticks)
		sticks = ((volatile struct proc *)p)->p_sticks;

	astoff();
	atomic_add_int(&cnt.v_soft, 1);
	if (p->p_flag & P_OWEUPC) {
		mtx_enter(&Giant, MTX_DEF);
		p->p_flag &= ~P_OWEUPC;
		addupc_task(p, p->p_stats->p_prof.pr_addr,
			    p->p_stats->p_prof.pr_ticks);
	}
	if (p->p_flag & P_ALRMPEND) {
		if (!mtx_owned(&Giant))
			mtx_enter(&Giant, MTX_DEF);
		p->p_flag &= ~P_ALRMPEND;
		psignal(p, SIGVTALRM);
	}
	if (p->p_flag & P_PROFPEND) {
		if (!mtx_owned(&Giant))
			mtx_enter(&Giant, MTX_DEF);
		p->p_flag &= ~P_PROFPEND;
		psignal(p, SIGPROF);
	}
	if (userret(p, &frame, sticks, mtx_owned(&Giant)) != 0)
		mtx_exit(&Giant, MTX_DEF);
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  Giant is not held on entry, and must not
 * be held on return.
 */
void
fork_return(p, frame)
	struct proc *p;
	struct trapframe frame;
{
	int	have_giant;

	frame.tf_eax = 0;		/* Child returns zero */
	frame.tf_eflags &= ~PSL_C;	/* success */
	frame.tf_edx = 1;

	have_giant = userret(p, &frame, 0, mtx_owned(&Giant));
#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSRET)) {
		if (have_giant == 0) {
			mtx_enter(&Giant, MTX_DEF);
			have_giant = 1;
		}
		ktrsysret(p->p_tracep, SYS_fork, 0, 0);
	}
#endif
	if (have_giant)
		mtx_exit(&Giant, MTX_DEF);
}
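
/*
 * A sketch of the wiring (see the fork trampoline glue in the i386
 * swtch.s of this era): fork_return() is not called directly; cpu_fork()
 * arranges for the child's first context switch to land in trampoline
 * code that calls here with the child's proc pointer and a copy of the
 * parent's trapframe.  Together with tf_eax = 0 and tf_edx = 1 above,
 * this preserves the old BSD convention that fork() returns (pid, 0)
 * in the parent and (0, 1) in the child.
 */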