subr_trap.c revision 71527
/*-
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: head/sys/kern/subr_trap.c 71527 2001-01-24 09:53:49Z jhb $
 */

/*
 * 386 Trap and System call handling
 */

#include "opt_clock.h"
#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_ktrace.h"
#include "opt_npx.h"
#include "opt_trap.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/ipl.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/tss.h>

#include <i386/isa/icu.h>
#include <i386/isa/intr_machdep.h>

#ifdef POWERFAIL_NMI
#include <sys/syslog.h>
#include <machine/clock.h>
#endif

#include <machine/vm86.h>

#include <ddb/ddb.h>

#include "isa.h"

#include <sys/sysctl.h>

int (*pmath_emulate) __P((struct trapframe *));

extern void trap __P((struct trapframe frame));
extern int trapwrite __P((unsigned addr));
extern void syscall2 __P((struct trapframe frame));
extern void ast __P((struct trapframe frame));

static int trap_pfault __P((struct trapframe *, int, vm_offset_t));
static void trap_fatal __P((struct trapframe *, vm_offset_t));
void dblfault_handler __P((void));

extern inthand_t IDTVEC(syscall);

#define MAX_TRAP_MSG		28
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
};

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

#ifdef DDB
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
#endif
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");

#ifdef WITNESS
extern char *syscallnames[];
#endif
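
/*
 * Perform the work deferred until return to user mode: deliver any
 * posted signals, reschedule if the kernel has requested a context
 * switch, and charge system time to profiling.
 */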
void
userret(p, frame, oticks)
	struct proc *p;
	struct trapframe *frame;
	u_quad_t oticks;
{
	int sig;

	while ((sig = CURSIG(p)) != 0) {
		if (!mtx_owned(&Giant))
			mtx_enter(&Giant, MTX_DEF);
		postsig(sig);
	}

	mtx_enter(&sched_lock, MTX_SPIN);
	p->p_priority = p->p_usrpri;
	if (resched_wanted()) {
		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue).
		 * If that happened after we called setrunqueue() on
		 * ourselves but before we mi_switch()'ed, we might not be
		 * on the queue indicated by our priority.
		 */
		DROP_GIANT_NOSWITCH();
		setrunqueue(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		mtx_exit(&sched_lock, MTX_SPIN);
		PICKUP_GIANT();
		while ((sig = CURSIG(p)) != 0) {
			if (!mtx_owned(&Giant))
				mtx_enter(&Giant, MTX_DEF);
			postsig(sig);
		}
		mtx_enter(&sched_lock, MTX_SPIN);
	}

	/*
	 * Charge system time if profiling.
	 */
	if (p->p_sflag & PS_PROFIL) {
		mtx_exit(&sched_lock, MTX_SPIN);
		/* XXX - do we need Giant? */
		if (!mtx_owned(&Giant))
			mtx_enter(&Giant, MTX_DEF);
		mtx_enter(&sched_lock, MTX_SPIN);
		addupc_task(p, frame->tf_eip,
		    (u_int)(p->p_sticks - oticks) * psratio);
	}
	curpriority = p->p_priority;
	mtx_exit(&sched_lock, MTX_SPIN);
}

/*
 * Exception, fault, and trap interface to the FreeBSD kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 */

void
trap(frame)
	struct trapframe frame;
{
	struct proc *p = curproc;
	u_quad_t sticks = 0;
	int i = 0, ucode = 0, type, code;
	vm_offset_t eva;
#ifdef POWERFAIL_NMI
	static int lastalert = 0;
#endif

	atomic_add_int(&cnt.v_trap, 1);

	if ((frame.tf_eflags & PSL_I) == 0) {
		/*
		 * Buggy application or kernel code has disabled
		 * interrupts and then trapped.  Enabling interrupts
		 * now is wrong, but it is better than running with
		 * interrupts disabled until they are accidentally
		 * enabled later.  XXX This is really bad if we trap
		 * while holding a spin lock.
		 */
		type = frame.tf_trapno;
		if (ISPL(frame.tf_cs) == SEL_UPL || (frame.tf_eflags & PSL_VM))
			printf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		else if (type != T_BPTFLT && type != T_TRCTRAP)
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			printf("kernel trap %d with interrupts disabled\n",
			    type);
		/*
		 * We should walk p_heldmtx here and see if any are
		 * spin mutexes, and not do this if so.
		 */
		enable_intr();
	}

	eva = 0;
	if (frame.tf_trapno == T_PAGEFLT) {
		/*
		 * For some Cyrix CPUs, %cr2 is clobbered by
		 * interrupts.  This problem is worked around by using
		 * an interrupt gate for the pagefault handler.  We
		 * are finally ready to read %cr2 and then must
		 * reenable interrupts.
		 */
		eva = rcr2();
		enable_intr();
	}
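
	/*
	 * Most of the trap handling below is not yet MP-safe, so
	 * serialize on Giant for the remainder of the trap; it is
	 * released at `out' below (or earlier on paths that do not
	 * return here).
	 */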
	mtx_enter(&Giant, MTX_DEF);

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
restart:
#endif

	type = frame.tf_trapno;
	code = frame.tf_err;

	if ((ISPL(frame.tf_cs) == SEL_UPL) ||
	    ((frame.tf_eflags & PSL_VM) && !in_vm86call)) {
		/* user trap */

		mtx_enter(&sched_lock, MTX_SPIN);
		sticks = p->p_sticks;
		mtx_exit(&sched_lock, MTX_SPIN);
		p->p_md.md_regs = &frame;

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			ucode = type;
			i = SIGILL;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame.tf_eflags &= ~PSL_T;
			i = SIGTRAP;
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = code;
			i = SIGFPE;
			break;

			/*
			 * The following two traps can happen in
			 * vm86 mode, and, if so, we want to handle
			 * them specially.
			 */
		case T_PROTFLT:		/* general protection fault */
		case T_STKFLT:		/* stack fault */
			if (frame.tf_eflags & PSL_VM) {
				i = vm86_emulate((struct vm86frame *)&frame);
				if (i == 0)
					goto user;
				break;
			}
			/* FALL THROUGH */

		case T_SEGNPFLT:	/* segment not present fault */
		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
		default:
			ucode = code + BUS_SEGM_FAULT;
			i = SIGBUS;
			break;

		case T_PAGEFLT:		/* page fault */
			i = trap_pfault(&frame, TRUE, eva);
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
			if (i == -2) {
				/*
				 * The f00f hack workaround has triggered, so
				 * treat it as an illegal instruction, not a
				 * page fault.
				 */
				frame.tf_trapno = T_PRIVINFLT;
				goto restart;
			}
#endif
			if (i == -1)
				goto out;
			if (i == 0)
				goto user;

			ucode = T_PAGEFLT;
			break;

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV;
			i = SIGFPE;
			break;

#if NISA > 0
		case T_NMI:
#ifdef POWERFAIL_NMI
#ifndef TIMER_FREQ
# define TIMER_FREQ	1193182
#endif
			if (time_second - lastalert > 10) {
				log(LOG_WARNING, "NMI: power fail\n");
				sysbeep(TIMER_FREQ/880, hz);
				lastalert = time_second;
			}
			goto out;
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					printf("NMI ... going to debugger\n");
					kdb_trap(type, 0, &frame);
				}
#endif /* DDB */
				goto out;
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_FLTSUB;
			i = SIGFPE;
			break;

		case T_DNA:
#ifdef DEV_NPX
			/* transparent fault (due to context switch "late") */
			if (npxdna())
				goto out;
#endif
			if (!pmath_emulate) {
				i = SIGFPE;
				ucode = FPE_FPU_NP_TRAP;
				break;
			}
			i = (*pmath_emulate)(&frame);
			if (i == 0) {
				if (!(frame.tf_eflags & PSL_T))
					goto out;
				frame.tf_eflags &= ~PSL_T;
				i = SIGTRAP;
			}
			/* else ucode = emulator_only_knows() XXX */
			break;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = T_FPOPFLT;
			i = SIGILL;
			break;
		}
	} else {
		/* kernel trap */

		switch (type) {
		case T_PAGEFLT:		/* page fault */
			(void) trap_pfault(&frame, FALSE, eva);
			goto out;

		case T_DNA:
#ifdef DEV_NPX
			/*
			 * The kernel is apparently using npx for copying.
			 * XXX this should be fatal unless the kernel has
			 * registered such use.
			 */
			if (npxdna())
				goto out;
#endif
			break;

			/*
			 * The following two traps can happen in
			 * vm86 mode, and, if so, we want to handle
			 * them specially.
			 */
		case T_PROTFLT:		/* general protection fault */
		case T_STKFLT:		/* stack fault */
			if (frame.tf_eflags & PSL_VM) {
				i = vm86_emulate((struct vm86frame *)&frame);
				if (i != 0) {
					/*
					 * returns to original process
					 */
					mtx_exit(&Giant, MTX_DEF);
					vm86_trap((struct vm86frame *)&frame);
				}
				goto out;
			}
			if (type == T_STKFLT)
				break;

			/* FALL THROUGH */

		case T_SEGNPFLT:	/* segment not present fault */
			if (in_vm86call)
				break;

			if (p->p_intr_nesting_level != 0)
				break;

			/*
			 * Invalid %fs's and %gs's can be created using
			 * procfs or PT_SETREGS or by invalidating the
			 * underlying LDT entry.  This causes a fault
			 * in kernel mode when the kernel attempts to
			 * switch contexts.  Lose the bad context
			 * (XXX) so that we can continue, and generate
			 * a signal.
			 */
			if (frame.tf_eip == (int)cpu_switch_load_gs) {
				PCPU_GET(curpcb)->pcb_gs = 0;
				psignal(p, SIGBUS);
				goto out;
			}

			/*
			 * Invalid segment selectors and out of bounds
			 * %eip's and %esp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
			if (frame.tf_eip == (int)doreti_iret) {
				frame.tf_eip = (int)doreti_iret_fault;
				goto out;
			}
			if (frame.tf_eip == (int)doreti_popl_ds) {
				frame.tf_eip = (int)doreti_popl_ds_fault;
				goto out;
			}
			if (frame.tf_eip == (int)doreti_popl_es) {
				frame.tf_eip = (int)doreti_popl_es_fault;
				goto out;
			}
			if (frame.tf_eip == (int)doreti_popl_fs) {
				frame.tf_eip = (int)doreti_popl_fs_fault;
				goto out;
			}
			if (PCPU_GET(curpcb) != NULL &&
			    PCPU_GET(curpcb)->pcb_onfault != NULL) {
				frame.tf_eip =
				    (int)PCPU_GET(curpcb)->pcb_onfault;
				goto out;
			}
			break;

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame.tf_eflags & PSL_NT) {
				frame.tf_eflags &= ~PSL_NT;
				goto out;
			}
			break;

		case T_TRCTRAP:		/* trace trap */
			if (frame.tf_eip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
				goto out;
			}
			if (frame.tf_eip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame.tf_eflags &= ~PSL_T;
				goto out;
			}
			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap() && !in_vm86call) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't.
				 */
				load_dr6(rdr6() & 0xfffffff0);
				goto out;
			}
			/*
			 * Fall through (TRCTRAP kernel mode, kernel address).
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
#ifdef DDB
			if (kdb_trap(type, 0, &frame))
				goto out;
#endif
			break;

#if NISA > 0
		case T_NMI:
#ifdef POWERFAIL_NMI
			if (time_second - lastalert > 10) {
				log(LOG_WARNING, "NMI: power fail\n");
				sysbeep(TIMER_FREQ/880, hz);
				lastalert = time_second;
			}
			goto out;
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					printf("NMI ... going to debugger\n");
					kdb_trap(type, 0, &frame);
				}
#endif /* DDB */
				goto out;
			} else if (panic_on_nmi == 0)
				goto out;
			/* FALL THROUGH */
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */
		}

		trap_fatal(&frame, eva);
		goto out;
	}
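
	/*
	 * At this point the trap was taken in user mode and was not
	 * fully handled above; `i' holds the signal to post to the
	 * process and `ucode' the associated code.
	 */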
	/* Translate fault for emulators (e.g. Linux) */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	trapsignal(p, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
		    trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);
		uprintf("\n");
	}
#endif

user:
	userret(p, &frame, sticks);
out:
	if (mtx_owned(&Giant))
		mtx_exit(&Giant, MTX_DEF);
}

#ifdef notyet
/*
 * This version doesn't allow a page fault to user space while
 * in the kernel.  The rest of the kernel needs to be made "safe"
 * before this can be used.  I think the only things remaining
 * to be made safe are the iBCS2 code and the process tracing/
 * debugging code.
 */
static int
trap_pfault(frame, usermode, eva)
	struct trapframe *frame;
	int usermode;
	vm_offset_t eva;
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	vm_prot_t ftype;
	struct proc *p = curproc;

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	va = trunc_page(eva);
	if (va < VM_MIN_KERNEL_ADDRESS) {
		vm_offset_t v;
		vm_page_t mpte;

		if (p == NULL ||
		    (!usermode && va < VM_MAXUSER_ADDRESS &&
		     (p->p_intr_nesting_level != 0 ||
		      PCPU_GET(curpcb) == NULL ||
		      PCPU_GET(curpcb)->pcb_onfault == NULL))) {
			trap_fatal(frame, eva);
			return (-1);
		}

		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL.  If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		vm = p->p_vmspace;
		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;

		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PROC_LOCK(p);
		++p->p_lock;
		PROC_UNLOCK(p);

		/*
		 * Grow the stack if necessary.  grow_stack() returns false
		 * only if va falls into a growable stack region and the
		 * stack growth fails.  It returns true if va was not within
		 * a growable stack region, or if the stack growth succeeded.
		 */
		if (!grow_stack(p, va)) {
			rv = KERN_FAILURE;
			PROC_LOCK(p);
			--p->p_lock;
			PROC_UNLOCK(p);
			goto nogo;
		}

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
		    (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
					    : VM_FAULT_NORMAL);

		PROC_LOCK(p);
		--p->p_lock;
		PROC_UNLOCK(p);
	} else {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode)
			goto nogo;

		/*
		 * Since we know that kernel virtual addresses always
		 * have pte pages mapped, we just have to fault
		 * the page.
		 */
		rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (p->p_intr_nesting_level == 0 &&
		    PCPU_GET(curpcb) != NULL &&
		    PCPU_GET(curpcb)->pcb_onfault != NULL) {
			frame->tf_eip = (int)PCPU_GET(curpcb)->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, eva);
		return (-1);
	}

	/* kludge to pass faulting virtual address to sendsig */
	frame->tf_err = eva;

	return ((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
#endif
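
/*
 * Handle a page fault.  Returns 0 if the fault was resolved, -1 if it
 * was fatal and has already been reported, -2 if the Pentium F00F
 * workaround fired (the caller retries the trap as T_PRIVINFLT), or a
 * signal number (SIGBUS/SIGSEGV) to post to the process.
 */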
int
trap_pfault(frame, usermode, eva)
	struct trapframe *frame;
	int usermode;
	vm_offset_t eva;
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	vm_prot_t ftype;
	struct proc *p = curproc;

	va = trunc_page(eva);
	if (va >= KERNBASE) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 * An exception: if the faulting address is the invalid
		 * instruction entry in the IDT, then the Intel Pentium
		 * F00F bug workaround was triggered, and we need to
		 * treat it as an illegal instruction, and not a page
		 * fault.
		 */
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
		if ((eva == (unsigned int)&idt[6]) && has_f00f_bug)
			return (-2);
#endif
		if (usermode)
			goto nogo;

		map = kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL.  If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (p != NULL)
			vm = p->p_vmspace;

		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;
	}

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	if (map != kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		PROC_LOCK(p);
		++p->p_lock;
		PROC_UNLOCK(p);

		/*
		 * Grow the stack if necessary.  grow_stack() returns false
		 * only if va falls into a growable stack region and the
		 * stack growth fails.  It returns true if va was not within
		 * a growable stack region, or if the stack growth succeeded.
		 */
		if (!grow_stack(p, va)) {
			rv = KERN_FAILURE;
			PROC_LOCK(p);
			--p->p_lock;
			PROC_UNLOCK(p);
			goto nogo;
		}

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
		    (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
					    : VM_FAULT_NORMAL);

		PROC_LOCK(p);
		--p->p_lock;
		PROC_UNLOCK(p);
	} else {
		/*
		 * Don't have to worry about process locking or stacks in the
		 * kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (p->p_intr_nesting_level == 0 &&
		    PCPU_GET(curpcb) != NULL &&
		    PCPU_GET(curpcb)->pcb_onfault != NULL) {
			frame->tf_eip = (int)PCPU_GET(curpcb)->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, eva);
		return (-1);
	}

	/* kludge to pass faulting virtual address to sendsig */
	frame->tf_err = eva;

	return ((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}

static void
trap_fatal(frame, eva)
	struct trapframe *frame;
	vm_offset_t eva;
{
	int code, type, ss, esp;
	struct soft_segment_descriptor softseg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)].sd, &softseg);

	if (type <= MAX_TRAP_MSG)
		printf("\n\nFatal trap %d: %s while in %s mode\n",
		    type, trap_msg[type],
		    frame->tf_eflags & PSL_VM ? "vm86" :
		    ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
#ifdef SMP
	/* two separate prints in case of a trap on an unmapped page */
	printf("cpuid = %d; ", PCPU_GET(cpuid));
	printf("lapic.id = %08x\n", lapic.id);
#endif
	if (type == T_PAGEFLT) {
		printf("fault virtual address = 0x%x\n", eva);
		printf("fault code = %s %s, %s\n",
		    code & PGEX_U ? "user" : "supervisor",
		    code & PGEX_W ? "write" : "read",
		    code & PGEX_P ? "protection violation" : "page not present");
	}
	printf("instruction pointer = 0x%x:0x%x\n",
	    frame->tf_cs & 0xffff, frame->tf_eip);
	if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
		ss = frame->tf_ss & 0xffff;
		esp = frame->tf_esp;
	} else {
		ss = GSEL(GDATA_SEL, SEL_KPL);
		esp = (int)&frame->tf_esp;
	}
	printf("stack pointer = 0x%x:0x%x\n", ss, esp);
	printf("frame pointer = 0x%x:0x%x\n", ss, frame->tf_ebp);
	printf("code segment = base 0x%x, limit 0x%x, type 0x%x\n",
	    softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	printf("             = DPL %d, pres %d, def32 %d, gran %d\n",
	    softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32,
	    softseg.ssd_gran);
	printf("processor eflags = ");
	if (frame->tf_eflags & PSL_T)
		printf("trace trap, ");
	if (frame->tf_eflags & PSL_I)
		printf("interrupt enabled, ");
	if (frame->tf_eflags & PSL_NT)
		printf("nested task, ");
	if (frame->tf_eflags & PSL_RF)
		printf("resume, ");
	if (frame->tf_eflags & PSL_VM)
		printf("vm86, ");
	printf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
	printf("current process = ");
	if (curproc) {
		printf("%lu (%s)\n",
		    (u_long)curproc->p_pid, curproc->p_comm ?
		    curproc->p_comm : "");
	} else {
		printf("Idle\n");
	}

#ifdef KDB
	if (kdb_trap(&psl))
		return;
#endif
#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, 0, frame))
		return;
#endif
	printf("trap number = %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic(trap_msg[type]);
	else
		panic("unknown/reserved trap");
}

/*
 * Double fault handler.  Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack.  This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 *
 * XXX Note that the current PTD gets replaced by IdlePTD when the
 * task switch occurs.  This means that the stack that was active at
 * the time of the double fault is not available at <kstack> unless
 * the machine was idle when the double fault occurred.  The downside
 * of this is that "trace <ebp>" in ddb won't work.
 */
void
dblfault_handler()
{
	printf("\nFatal double fault:\n");
	printf("eip = 0x%x\n", PCPU_GET(common_tss.tss_eip));
	printf("esp = 0x%x\n", PCPU_GET(common_tss.tss_esp));
	printf("ebp = 0x%x\n", PCPU_GET(common_tss.tss_ebp));
#ifdef SMP
	/* two separate prints in case of a trap on an unmapped page */
	printf("cpuid = %d; ", PCPU_GET(cpuid));
	printf("lapic.id = %08x\n", lapic.id);
#endif
	panic("double fault");
}

/*
 * Compensate for 386 brain damage (missing URKR).
 * This is a little simpler than the pagefault handler in trap()
 * because the page tables have already been faulted in and high
 * addresses are thrown out early for other reasons.
 */
int trapwrite(addr)
	unsigned addr;
{
	struct proc *p;
	vm_offset_t va;
	struct vmspace *vm;
	int rv;

	va = trunc_page((vm_offset_t)addr);
	/*
	 * XXX - MAX is END.  Changed > to >= for temp. fix.
	 */
	if (va >= VM_MAXUSER_ADDRESS)
		return (1);

	p = curproc;
	vm = p->p_vmspace;

	PROC_LOCK(p);
	++p->p_lock;
	PROC_UNLOCK(p);

	if (!grow_stack(p, va)) {
		PROC_LOCK(p);
		--p->p_lock;
		PROC_UNLOCK(p);
		return (1);
	}

	/*
	 * fault the data page
	 */
	rv = vm_fault(&vm->vm_map, va, VM_PROT_WRITE, VM_FAULT_DIRTY);

	PROC_LOCK(p);
	--p->p_lock;
	PROC_UNLOCK(p);

	if (rv != KERN_SUCCESS)
		return (1);

	return (0);
}

/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * In general, only simple access and manipulation of curproc and
 * the current stack is allowed without having to hold MP lock.
 */
void
syscall2(frame)
	struct trapframe frame;
{
	caddr_t params;
	int i;
	struct sysent *callp;
	struct proc *p = curproc;
	u_quad_t sticks;
	int error;
	int narg;
	int args[8];
	u_int code;

	atomic_add_int(&cnt.v_syscall, 1);

#ifdef DIAGNOSTIC
	if (ISPL(frame.tf_cs) != SEL_UPL) {
		mtx_enter(&Giant, MTX_DEF);
		panic("syscall");
		/* NOT REACHED */
	}
#endif

	mtx_enter(&sched_lock, MTX_SPIN);
	sticks = p->p_sticks;
	mtx_exit(&sched_lock, MTX_SPIN);

	p->p_md.md_regs = &frame;
	params = (caddr_t)frame.tf_esp + sizeof(int);
	code = frame.tf_eax;

	if (p->p_sysent->sv_prepsyscall) {
		/*
		 * The prep code is not MP aware.
		 */
		mtx_enter(&Giant, MTX_DEF);
		(*p->p_sysent->sv_prepsyscall)(&frame, args, &code, &params);
		mtx_exit(&Giant, MTX_DEF);
	} else {
		/*
		 * Need to check if this is a 32 bit or 64 bit syscall.
		 * fuword is MP aware.
		 */
		if (code == SYS_syscall) {
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = fuword(params);
			params += sizeof(int);
		} else if (code == SYS___syscall) {
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = fuword(params);
			params += sizeof(quad_t);
		}
	}

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

	/*
	 * copyin is MP aware, but the tracing code is not.
	 */
	if (params && (i = narg * sizeof(int)) &&
	    (error = copyin(params, (caddr_t)args, (u_int)i))) {
		mtx_enter(&Giant, MTX_DEF);
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, narg, args);
#endif
		goto bad;
	}

	/*
	 * Try to run the syscall without the MP lock if the syscall
	 * is MP safe.  We have to obtain the MP lock no matter what if
	 * we are ktracing.
	 */
	if ((callp->sy_narg & SYF_MPSAFE) == 0) {
		mtx_enter(&Giant, MTX_DEF);
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSCALL)) {
		if (!mtx_owned(&Giant))
			mtx_enter(&Giant, MTX_DEF);
		ktrsyscall(p->p_tracep, code, narg, args);
	}
#endif
	p->p_retval[0] = 0;
	p->p_retval[1] = frame.tf_edx;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	error = (*callp->sy_call)(p, args);

	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	switch (error) {
	case 0:
		frame.tf_eax = p->p_retval[0];
		frame.tf_edx = p->p_retval[1];
		frame.tf_eflags &= ~PSL_C;
		break;

	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,$Y is 7 bytes and
		 * int 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		frame.tf_eip -= frame.tf_err;
		break;

	case EJUSTRETURN:
		break;

	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame.tf_eax = error;
		frame.tf_eflags |= PSL_C;
		break;
	}

	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if ((frame.tf_eflags & PSL_T) && !(frame.tf_eflags & PSL_VM)) {
		if (!mtx_owned(&Giant))
			mtx_enter(&Giant, MTX_DEF);
		frame.tf_eflags &= ~PSL_T;
		trapsignal(p, SIGTRAP, 0);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues.
	 */
	userret(p, &frame, sticks);

#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSRET)) {
		if (!mtx_owned(&Giant))
			mtx_enter(&Giant, MTX_DEF);
		ktrsysret(p->p_tracep, code, error, p->p_retval[0]);
	}
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	/*
	 * Release Giant if we had to get it.
	 */
	if (mtx_owned(&Giant))
		mtx_exit(&Giant, MTX_DEF);

#ifdef WITNESS
	if (witness_list(p)) {
		panic("system call %s returning with mutex(s) held\n",
		    syscallnames[code]);
	}
#endif
	mtx_assert(&sched_lock, MA_NOTOWNED);
	mtx_assert(&Giant, MA_NOTOWNED);
}
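
/*
 * Handle an asynchronous system trap (AST): charge any pending
 * profiling ticks and deliver any pending SIGVTALRM/SIGPROF signals,
 * then finish via userret() on the way back out to user mode.
 */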
void
ast(frame)
	struct trapframe frame;
{
	struct proc *p = CURPROC;
	u_quad_t sticks;

	mtx_enter(&sched_lock, MTX_SPIN);
	sticks = p->p_sticks;

	astoff();
	atomic_add_int(&cnt.v_soft, 1);
	if (p->p_sflag & PS_OWEUPC) {
		p->p_sflag &= ~PS_OWEUPC;
		mtx_exit(&sched_lock, MTX_SPIN);
		mtx_enter(&Giant, MTX_DEF);
		mtx_enter(&sched_lock, MTX_SPIN);
		addupc_task(p, p->p_stats->p_prof.pr_addr,
		    p->p_stats->p_prof.pr_ticks);
	}
	if (p->p_sflag & PS_ALRMPEND) {
		p->p_sflag &= ~PS_ALRMPEND;
		mtx_exit(&sched_lock, MTX_SPIN);
		if (!mtx_owned(&Giant))
			mtx_enter(&Giant, MTX_DEF);
		psignal(p, SIGVTALRM);
		mtx_enter(&sched_lock, MTX_SPIN);
	}
	if (p->p_sflag & PS_PROFPEND) {
		p->p_sflag &= ~PS_PROFPEND;
		mtx_exit(&sched_lock, MTX_SPIN);
		if (!mtx_owned(&Giant))
			mtx_enter(&Giant, MTX_DEF);
		psignal(p, SIGPROF);
	} else
		mtx_exit(&sched_lock, MTX_SPIN);

	userret(p, &frame, sticks);

	if (mtx_owned(&Giant))
		mtx_exit(&Giant, MTX_DEF);
}