subr_syscall.c revision 69881
1/*- 2 * Copyright (C) 1994, David Greenman 3 * Copyright (c) 1990, 1993 4 * The Regents of the University of California. All rights reserved. 5 * 6 * This code is derived from software contributed to Berkeley by 7 * the University of Utah, and William Jolitz. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by the University of 20 * California, Berkeley and its contributors. 21 * 4. Neither the name of the University nor the names of its contributors 22 * may be used to endorse or promote products derived from this software 23 * without specific prior written permission. 24 * 25 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 35 * SUCH DAMAGE. 36 * 37 * from: @(#)trap.c 7.4 (Berkeley) 5/13/91 38 * $FreeBSD: head/sys/kern/subr_trap.c 69881 2000-12-12 01:14:32Z jake $ 39 */ 40 41/* 42 * 386 Trap and System call handling 43 */ 44 45#include "opt_cpu.h" 46#include "opt_ddb.h" 47#include "opt_ktrace.h" 48#include "opt_clock.h" 49#include "opt_trap.h" 50 51#include <sys/param.h> 52#include <sys/bus.h> 53#include <sys/systm.h> 54#include <sys/proc.h> 55#include <sys/pioctl.h> 56#include <sys/ipl.h> 57#include <sys/kernel.h> 58#include <sys/ktr.h> 59#include <sys/mutex.h> 60#include <sys/resourcevar.h> 61#include <sys/signalvar.h> 62#include <sys/syscall.h> 63#include <sys/sysctl.h> 64#include <sys/sysent.h> 65#include <sys/uio.h> 66#include <sys/vmmeter.h> 67#ifdef KTRACE 68#include <sys/ktrace.h> 69#endif 70 71#include <vm/vm.h> 72#include <vm/vm_param.h> 73#include <sys/lock.h> 74#include <vm/pmap.h> 75#include <vm/vm_kern.h> 76#include <vm/vm_map.h> 77#include <vm/vm_page.h> 78#include <vm/vm_extern.h> 79 80#include <machine/cpu.h> 81#include <machine/md_var.h> 82#include <machine/pcb.h> 83#ifdef SMP 84#include <machine/smp.h> 85#endif 86#include <machine/tss.h> 87 88#include <i386/isa/icu.h> 89#include <i386/isa/intr_machdep.h> 90 91#ifdef POWERFAIL_NMI 92#include <sys/syslog.h> 93#include <machine/clock.h> 94#endif 95 96#include <machine/vm86.h> 97 98#include <ddb/ddb.h> 99 100#include "isa.h" 101#include "npx.h" 102 103#include <sys/sysctl.h> 104 105int (*pmath_emulate) __P((struct trapframe *)); 106 107extern void trap __P((struct trapframe frame)); 108extern int trapwrite __P((unsigned addr)); 109extern void syscall2 __P((struct trapframe frame)); 110extern void ast __P((struct trapframe frame)); 111 112static int trap_pfault __P((struct trapframe *, int, vm_offset_t)); 113static void trap_fatal __P((struct trapframe *, vm_offset_t)); 114void dblfault_handler __P((void)); 115 116extern inthand_t IDTVEC(syscall); 117 118#define MAX_TRAP_MSG 28 119static char *trap_msg[] = { 120 "", /* 0 unused */ 121 "privileged instruction fault", /* 1 T_PRIVINFLT */ 122 "", /* 2 unused */ 123 "breakpoint instruction fault", /* 3 T_BPTFLT */ 124 "", /* 4 unused */ 125 "", /* 5 unused */ 126 "arithmetic trap", /* 6 T_ARITHTRAP */ 127 "system forced exception", /* 7 T_ASTFLT */ 128 "", /* 8 unused */ 129 "general protection fault", /* 9 T_PROTFLT */ 130 "trace trap", /* 10 T_TRCTRAP */ 131 "", /* 11 unused */ 132 "page fault", /* 12 T_PAGEFLT */ 133 "", /* 13 unused */ 134 "alignment fault", /* 14 T_ALIGNFLT */ 135 "", /* 15 unused */ 136 "", /* 16 unused */ 137 "", /* 17 unused */ 138 "integer divide fault", /* 18 T_DIVIDE */ 139 "non-maskable interrupt trap", /* 19 T_NMI */ 140 "overflow trap", /* 20 T_OFLOW */ 141 "FPU bounds check fault", /* 21 T_BOUND */ 142 "FPU device not available", /* 22 T_DNA */ 143 "double fault", /* 23 T_DOUBLEFLT */ 144 "FPU operand fetch fault", /* 24 T_FPOPFLT */ 145 "invalid TSS fault", /* 25 T_TSSFLT */ 146 "segment not present fault", /* 26 T_SEGNPFLT */ 147 "stack fault", /* 27 T_STKFLT */ 148 "machine check trap", /* 28 T_MCHK */ 149}; 150 151static __inline int userret __P((struct proc *p, struct trapframe *frame, 152 u_quad_t oticks, int have_giant)); 153 154#if defined(I586_CPU) && !defined(NO_F00F_HACK) 155extern int has_f00f_bug; 156#endif 157 158#ifdef DDB 159static int ddb_on_nmi = 1; 160SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW, 161 &ddb_on_nmi, 0, "Go to DDB on NMI"); 162#endif 163static int panic_on_nmi = 1; 164SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW, 165 &panic_on_nmi, 0, "Panic on NMI"); 166 167#ifdef WITNESS 168extern char *syscallnames[]; 169#endif 170 171static __inline int 172userret(p, frame, oticks, have_giant) 173 struct proc *p; 174 struct trapframe *frame; 175 u_quad_t oticks; 176 int have_giant; 177{ 178 int sig, s; 179 180 while ((sig = CURSIG(p)) != 0) { 181 if (have_giant == 0) { 182 mtx_enter(&Giant, MTX_DEF); 183 have_giant = 1; 184 } 185 postsig(sig); 186 } 187 188 p->p_priority = p->p_usrpri; 189 if (resched_wanted()) { 190 /* 191 * Since we are curproc, clock will normally just change 192 * our priority without moving us from one queue to another 193 * (since the running process is not on a queue.) 194 * If that happened after we setrunqueue ourselves but before we 195 * mi_switch()'ed, we might not be on the queue indicated by 196 * our priority. 197 */ 198 s = splhigh(); 199 mtx_enter(&sched_lock, MTX_SPIN); 200 DROP_GIANT_NOSWITCH(); 201 setrunqueue(p); 202 p->p_stats->p_ru.ru_nivcsw++; 203 mi_switch(); 204 mtx_exit(&sched_lock, MTX_SPIN); 205 PICKUP_GIANT(); 206 splx(s); 207 while ((sig = CURSIG(p)) != 0) { 208 if (have_giant == 0) { 209 mtx_enter(&Giant, MTX_DEF); 210 have_giant = 1; 211 } 212 postsig(sig); 213 } 214 } 215 /* 216 * Charge system time if profiling. 217 */ 218 if (p->p_flag & P_PROFIL) { 219 if (have_giant == 0) { 220 mtx_enter(&Giant, MTX_DEF); 221 have_giant = 1; 222 } 223 addupc_task(p, frame->tf_eip, 224 (u_int)(p->p_sticks - oticks) * psratio); 225 } 226 curpriority = p->p_priority; 227 return(have_giant); 228} 229 230/* 231 * Exception, fault, and trap interface to the FreeBSD kernel. 232 * This common code is called from assembly language IDT gate entry 233 * routines that prepare a suitable stack frame, and restore this 234 * frame after the exception has been processed. 235 */ 236 237void 238trap(frame) 239 struct trapframe frame; 240{ 241 struct proc *p = curproc; 242 u_quad_t sticks = 0; 243 int i = 0, ucode = 0, type, code; 244 vm_offset_t eva; 245#ifdef POWERFAIL_NMI 246 static int lastalert = 0; 247#endif 248 249 atomic_add_int(&cnt.v_trap, 1); 250 251 if ((frame.tf_eflags & PSL_I) == 0) { 252 /* 253 * Buggy application or kernel code has disabled 254 * interrupts and then trapped. Enabling interrupts 255 * now is wrong, but it is better than running with 256 * interrupts disabled until they are accidentally 257 * enabled later. XXX Consider whether is this still 258 * correct. 259 */ 260 type = frame.tf_trapno; 261 if (ISPL(frame.tf_cs) == SEL_UPL || (frame.tf_eflags & PSL_VM)) 262 printf( 263 "pid %ld (%s): trap %d with interrupts disabled\n", 264 (long)curproc->p_pid, curproc->p_comm, type); 265 else if (type != T_BPTFLT && type != T_TRCTRAP) 266 /* 267 * XXX not quite right, since this may be for a 268 * multiple fault in user mode. 269 */ 270 printf("kernel trap %d with interrupts disabled\n", 271 type); 272 enable_intr(); 273 } 274 275 eva = 0; 276 if (frame.tf_trapno == T_PAGEFLT) { 277 /* 278 * For some Cyrix CPUs, %cr2 is clobbered by 279 * interrupts. This problem is worked around by using 280 * an interrupt gate for the pagefault handler. We 281 * are finally ready to read %cr2 and then must 282 * reenable interrupts. 283 */ 284 eva = rcr2(); 285 enable_intr(); 286 } 287 288 mtx_enter(&Giant, MTX_DEF); 289 290#if defined(I586_CPU) && !defined(NO_F00F_HACK) 291restart: 292#endif 293 294 type = frame.tf_trapno; 295 code = frame.tf_err; 296 297 if ((ISPL(frame.tf_cs) == SEL_UPL) || 298 ((frame.tf_eflags & PSL_VM) && !in_vm86call)) { 299 /* user trap */ 300 301 sticks = p->p_sticks; 302 p->p_md.md_regs = &frame; 303 304 switch (type) { 305 case T_PRIVINFLT: /* privileged instruction fault */ 306 ucode = type; 307 i = SIGILL; 308 break; 309 310 case T_BPTFLT: /* bpt instruction fault */ 311 case T_TRCTRAP: /* trace trap */ 312 frame.tf_eflags &= ~PSL_T; 313 i = SIGTRAP; 314 break; 315 316 case T_ARITHTRAP: /* arithmetic trap */ 317 ucode = code; 318 i = SIGFPE; 319 break; 320 321 /* 322 * The following two traps can happen in 323 * vm86 mode, and, if so, we want to handle 324 * them specially. 325 */ 326 case T_PROTFLT: /* general protection fault */ 327 case T_STKFLT: /* stack fault */ 328 if (frame.tf_eflags & PSL_VM) { 329 i = vm86_emulate((struct vm86frame *)&frame); 330 if (i == 0) 331 goto user; 332 break; 333 } 334 /* FALL THROUGH */ 335 336 case T_SEGNPFLT: /* segment not present fault */ 337 case T_TSSFLT: /* invalid TSS fault */ 338 case T_DOUBLEFLT: /* double fault */ 339 default: 340 ucode = code + BUS_SEGM_FAULT ; 341 i = SIGBUS; 342 break; 343 344 case T_PAGEFLT: /* page fault */ 345 i = trap_pfault(&frame, TRUE, eva); 346#if defined(I586_CPU) && !defined(NO_F00F_HACK) 347 if (i == -2) { 348 /* 349 * f00f hack workaround has triggered, treat 350 * as illegal instruction not page fault. 351 */ 352 frame.tf_trapno = T_PRIVINFLT; 353 goto restart; 354 } 355#endif 356 if (i == -1) 357 goto out; 358 if (i == 0) 359 goto user; 360 361 ucode = T_PAGEFLT; 362 break; 363 364 case T_DIVIDE: /* integer divide fault */ 365 ucode = FPE_INTDIV; 366 i = SIGFPE; 367 break; 368 369#if NISA > 0 370 case T_NMI: 371#ifdef POWERFAIL_NMI 372#ifndef TIMER_FREQ 373# define TIMER_FREQ 1193182 374#endif 375 if (time_second - lastalert > 10) { 376 log(LOG_WARNING, "NMI: power fail\n"); 377 sysbeep(TIMER_FREQ/880, hz); 378 lastalert = time_second; 379 } 380 goto out; 381#else /* !POWERFAIL_NMI */ 382 /* machine/parity/power fail/"kitchen sink" faults */ 383 if (isa_nmi(code) == 0) { 384#ifdef DDB 385 /* 386 * NMI can be hooked up to a pushbutton 387 * for debugging. 388 */ 389 if (ddb_on_nmi) { 390 printf ("NMI ... going to debugger\n"); 391 kdb_trap (type, 0, &frame); 392 } 393#endif /* DDB */ 394 goto out; 395 } else if (panic_on_nmi) 396 panic("NMI indicates hardware failure"); 397 break; 398#endif /* POWERFAIL_NMI */ 399#endif /* NISA > 0 */ 400 401 case T_OFLOW: /* integer overflow fault */ 402 ucode = FPE_INTOVF; 403 i = SIGFPE; 404 break; 405 406 case T_BOUND: /* bounds check fault */ 407 ucode = FPE_FLTSUB; 408 i = SIGFPE; 409 break; 410 411 case T_DNA: 412#if NNPX > 0 413 /* transparent fault (due to context switch "late") */ 414 if (npxdna()) 415 goto out; 416#endif 417 if (!pmath_emulate) { 418 i = SIGFPE; 419 ucode = FPE_FPU_NP_TRAP; 420 break; 421 } 422 i = (*pmath_emulate)(&frame); 423 if (i == 0) { 424 if (!(frame.tf_eflags & PSL_T)) 425 goto out; 426 frame.tf_eflags &= ~PSL_T; 427 i = SIGTRAP; 428 } 429 /* else ucode = emulator_only_knows() XXX */ 430 break; 431 432 case T_FPOPFLT: /* FPU operand fetch fault */ 433 ucode = T_FPOPFLT; 434 i = SIGILL; 435 break; 436 } 437 } else { 438 /* kernel trap */ 439 440 switch (type) { 441 case T_PAGEFLT: /* page fault */ 442 (void) trap_pfault(&frame, FALSE, eva); 443 goto out; 444 445 case T_DNA: 446#if NNPX > 0 447 /* 448 * The kernel is apparently using npx for copying. 449 * XXX this should be fatal unless the kernel has 450 * registered such use. 451 */ 452 if (npxdna()) 453 goto out; 454#endif 455 break; 456 457 /* 458 * The following two traps can happen in 459 * vm86 mode, and, if so, we want to handle 460 * them specially. 461 */ 462 case T_PROTFLT: /* general protection fault */ 463 case T_STKFLT: /* stack fault */ 464 if (frame.tf_eflags & PSL_VM) { 465 i = vm86_emulate((struct vm86frame *)&frame); 466 if (i != 0) 467 /* 468 * returns to original process 469 */ 470 vm86_trap((struct vm86frame *)&frame); 471 goto out; 472 } 473 if (type == T_STKFLT) 474 break; 475 476 /* FALL THROUGH */ 477 478 case T_SEGNPFLT: /* segment not present fault */ 479 if (in_vm86call) 480 break; 481 482 if (intr_nesting_level != 0) 483 break; 484 485 /* 486 * Invalid %fs's and %gs's can be created using 487 * procfs or PT_SETREGS or by invalidating the 488 * underlying LDT entry. This causes a fault 489 * in kernel mode when the kernel attempts to 490 * switch contexts. Lose the bad context 491 * (XXX) so that we can continue, and generate 492 * a signal. 493 */ 494 if (frame.tf_eip == (int)cpu_switch_load_gs) { 495 curpcb->pcb_gs = 0; 496 psignal(p, SIGBUS); 497 goto out; 498 } 499 500 /* 501 * Invalid segment selectors and out of bounds 502 * %eip's and %esp's can be set up in user mode. 503 * This causes a fault in kernel mode when the 504 * kernel tries to return to user mode. We want 505 * to get this fault so that we can fix the 506 * problem here and not have to check all the 507 * selectors and pointers when the user changes 508 * them. 509 */ 510 if (frame.tf_eip == (int)doreti_iret) { 511 frame.tf_eip = (int)doreti_iret_fault; 512 goto out; 513 } 514 if (frame.tf_eip == (int)doreti_popl_ds) { 515 frame.tf_eip = (int)doreti_popl_ds_fault; 516 goto out; 517 } 518 if (frame.tf_eip == (int)doreti_popl_es) { 519 frame.tf_eip = (int)doreti_popl_es_fault; 520 goto out; 521 } 522 if (frame.tf_eip == (int)doreti_popl_fs) { 523 frame.tf_eip = (int)doreti_popl_fs_fault; 524 goto out; 525 } 526 if (curpcb && curpcb->pcb_onfault) { 527 frame.tf_eip = (int)curpcb->pcb_onfault; 528 goto out; 529 } 530 break; 531 532 case T_TSSFLT: 533 /* 534 * PSL_NT can be set in user mode and isn't cleared 535 * automatically when the kernel is entered. This 536 * causes a TSS fault when the kernel attempts to 537 * `iret' because the TSS link is uninitialized. We 538 * want to get this fault so that we can fix the 539 * problem here and not every time the kernel is 540 * entered. 541 */ 542 if (frame.tf_eflags & PSL_NT) { 543 frame.tf_eflags &= ~PSL_NT; 544 goto out; 545 } 546 break; 547 548 case T_TRCTRAP: /* trace trap */ 549 if (frame.tf_eip == (int)IDTVEC(syscall)) { 550 /* 551 * We've just entered system mode via the 552 * syscall lcall. Continue single stepping 553 * silently until the syscall handler has 554 * saved the flags. 555 */ 556 goto out; 557 } 558 if (frame.tf_eip == (int)IDTVEC(syscall) + 1) { 559 /* 560 * The syscall handler has now saved the 561 * flags. Stop single stepping it. 562 */ 563 frame.tf_eflags &= ~PSL_T; 564 goto out; 565 } 566 /* 567 * Ignore debug register trace traps due to 568 * accesses in the user's address space, which 569 * can happen under several conditions such as 570 * if a user sets a watchpoint on a buffer and 571 * then passes that buffer to a system call. 572 * We still want to get TRCTRAPS for addresses 573 * in kernel space because that is useful when 574 * debugging the kernel. 575 */ 576 if (user_dbreg_trap() && !in_vm86call) { 577 /* 578 * Reset breakpoint bits because the 579 * processor doesn't 580 */ 581 load_dr6(rdr6() & 0xfffffff0); 582 goto out; 583 } 584 /* 585 * Fall through (TRCTRAP kernel mode, kernel address) 586 */ 587 case T_BPTFLT: 588 /* 589 * If DDB is enabled, let it handle the debugger trap. 590 * Otherwise, debugger traps "can't happen". 591 */ 592#ifdef DDB 593 if (kdb_trap (type, 0, &frame)) 594 goto out; 595#endif 596 break; 597 598#if NISA > 0 599 case T_NMI: 600#ifdef POWERFAIL_NMI 601 if (time_second - lastalert > 10) { 602 log(LOG_WARNING, "NMI: power fail\n"); 603 sysbeep(TIMER_FREQ/880, hz); 604 lastalert = time_second; 605 } 606 goto out; 607#else /* !POWERFAIL_NMI */ 608 /* machine/parity/power fail/"kitchen sink" faults */ 609 if (isa_nmi(code) == 0) { 610#ifdef DDB 611 /* 612 * NMI can be hooked up to a pushbutton 613 * for debugging. 614 */ 615 if (ddb_on_nmi) { 616 printf ("NMI ... going to debugger\n"); 617 kdb_trap (type, 0, &frame); 618 } 619#endif /* DDB */ 620 goto out; 621 } else if (panic_on_nmi == 0) 622 goto out; 623 /* FALL THROUGH */ 624#endif /* POWERFAIL_NMI */ 625#endif /* NISA > 0 */ 626 } 627 628 trap_fatal(&frame, eva); 629 goto out; 630 } 631 632 /* Translate fault for emulators (e.g. Linux) */ 633 if (*p->p_sysent->sv_transtrap) 634 i = (*p->p_sysent->sv_transtrap)(i, type); 635 636 trapsignal(p, i, ucode); 637 638#ifdef DEBUG 639 if (type <= MAX_TRAP_MSG) { 640 uprintf("fatal process exception: %s", 641 trap_msg[type]); 642 if ((type == T_PAGEFLT) || (type == T_PROTFLT)) 643 uprintf(", fault VA = 0x%lx", (u_long)eva); 644 uprintf("\n"); 645 } 646#endif 647 648user: 649 userret(p, &frame, sticks, 1); 650out: 651 mtx_exit(&Giant, MTX_DEF); 652} 653 654#ifdef notyet 655/* 656 * This version doesn't allow a page fault to user space while 657 * in the kernel. The rest of the kernel needs to be made "safe" 658 * before this can be used. I think the only things remaining 659 * to be made safe are the iBCS2 code and the process tracing/ 660 * debugging code. 661 */ 662static int 663trap_pfault(frame, usermode, eva) 664 struct trapframe *frame; 665 int usermode; 666 vm_offset_t eva; 667{ 668 vm_offset_t va; 669 struct vmspace *vm = NULL; 670 vm_map_t map = 0; 671 int rv = 0; 672 vm_prot_t ftype; 673 struct proc *p = curproc; 674 675 if (frame->tf_err & PGEX_W) 676 ftype = VM_PROT_WRITE; 677 else 678 ftype = VM_PROT_READ; 679 680 va = trunc_page(eva); 681 if (va < VM_MIN_KERNEL_ADDRESS) { 682 vm_offset_t v; 683 vm_page_t mpte; 684 685 if (p == NULL || 686 (!usermode && va < VM_MAXUSER_ADDRESS && 687 (intr_nesting_level != 0 || curpcb == NULL || 688 curpcb->pcb_onfault == NULL))) { 689 trap_fatal(frame, eva); 690 return (-1); 691 } 692 693 /* 694 * This is a fault on non-kernel virtual memory. 695 * vm is initialized above to NULL. If curproc is NULL 696 * or curproc->p_vmspace is NULL the fault is fatal. 697 */ 698 vm = p->p_vmspace; 699 if (vm == NULL) 700 goto nogo; 701 702 map = &vm->vm_map; 703 704 /* 705 * Keep swapout from messing with us during this 706 * critical time. 707 */ 708 ++p->p_lock; 709 710 /* 711 * Grow the stack if necessary 712 */ 713 /* grow_stack returns false only if va falls into 714 * a growable stack region and the stack growth 715 * fails. It returns true if va was not within 716 * a growable stack region, or if the stack 717 * growth succeeded. 718 */ 719 if (!grow_stack (p, va)) { 720 rv = KERN_FAILURE; 721 --p->p_lock; 722 goto nogo; 723 } 724 725 /* Fault in the user page: */ 726 rv = vm_fault(map, va, ftype, 727 (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY 728 : VM_FAULT_NORMAL); 729 730 --p->p_lock; 731 } else { 732 /* 733 * Don't allow user-mode faults in kernel address space. 734 */ 735 if (usermode) 736 goto nogo; 737 738 /* 739 * Since we know that kernel virtual address addresses 740 * always have pte pages mapped, we just have to fault 741 * the page. 742 */ 743 rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL); 744 } 745 746 if (rv == KERN_SUCCESS) 747 return (0); 748nogo: 749 if (!usermode) { 750 if (intr_nesting_level == 0 && curpcb && curpcb->pcb_onfault) { 751 frame->tf_eip = (int)curpcb->pcb_onfault; 752 return (0); 753 } 754 trap_fatal(frame, eva); 755 return (-1); 756 } 757 758 /* kludge to pass faulting virtual address to sendsig */ 759 frame->tf_err = eva; 760 761 return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV); 762} 763#endif 764 765int 766trap_pfault(frame, usermode, eva) 767 struct trapframe *frame; 768 int usermode; 769 vm_offset_t eva; 770{ 771 vm_offset_t va; 772 struct vmspace *vm = NULL; 773 vm_map_t map = 0; 774 int rv = 0; 775 vm_prot_t ftype; 776 struct proc *p = curproc; 777 778 va = trunc_page(eva); 779 if (va >= KERNBASE) { 780 /* 781 * Don't allow user-mode faults in kernel address space. 782 * An exception: if the faulting address is the invalid 783 * instruction entry in the IDT, then the Intel Pentium 784 * F00F bug workaround was triggered, and we need to 785 * treat it is as an illegal instruction, and not a page 786 * fault. 787 */ 788#if defined(I586_CPU) && !defined(NO_F00F_HACK) 789 if ((eva == (unsigned int)&idt[6]) && has_f00f_bug) 790 return -2; 791#endif 792 if (usermode) 793 goto nogo; 794 795 map = kernel_map; 796 } else { 797 /* 798 * This is a fault on non-kernel virtual memory. 799 * vm is initialized above to NULL. If curproc is NULL 800 * or curproc->p_vmspace is NULL the fault is fatal. 801 */ 802 if (p != NULL) 803 vm = p->p_vmspace; 804 805 if (vm == NULL) 806 goto nogo; 807 808 map = &vm->vm_map; 809 } 810 811 if (frame->tf_err & PGEX_W) 812 ftype = VM_PROT_WRITE; 813 else 814 ftype = VM_PROT_READ; 815 816 if (map != kernel_map) { 817 /* 818 * Keep swapout from messing with us during this 819 * critical time. 820 */ 821 ++p->p_lock; 822 823 /* 824 * Grow the stack if necessary 825 */ 826 /* grow_stack returns false only if va falls into 827 * a growable stack region and the stack growth 828 * fails. It returns true if va was not within 829 * a growable stack region, or if the stack 830 * growth succeeded. 831 */ 832 if (!grow_stack (p, va)) { 833 rv = KERN_FAILURE; 834 --p->p_lock; 835 goto nogo; 836 } 837 838 /* Fault in the user page: */ 839 rv = vm_fault(map, va, ftype, 840 (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY 841 : VM_FAULT_NORMAL); 842 843 --p->p_lock; 844 } else { 845 /* 846 * Don't have to worry about process locking or stacks in the kernel. 847 */ 848 rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL); 849 } 850 851 if (rv == KERN_SUCCESS) 852 return (0); 853nogo: 854 if (!usermode) { 855 if (intr_nesting_level == 0 && curpcb && curpcb->pcb_onfault) { 856 frame->tf_eip = (int)curpcb->pcb_onfault; 857 return (0); 858 } 859 trap_fatal(frame, eva); 860 return (-1); 861 } 862 863 /* kludge to pass faulting virtual address to sendsig */ 864 frame->tf_err = eva; 865 866 return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV); 867} 868 869static void 870trap_fatal(frame, eva) 871 struct trapframe *frame; 872 vm_offset_t eva; 873{ 874 int code, type, ss, esp; 875 struct soft_segment_descriptor softseg; 876 877 code = frame->tf_err; 878 type = frame->tf_trapno; 879 sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)].sd, &softseg); 880 881 if (type <= MAX_TRAP_MSG) 882 printf("\n\nFatal trap %d: %s while in %s mode\n", 883 type, trap_msg[type], 884 frame->tf_eflags & PSL_VM ? "vm86" : 885 ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel"); 886#ifdef SMP 887 /* two seperate prints in case of a trap on an unmapped page */ 888 printf("cpuid = %d; ", cpuid); 889 printf("lapic.id = %08x\n", lapic.id); 890#endif 891 if (type == T_PAGEFLT) { 892 printf("fault virtual address = 0x%x\n", eva); 893 printf("fault code = %s %s, %s\n", 894 code & PGEX_U ? "user" : "supervisor", 895 code & PGEX_W ? "write" : "read", 896 code & PGEX_P ? "protection violation" : "page not present"); 897 } 898 printf("instruction pointer = 0x%x:0x%x\n", 899 frame->tf_cs & 0xffff, frame->tf_eip); 900 if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) { 901 ss = frame->tf_ss & 0xffff; 902 esp = frame->tf_esp; 903 } else { 904 ss = GSEL(GDATA_SEL, SEL_KPL); 905 esp = (int)&frame->tf_esp; 906 } 907 printf("stack pointer = 0x%x:0x%x\n", ss, esp); 908 printf("frame pointer = 0x%x:0x%x\n", ss, frame->tf_ebp); 909 printf("code segment = base 0x%x, limit 0x%x, type 0x%x\n", 910 softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type); 911 printf(" = DPL %d, pres %d, def32 %d, gran %d\n", 912 softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32, 913 softseg.ssd_gran); 914 printf("processor eflags = "); 915 if (frame->tf_eflags & PSL_T) 916 printf("trace trap, "); 917 if (frame->tf_eflags & PSL_I) 918 printf("interrupt enabled, "); 919 if (frame->tf_eflags & PSL_NT) 920 printf("nested task, "); 921 if (frame->tf_eflags & PSL_RF) 922 printf("resume, "); 923 if (frame->tf_eflags & PSL_VM) 924 printf("vm86, "); 925 printf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12); 926 printf("current process = "); 927 if (curproc) { 928 printf("%lu (%s)\n", 929 (u_long)curproc->p_pid, curproc->p_comm ? 930 curproc->p_comm : ""); 931 } else { 932 printf("Idle\n"); 933 } 934 935#ifdef KDB 936 if (kdb_trap(&psl)) 937 return; 938#endif 939#ifdef DDB 940 if ((debugger_on_panic || db_active) && kdb_trap(type, 0, frame)) 941 return; 942#endif 943 printf("trap number = %d\n", type); 944 if (type <= MAX_TRAP_MSG) 945 panic(trap_msg[type]); 946 else 947 panic("unknown/reserved trap"); 948} 949 950/* 951 * Double fault handler. Called when a fault occurs while writing 952 * a frame for a trap/exception onto the stack. This usually occurs 953 * when the stack overflows (such is the case with infinite recursion, 954 * for example). 955 * 956 * XXX Note that the current PTD gets replaced by IdlePTD when the 957 * task switch occurs. This means that the stack that was active at 958 * the time of the double fault is not available at <kstack> unless 959 * the machine was idle when the double fault occurred. The downside 960 * of this is that "trace <ebp>" in ddb won't work. 961 */ 962void 963dblfault_handler() 964{ 965 printf("\nFatal double fault:\n"); 966 printf("eip = 0x%x\n", common_tss.tss_eip); 967 printf("esp = 0x%x\n", common_tss.tss_esp); 968 printf("ebp = 0x%x\n", common_tss.tss_ebp); 969#ifdef SMP 970 /* two seperate prints in case of a trap on an unmapped page */ 971 printf("cpuid = %d; ", cpuid); 972 printf("lapic.id = %08x\n", lapic.id); 973#endif 974 panic("double fault"); 975} 976 977/* 978 * Compensate for 386 brain damage (missing URKR). 979 * This is a little simpler than the pagefault handler in trap() because 980 * it the page tables have already been faulted in and high addresses 981 * are thrown out early for other reasons. 982 */ 983int trapwrite(addr) 984 unsigned addr; 985{ 986 struct proc *p; 987 vm_offset_t va; 988 struct vmspace *vm; 989 int rv; 990 991 va = trunc_page((vm_offset_t)addr); 992 /* 993 * XXX - MAX is END. Changed > to >= for temp. fix. 994 */ 995 if (va >= VM_MAXUSER_ADDRESS) 996 return (1); 997 998 p = curproc; 999 vm = p->p_vmspace; 1000 1001 ++p->p_lock; 1002 1003 if (!grow_stack (p, va)) { 1004 --p->p_lock; 1005 return (1); 1006 } 1007 1008 /* 1009 * fault the data page 1010 */ 1011 rv = vm_fault(&vm->vm_map, va, VM_PROT_WRITE, VM_FAULT_DIRTY); 1012 1013 --p->p_lock; 1014 1015 if (rv != KERN_SUCCESS) 1016 return 1; 1017 1018 return (0); 1019} 1020 1021/* 1022 * syscall2 - MP aware system call request C handler 1023 * 1024 * A system call is essentially treated as a trap except that the 1025 * MP lock is not held on entry or return. We are responsible for 1026 * obtaining the MP lock if necessary and for handling ASTs 1027 * (e.g. a task switch) prior to return. 1028 * 1029 * In general, only simple access and manipulation of curproc and 1030 * the current stack is allowed without having to hold MP lock. 1031 */ 1032void 1033syscall2(frame) 1034 struct trapframe frame; 1035{ 1036 caddr_t params; 1037 int i; 1038 struct sysent *callp; 1039 struct proc *p = curproc; 1040 u_quad_t sticks; 1041 int error; 1042 int narg; 1043 int args[8]; 1044 int have_giant = 0; 1045 u_int code; 1046 1047 atomic_add_int(&cnt.v_syscall, 1); 1048 1049#ifdef DIAGNOSTIC 1050 if (ISPL(frame.tf_cs) != SEL_UPL) { 1051 mtx_enter(&Giant, MTX_DEF); 1052 panic("syscall"); 1053 /* NOT REACHED */ 1054 } 1055#endif 1056 1057 /* 1058 * handle atomicy by looping since interrupts are enabled and the 1059 * MP lock is not held. 1060 */ 1061 sticks = ((volatile struct proc *)p)->p_sticks; 1062 while (sticks != ((volatile struct proc *)p)->p_sticks) 1063 sticks = ((volatile struct proc *)p)->p_sticks; 1064 1065 p->p_md.md_regs = &frame; 1066 params = (caddr_t)frame.tf_esp + sizeof(int); 1067 code = frame.tf_eax; 1068 1069 if (p->p_sysent->sv_prepsyscall) { 1070 /* 1071 * The prep code is not MP aware. 1072 */ 1073 mtx_enter(&Giant, MTX_DEF); 1074 (*p->p_sysent->sv_prepsyscall)(&frame, args, &code, ¶ms); 1075 mtx_exit(&Giant, MTX_DEF); 1076 } else { 1077 /* 1078 * Need to check if this is a 32 bit or 64 bit syscall. 1079 * fuword is MP aware. 1080 */ 1081 if (code == SYS_syscall) { 1082 /* 1083 * Code is first argument, followed by actual args. 1084 */ 1085 code = fuword(params); 1086 params += sizeof(int); 1087 } else if (code == SYS___syscall) { 1088 /* 1089 * Like syscall, but code is a quad, so as to maintain 1090 * quad alignment for the rest of the arguments. 1091 */ 1092 code = fuword(params); 1093 params += sizeof(quad_t); 1094 } 1095 } 1096 1097 if (p->p_sysent->sv_mask) 1098 code &= p->p_sysent->sv_mask; 1099 1100 if (code >= p->p_sysent->sv_size) 1101 callp = &p->p_sysent->sv_table[0]; 1102 else 1103 callp = &p->p_sysent->sv_table[code]; 1104 1105 narg = callp->sy_narg & SYF_ARGMASK; 1106 1107 /* 1108 * copyin is MP aware, but the tracing code is not 1109 */ 1110 if (params && (i = narg * sizeof(int)) && 1111 (error = copyin(params, (caddr_t)args, (u_int)i))) { 1112 mtx_enter(&Giant, MTX_DEF); 1113 have_giant = 1; 1114#ifdef KTRACE 1115 if (KTRPOINT(p, KTR_SYSCALL)) 1116 ktrsyscall(p->p_tracep, code, narg, args); 1117#endif 1118 goto bad; 1119 } 1120 1121 /* 1122 * Try to run the syscall without the MP lock if the syscall 1123 * is MP safe. We have to obtain the MP lock no matter what if 1124 * we are ktracing 1125 */ 1126 if ((callp->sy_narg & SYF_MPSAFE) == 0) { 1127 mtx_enter(&Giant, MTX_DEF); 1128 have_giant = 1; 1129 } 1130 1131#ifdef KTRACE 1132 if (KTRPOINT(p, KTR_SYSCALL)) { 1133 if (have_giant == 0) { 1134 mtx_enter(&Giant, MTX_DEF); 1135 have_giant = 1; 1136 } 1137 ktrsyscall(p->p_tracep, code, narg, args); 1138 } 1139#endif 1140 p->p_retval[0] = 0; 1141 p->p_retval[1] = frame.tf_edx; 1142 1143 STOPEVENT(p, S_SCE, narg); /* MP aware */ 1144 1145 error = (*callp->sy_call)(p, args); 1146 1147 /* 1148 * MP SAFE (we may or may not have the MP lock at this point) 1149 */ 1150 switch (error) { 1151 case 0: 1152 /* 1153 * Reinitialize proc pointer `p' as it may be different 1154 * if this is a child returning from fork syscall. 1155 */ 1156 p = curproc; 1157 frame.tf_eax = p->p_retval[0]; 1158 frame.tf_edx = p->p_retval[1]; 1159 frame.tf_eflags &= ~PSL_C; 1160 break; 1161 1162 case ERESTART: 1163 /* 1164 * Reconstruct pc, assuming lcall $X,y is 7 bytes, 1165 * int 0x80 is 2 bytes. We saved this in tf_err. 1166 */ 1167 frame.tf_eip -= frame.tf_err; 1168 break; 1169 1170 case EJUSTRETURN: 1171 break; 1172 1173 default: 1174bad: 1175 if (p->p_sysent->sv_errsize) { 1176 if (error >= p->p_sysent->sv_errsize) 1177 error = -1; /* XXX */ 1178 else 1179 error = p->p_sysent->sv_errtbl[error]; 1180 } 1181 frame.tf_eax = error; 1182 frame.tf_eflags |= PSL_C; 1183 break; 1184 } 1185 1186 /* 1187 * Traced syscall. trapsignal() is not MP aware. 1188 */ 1189 if ((frame.tf_eflags & PSL_T) && !(frame.tf_eflags & PSL_VM)) { 1190 if (have_giant == 0) { 1191 mtx_enter(&Giant, MTX_DEF); 1192 have_giant = 1; 1193 } 1194 frame.tf_eflags &= ~PSL_T; 1195 trapsignal(p, SIGTRAP, 0); 1196 } 1197 1198 /* 1199 * Handle reschedule and other end-of-syscall issues 1200 */ 1201 have_giant = userret(p, &frame, sticks, have_giant); 1202 1203#ifdef KTRACE 1204 if (KTRPOINT(p, KTR_SYSRET)) { 1205 if (have_giant == 0) { 1206 mtx_enter(&Giant, MTX_DEF); 1207 have_giant = 1; 1208 } 1209 ktrsysret(p->p_tracep, code, error, p->p_retval[0]); 1210 } 1211#endif 1212 1213 /* 1214 * This works because errno is findable through the 1215 * register set. If we ever support an emulation where this 1216 * is not the case, this code will need to be revisited. 1217 */ 1218 STOPEVENT(p, S_SCX, code); 1219 1220 /* 1221 * Release the MP lock if we had to get it 1222 */ 1223 if (have_giant) 1224 mtx_exit(&Giant, MTX_DEF); 1225 1226 mtx_assert(&sched_lock, MA_NOTOWNED); 1227 mtx_assert(&Giant, MA_NOTOWNED); 1228#ifdef WITNESS 1229 if (witness_list(p)) { 1230 panic("system call %s returning with mutex(s) held\n", 1231 syscallnames[code]); 1232 } 1233#endif 1234} 1235 1236void 1237ast(frame) 1238 struct trapframe frame; 1239{ 1240 struct proc *p = CURPROC; 1241 u_quad_t sticks; 1242 1243 /* 1244 * handle atomicy by looping since interrupts are enabled and the 1245 * MP lock is not held. 1246 */ 1247 sticks = ((volatile struct proc *)p)->p_sticks; 1248 while (sticks != ((volatile struct proc *)p)->p_sticks) 1249 sticks = ((volatile struct proc *)p)->p_sticks; 1250 1251 astoff(); 1252 atomic_add_int(&cnt.v_soft, 1); 1253 if (p->p_flag & P_OWEUPC) { 1254 mtx_enter(&Giant, MTX_DEF); 1255 p->p_flag &= ~P_OWEUPC; 1256 addupc_task(p, p->p_stats->p_prof.pr_addr, 1257 p->p_stats->p_prof.pr_ticks); 1258 } 1259 if (p->p_flag & P_ALRMPEND) { 1260 if (!mtx_owned(&Giant)) 1261 mtx_enter(&Giant, MTX_DEF); 1262 p->p_flag &= ~P_ALRMPEND; 1263 psignal(p, SIGVTALRM); 1264 } 1265 if (p->p_flag & P_PROFPEND) { 1266 if (!mtx_owned(&Giant)) 1267 mtx_enter(&Giant, MTX_DEF); 1268 p->p_flag &= ~P_PROFPEND; 1269 psignal(p, SIGPROF); 1270 } 1271 if (userret(p, &frame, sticks, mtx_owned(&Giant)) != 0) 1272 mtx_exit(&Giant, MTX_DEF); 1273} 1274 1275/* 1276 * Simplified back end of syscall(), used when returning from fork() 1277 * directly into user mode. Giant is not held on entry, and must not 1278 * be held on return. 1279 */ 1280void 1281fork_return(p, frame) 1282 struct proc *p; 1283 struct trapframe frame; 1284{ 1285 int have_giant; 1286 1287 frame.tf_eax = 0; /* Child returns zero */ 1288 frame.tf_eflags &= ~PSL_C; /* success */ 1289 frame.tf_edx = 1; 1290 1291 have_giant = userret(p, &frame, 0, mtx_owned(&Giant)); 1292#ifdef KTRACE 1293 if (KTRPOINT(p, KTR_SYSRET)) { 1294 if (have_giant == 0) { 1295 mtx_enter(&Giant, MTX_DEF); 1296 have_giant = 1; 1297 } 1298 ktrsysret(p->p_tracep, SYS_fork, 0, 0); 1299 } 1300#endif 1301 if (have_giant) 1302 mtx_exit(&Giant, MTX_DEF); 1303} 1304