subr_syscall.c revision 69987
/*-
 * Copyright (C) 1994, David Greenman
 * Copyright (c) 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the University of Utah, and William Jolitz.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)trap.c	7.4 (Berkeley) 5/13/91
 * $FreeBSD: head/sys/kern/subr_trap.c 69987 2000-12-13 18:57:15Z jhb $
 */

/*
 * 386 Trap and System call handling
 */

#include "opt_cpu.h"
#include "opt_ddb.h"
#include "opt_ktrace.h"
#include "opt_clock.h"
#include "opt_trap.h"

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pioctl.h>
#include <sys/ipl.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/mutex.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/syscall.h>
#include <sys/sysctl.h>
#include <sys/sysent.h>
#include <sys/uio.h>
#include <sys/vmmeter.h>
#ifdef KTRACE
#include <sys/ktrace.h>
#endif

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_extern.h>

#include <machine/cpu.h>
#include <machine/md_var.h>
#include <machine/pcb.h>
#ifdef SMP
#include <machine/smp.h>
#endif
#include <machine/tss.h>

#include <i386/isa/icu.h>
#include <i386/isa/intr_machdep.h>

#ifdef POWERFAIL_NMI
#include <sys/syslog.h>
#include <machine/clock.h>
#endif

#include <machine/vm86.h>

#include <ddb/ddb.h>

#include "isa.h"
#include "npx.h"

#include <sys/sysctl.h>

int (*pmath_emulate) __P((struct trapframe *));

extern void trap __P((struct trapframe frame));
extern int trapwrite __P((unsigned addr));
extern void syscall2 __P((struct trapframe frame));
extern void ast __P((struct trapframe frame));

static int trap_pfault __P((struct trapframe *, int, vm_offset_t));
static void trap_fatal __P((struct trapframe *, vm_offset_t));
void dblfault_handler __P((void));

extern inthand_t IDTVEC(syscall);

#define MAX_TRAP_MSG		28
static char *trap_msg[] = {
	"",					/*  0 unused */
	"privileged instruction fault",		/*  1 T_PRIVINFLT */
	"",					/*  2 unused */
	"breakpoint instruction fault",		/*  3 T_BPTFLT */
	"",					/*  4 unused */
	"",					/*  5 unused */
	"arithmetic trap",			/*  6 T_ARITHTRAP */
	"system forced exception",		/*  7 T_ASTFLT */
	"",					/*  8 unused */
	"general protection fault",		/*  9 T_PROTFLT */
	"trace trap",				/* 10 T_TRCTRAP */
	"",					/* 11 unused */
	"page fault",				/* 12 T_PAGEFLT */
	"",					/* 13 unused */
	"alignment fault",			/* 14 T_ALIGNFLT */
	"",					/* 15 unused */
	"",					/* 16 unused */
	"",					/* 17 unused */
	"integer divide fault",			/* 18 T_DIVIDE */
	"non-maskable interrupt trap",		/* 19 T_NMI */
	"overflow trap",			/* 20 T_OFLOW */
	"FPU bounds check fault",		/* 21 T_BOUND */
	"FPU device not available",		/* 22 T_DNA */
	"double fault",				/* 23 T_DOUBLEFLT */
	"FPU operand fetch fault",		/* 24 T_FPOPFLT */
	"invalid TSS fault",			/* 25 T_TSSFLT */
	"segment not present fault",		/* 26 T_SEGNPFLT */
	"stack fault",				/* 27 T_STKFLT */
	"machine check trap",			/* 28 T_MCHK */
};

static __inline int userret __P((struct proc *p, struct trapframe *frame,
				  u_quad_t oticks, int have_giant));

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
extern int has_f00f_bug;
#endif

#ifdef DDB
static int ddb_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, ddb_on_nmi, CTLFLAG_RW,
	&ddb_on_nmi, 0, "Go to DDB on NMI");
#endif
static int panic_on_nmi = 1;
SYSCTL_INT(_machdep, OID_AUTO, panic_on_nmi, CTLFLAG_RW,
	&panic_on_nmi, 0, "Panic on NMI");

#ifdef WITNESS
extern char *syscallnames[];
#endif

static __inline int
userret(p, frame, oticks, have_giant)
	struct proc *p;
	struct trapframe *frame;
	u_quad_t oticks;
	int have_giant;
{
	int sig, s;

	while ((sig = CURSIG(p)) != 0) {
		if (have_giant == 0) {
			mtx_enter(&Giant, MTX_DEF);
			have_giant = 1;
		}
		postsig(sig);
	}

	p->p_priority = p->p_usrpri;
	if (resched_wanted()) {
		/*
		 * Since we are curproc, clock will normally just change
		 * our priority without moving us from one queue to another
		 * (since the running process is not on a queue.)
		 * If that happened after we setrunqueue ourselves but before we
		 * mi_switch()'ed, we might not be on the queue indicated by
		 * our priority.
		 */
		s = splhigh();
		mtx_enter(&sched_lock, MTX_SPIN);
		DROP_GIANT_NOSWITCH();
		setrunqueue(p);
		p->p_stats->p_ru.ru_nivcsw++;
		mi_switch();
		mtx_exit(&sched_lock, MTX_SPIN);
		PICKUP_GIANT();
		splx(s);
		while ((sig = CURSIG(p)) != 0) {
			if (have_giant == 0) {
				mtx_enter(&Giant, MTX_DEF);
				have_giant = 1;
			}
			postsig(sig);
		}
	}
	/*
	 * Charge system time if profiling.
	 */
	if (p->p_flag & P_PROFIL) {
		if (have_giant == 0) {
			mtx_enter(&Giant, MTX_DEF);
			have_giant = 1;
		}
		addupc_task(p, frame->tf_eip,
			    (u_int)(p->p_sticks - oticks) * psratio);
	}
	curpriority = p->p_priority;
	return(have_giant);
}

/*
 * Exception, fault, and trap interface to the FreeBSD kernel.
 * This common code is called from assembly language IDT gate entry
 * routines that prepare a suitable stack frame, and restore this
 * frame after the exception has been processed.
 */

void
trap(frame)
	struct trapframe frame;
{
	struct proc *p = curproc;
	u_quad_t sticks = 0;
	int i = 0, ucode = 0, type, code;
	vm_offset_t eva;
#ifdef POWERFAIL_NMI
	static int lastalert = 0;
#endif

	atomic_add_int(&cnt.v_trap, 1);

	if ((frame.tf_eflags & PSL_I) == 0) {
		/*
		 * Buggy application or kernel code has disabled
		 * interrupts and then trapped.  Enabling interrupts
		 * now is wrong, but it is better than running with
		 * interrupts disabled until they are accidentally
		 * enabled later.  XXX Consider whether this is still
		 * correct.
		 */
		type = frame.tf_trapno;
		if (ISPL(frame.tf_cs) == SEL_UPL || (frame.tf_eflags & PSL_VM))
			printf(
			    "pid %ld (%s): trap %d with interrupts disabled\n",
			    (long)curproc->p_pid, curproc->p_comm, type);
		else if (type != T_BPTFLT && type != T_TRCTRAP)
			/*
			 * XXX not quite right, since this may be for a
			 * multiple fault in user mode.
			 */
			printf("kernel trap %d with interrupts disabled\n",
			    type);
		enable_intr();
	}

	eva = 0;
	if (frame.tf_trapno == T_PAGEFLT) {
		/*
		 * For some Cyrix CPUs, %cr2 is clobbered by
		 * interrupts.  This problem is worked around by using
		 * an interrupt gate for the pagefault handler.  We
		 * are finally ready to read %cr2 and then must
		 * reenable interrupts.
		 */
		eva = rcr2();
		enable_intr();
	}

	mtx_enter(&Giant, MTX_DEF);

#if defined(I586_CPU) && !defined(NO_F00F_HACK)
restart:
#endif

	type = frame.tf_trapno;
	code = frame.tf_err;

	if ((ISPL(frame.tf_cs) == SEL_UPL) ||
	    ((frame.tf_eflags & PSL_VM) && !in_vm86call)) {
		/* user trap */

		sticks = p->p_sticks;
		p->p_md.md_regs = &frame;

		switch (type) {
		case T_PRIVINFLT:	/* privileged instruction fault */
			ucode = type;
			i = SIGILL;
			break;

		case T_BPTFLT:		/* bpt instruction fault */
		case T_TRCTRAP:		/* trace trap */
			frame.tf_eflags &= ~PSL_T;
			i = SIGTRAP;
			break;

		case T_ARITHTRAP:	/* arithmetic trap */
			ucode = code;
			i = SIGFPE;
			break;

			/*
			 * The following two traps can happen in
			 * vm86 mode, and, if so, we want to handle
			 * them specially.
			 */
		case T_PROTFLT:		/* general protection fault */
		case T_STKFLT:		/* stack fault */
			if (frame.tf_eflags & PSL_VM) {
				i = vm86_emulate((struct vm86frame *)&frame);
				if (i == 0)
					goto user;
				break;
			}
			/* FALL THROUGH */

		case T_SEGNPFLT:	/* segment not present fault */
		case T_TSSFLT:		/* invalid TSS fault */
		case T_DOUBLEFLT:	/* double fault */
		default:
			ucode = code + BUS_SEGM_FAULT;
			i = SIGBUS;
			break;

		case T_PAGEFLT:		/* page fault */
			i = trap_pfault(&frame, TRUE, eva);
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
			if (i == -2) {
				/*
				 * f00f hack workaround has triggered, treat
				 * as illegal instruction not page fault.
				 */
				frame.tf_trapno = T_PRIVINFLT;
				goto restart;
			}
#endif
			if (i == -1)
				goto out;
			if (i == 0)
				goto user;

			ucode = T_PAGEFLT;
			break;

		case T_DIVIDE:		/* integer divide fault */
			ucode = FPE_INTDIV;
			i = SIGFPE;
			break;

#if NISA > 0
		case T_NMI:
#ifdef POWERFAIL_NMI
#ifndef TIMER_FREQ
# define TIMER_FREQ	1193182
#endif
			if (time_second - lastalert > 10) {
				log(LOG_WARNING, "NMI: power fail\n");
				sysbeep(TIMER_FREQ/880, hz);
				lastalert = time_second;
			}
			goto out;
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					printf ("NMI ... going to debugger\n");
					kdb_trap (type, 0, &frame);
				}
#endif /* DDB */
				goto out;
			} else if (panic_on_nmi)
				panic("NMI indicates hardware failure");
			break;
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */

		case T_OFLOW:		/* integer overflow fault */
			ucode = FPE_INTOVF;
			i = SIGFPE;
			break;

		case T_BOUND:		/* bounds check fault */
			ucode = FPE_FLTSUB;
			i = SIGFPE;
			break;

		case T_DNA:
#if NNPX > 0
			/* transparent fault (due to context switch "late") */
			if (npxdna())
				goto out;
#endif
			if (!pmath_emulate) {
				i = SIGFPE;
				ucode = FPE_FPU_NP_TRAP;
				break;
			}
			i = (*pmath_emulate)(&frame);
			if (i == 0) {
				if (!(frame.tf_eflags & PSL_T))
					goto out;
				frame.tf_eflags &= ~PSL_T;
				i = SIGTRAP;
			}
			/* else ucode = emulator_only_knows() XXX */
			break;

		case T_FPOPFLT:		/* FPU operand fetch fault */
			ucode = T_FPOPFLT;
			i = SIGILL;
			break;
		}
	} else {
		/* kernel trap */

		switch (type) {
		case T_PAGEFLT:			/* page fault */
			(void) trap_pfault(&frame, FALSE, eva);
			goto out;

		case T_DNA:
#if NNPX > 0
			/*
			 * The kernel is apparently using npx for copying.
			 * XXX this should be fatal unless the kernel has
			 * registered such use.
			 */
			if (npxdna())
				goto out;
#endif
			break;

			/*
			 * The following two traps can happen in
			 * vm86 mode, and, if so, we want to handle
			 * them specially.
			 */
		case T_PROTFLT:		/* general protection fault */
		case T_STKFLT:		/* stack fault */
			if (frame.tf_eflags & PSL_VM) {
				i = vm86_emulate((struct vm86frame *)&frame);
				if (i != 0) {
					/*
					 * returns to original process
					 */
					mtx_exit(&Giant, MTX_DEF);
					vm86_trap((struct vm86frame *)&frame);
				}
				goto out;
			}
			if (type == T_STKFLT)
				break;

			/* FALL THROUGH */

		case T_SEGNPFLT:	/* segment not present fault */
			if (in_vm86call)
				break;

			if (intr_nesting_level != 0)
				break;

			/*
			 * Invalid %fs's and %gs's can be created using
			 * procfs or PT_SETREGS or by invalidating the
			 * underlying LDT entry.  This causes a fault
			 * in kernel mode when the kernel attempts to
			 * switch contexts.  Lose the bad context
			 * (XXX) so that we can continue, and generate
			 * a signal.
			 */
			if (frame.tf_eip == (int)cpu_switch_load_gs) {
				curpcb->pcb_gs = 0;
				psignal(p, SIGBUS);
				goto out;
			}

			/*
			 * Invalid segment selectors and out of bounds
			 * %eip's and %esp's can be set up in user mode.
			 * This causes a fault in kernel mode when the
			 * kernel tries to return to user mode.  We want
			 * to get this fault so that we can fix the
			 * problem here and not have to check all the
			 * selectors and pointers when the user changes
			 * them.
			 */
			if (frame.tf_eip == (int)doreti_iret) {
				frame.tf_eip = (int)doreti_iret_fault;
				goto out;
			}
			if (frame.tf_eip == (int)doreti_popl_ds) {
				frame.tf_eip = (int)doreti_popl_ds_fault;
				goto out;
			}
			if (frame.tf_eip == (int)doreti_popl_es) {
				frame.tf_eip = (int)doreti_popl_es_fault;
				goto out;
			}
			if (frame.tf_eip == (int)doreti_popl_fs) {
				frame.tf_eip = (int)doreti_popl_fs_fault;
				goto out;
			}
			if (curpcb && curpcb->pcb_onfault) {
				frame.tf_eip = (int)curpcb->pcb_onfault;
				goto out;
			}
			break;

		case T_TSSFLT:
			/*
			 * PSL_NT can be set in user mode and isn't cleared
			 * automatically when the kernel is entered.  This
			 * causes a TSS fault when the kernel attempts to
			 * `iret' because the TSS link is uninitialized.  We
			 * want to get this fault so that we can fix the
			 * problem here and not every time the kernel is
			 * entered.
			 */
			if (frame.tf_eflags & PSL_NT) {
				frame.tf_eflags &= ~PSL_NT;
				goto out;
			}
			break;

		case T_TRCTRAP:		/* trace trap */
			if (frame.tf_eip == (int)IDTVEC(syscall)) {
				/*
				 * We've just entered system mode via the
				 * syscall lcall.  Continue single stepping
				 * silently until the syscall handler has
				 * saved the flags.
				 */
				goto out;
			}
			if (frame.tf_eip == (int)IDTVEC(syscall) + 1) {
				/*
				 * The syscall handler has now saved the
				 * flags.  Stop single stepping it.
				 */
				frame.tf_eflags &= ~PSL_T;
				goto out;
			}
			/*
			 * Ignore debug register trace traps due to
			 * accesses in the user's address space, which
			 * can happen under several conditions such as
			 * if a user sets a watchpoint on a buffer and
			 * then passes that buffer to a system call.
			 * We still want to get TRCTRAPS for addresses
			 * in kernel space because that is useful when
			 * debugging the kernel.
			 */
			if (user_dbreg_trap() && !in_vm86call) {
				/*
				 * Reset breakpoint bits because the
				 * processor doesn't
				 */
				load_dr6(rdr6() & 0xfffffff0);
				goto out;
			}
			/*
			 * Fall through (TRCTRAP kernel mode, kernel address)
			 */
		case T_BPTFLT:
			/*
			 * If DDB is enabled, let it handle the debugger trap.
			 * Otherwise, debugger traps "can't happen".
			 */
#ifdef DDB
			if (kdb_trap (type, 0, &frame))
				goto out;
#endif
			break;

#if NISA > 0
		case T_NMI:
#ifdef POWERFAIL_NMI
			if (time_second - lastalert > 10) {
				log(LOG_WARNING, "NMI: power fail\n");
				sysbeep(TIMER_FREQ/880, hz);
				lastalert = time_second;
			}
			goto out;
#else /* !POWERFAIL_NMI */
			/* machine/parity/power fail/"kitchen sink" faults */
			if (isa_nmi(code) == 0) {
#ifdef DDB
				/*
				 * NMI can be hooked up to a pushbutton
				 * for debugging.
				 */
				if (ddb_on_nmi) {
					printf ("NMI ... going to debugger\n");
					kdb_trap (type, 0, &frame);
				}
#endif /* DDB */
				goto out;
			} else if (panic_on_nmi == 0)
				goto out;
			/* FALL THROUGH */
#endif /* POWERFAIL_NMI */
#endif /* NISA > 0 */
		}

		trap_fatal(&frame, eva);
		goto out;
	}

	/* Translate fault for emulators (e.g. Linux) */
	if (*p->p_sysent->sv_transtrap)
		i = (*p->p_sysent->sv_transtrap)(i, type);

	trapsignal(p, i, ucode);

#ifdef DEBUG
	if (type <= MAX_TRAP_MSG) {
		uprintf("fatal process exception: %s",
			trap_msg[type]);
		if ((type == T_PAGEFLT) || (type == T_PROTFLT))
			uprintf(", fault VA = 0x%lx", (u_long)eva);
		uprintf("\n");
	}
#endif

user:
	userret(p, &frame, sticks, 1);
out:
	mtx_exit(&Giant, MTX_DEF);
}

#ifdef notyet
/*
 * This version doesn't allow a page fault to user space while
 * in the kernel.  The rest of the kernel needs to be made "safe"
 * before this can be used.  I think the only things remaining
 * to be made safe are the iBCS2 code and the process tracing/
 * debugging code.
 */
static int
trap_pfault(frame, usermode, eva)
	struct trapframe *frame;
	int usermode;
	vm_offset_t eva;
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	vm_prot_t ftype;
	struct proc *p = curproc;

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	va = trunc_page(eva);
	if (va < VM_MIN_KERNEL_ADDRESS) {
		vm_offset_t v;
		vm_page_t mpte;

		if (p == NULL ||
		    (!usermode && va < VM_MAXUSER_ADDRESS &&
		     (intr_nesting_level != 0 || curpcb == NULL ||
		      curpcb->pcb_onfault == NULL))) {
			trap_fatal(frame, eva);
			return (-1);
		}

		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		vm = p->p_vmspace;
		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;

		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		++p->p_lock;

		/*
		 * Grow the stack if necessary
		 */
		/* grow_stack returns false only if va falls into
		 * a growable stack region and the stack growth
		 * fails.  It returns true if va was not within
		 * a growable stack region, or if the stack
		 * growth succeeded.
		 */
		if (!grow_stack (p, va)) {
			rv = KERN_FAILURE;
			--p->p_lock;
			goto nogo;
		}

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
			      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
						      : VM_FAULT_NORMAL);

		--p->p_lock;
	} else {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 */
		if (usermode)
			goto nogo;

		/*
		 * Since we know that kernel virtual addresses
		 * always have pte pages mapped, we just have to fault
		 * the page.
		 */
		rv = vm_fault(kernel_map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (intr_nesting_level == 0 && curpcb && curpcb->pcb_onfault) {
			frame->tf_eip = (int)curpcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, eva);
		return (-1);
	}

	/* kludge to pass faulting virtual address to sendsig */
	frame->tf_err = eva;

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}
#endif

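/*
 * trap_pfault() resolves a page fault: kernel addresses are faulted
 * against kernel_map, user addresses against the current process's
 * vmspace (growing the stack first if necessary).  It returns 0 on
 * success, -1 if the fault was fatal and trap_fatal() was called,
 * -2 if the Pentium F00F workaround fired, or the signal number
 * (SIGBUS/SIGSEGV) to deliver for an unresolved user fault.
 */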
int
trap_pfault(frame, usermode, eva)
	struct trapframe *frame;
	int usermode;
	vm_offset_t eva;
{
	vm_offset_t va;
	struct vmspace *vm = NULL;
	vm_map_t map = 0;
	int rv = 0;
	vm_prot_t ftype;
	struct proc *p = curproc;

	va = trunc_page(eva);
	if (va >= KERNBASE) {
		/*
		 * Don't allow user-mode faults in kernel address space.
		 * An exception: if the faulting address is the invalid
		 * instruction entry in the IDT, then the Intel Pentium
		 * F00F bug workaround was triggered, and we need to
		 * treat it as an illegal instruction, and not a page
		 * fault.
		 */
#if defined(I586_CPU) && !defined(NO_F00F_HACK)
		if ((eva == (unsigned int)&idt[6]) && has_f00f_bug)
			return -2;
#endif
		if (usermode)
			goto nogo;

		map = kernel_map;
	} else {
		/*
		 * This is a fault on non-kernel virtual memory.
		 * vm is initialized above to NULL. If curproc is NULL
		 * or curproc->p_vmspace is NULL the fault is fatal.
		 */
		if (p != NULL)
			vm = p->p_vmspace;

		if (vm == NULL)
			goto nogo;

		map = &vm->vm_map;
	}

	if (frame->tf_err & PGEX_W)
		ftype = VM_PROT_WRITE;
	else
		ftype = VM_PROT_READ;

	if (map != kernel_map) {
		/*
		 * Keep swapout from messing with us during this
		 * critical time.
		 */
		++p->p_lock;

		/*
		 * Grow the stack if necessary
		 */
		/* grow_stack returns false only if va falls into
		 * a growable stack region and the stack growth
		 * fails.  It returns true if va was not within
		 * a growable stack region, or if the stack
		 * growth succeeded.
		 */
		if (!grow_stack (p, va)) {
			rv = KERN_FAILURE;
			--p->p_lock;
			goto nogo;
		}

		/* Fault in the user page: */
		rv = vm_fault(map, va, ftype,
			      (ftype & VM_PROT_WRITE) ? VM_FAULT_DIRTY
						      : VM_FAULT_NORMAL);

		--p->p_lock;
	} else {
		/*
		 * Don't have to worry about process locking or stacks
		 * in the kernel.
		 */
		rv = vm_fault(map, va, ftype, VM_FAULT_NORMAL);
	}

	if (rv == KERN_SUCCESS)
		return (0);
nogo:
	if (!usermode) {
		if (intr_nesting_level == 0 && curpcb && curpcb->pcb_onfault) {
			frame->tf_eip = (int)curpcb->pcb_onfault;
			return (0);
		}
		trap_fatal(frame, eva);
		return (-1);
	}

	/* kludge to pass faulting virtual address to sendsig */
	frame->tf_err = eva;

	return((rv == KERN_PROTECTION_FAILURE) ? SIGBUS : SIGSEGV);
}

static void
trap_fatal(frame, eva)
	struct trapframe *frame;
	vm_offset_t eva;
{
	int code, type, ss, esp;
	struct soft_segment_descriptor softseg;

	code = frame->tf_err;
	type = frame->tf_trapno;
	sdtossd(&gdt[IDXSEL(frame->tf_cs & 0xffff)].sd, &softseg);

	if (type <= MAX_TRAP_MSG)
		printf("\n\nFatal trap %d: %s while in %s mode\n",
			type, trap_msg[type],
			frame->tf_eflags & PSL_VM ? "vm86" :
			ISPL(frame->tf_cs) == SEL_UPL ? "user" : "kernel");
#ifdef SMP
	/* two separate prints in case of a trap on an unmapped page */
	printf("cpuid = %d; ", cpuid);
	printf("lapic.id = %08x\n", lapic.id);
#endif
	if (type == T_PAGEFLT) {
		printf("fault virtual address = 0x%x\n", eva);
		printf("fault code = %s %s, %s\n",
			code & PGEX_U ? "user" : "supervisor",
			code & PGEX_W ? "write" : "read",
			code & PGEX_P ? "protection violation" : "page not present");
	}
	printf("instruction pointer = 0x%x:0x%x\n",
	       frame->tf_cs & 0xffff, frame->tf_eip);
	if ((ISPL(frame->tf_cs) == SEL_UPL) || (frame->tf_eflags & PSL_VM)) {
		ss = frame->tf_ss & 0xffff;
		esp = frame->tf_esp;
	} else {
		ss = GSEL(GDATA_SEL, SEL_KPL);
		esp = (int)&frame->tf_esp;
	}
	printf("stack pointer = 0x%x:0x%x\n", ss, esp);
	printf("frame pointer = 0x%x:0x%x\n", ss, frame->tf_ebp);
	printf("code segment = base 0x%x, limit 0x%x, type 0x%x\n",
	       softseg.ssd_base, softseg.ssd_limit, softseg.ssd_type);
	printf(" = DPL %d, pres %d, def32 %d, gran %d\n",
	       softseg.ssd_dpl, softseg.ssd_p, softseg.ssd_def32,
	       softseg.ssd_gran);
	printf("processor eflags = ");
	if (frame->tf_eflags & PSL_T)
		printf("trace trap, ");
	if (frame->tf_eflags & PSL_I)
		printf("interrupt enabled, ");
	if (frame->tf_eflags & PSL_NT)
		printf("nested task, ");
	if (frame->tf_eflags & PSL_RF)
		printf("resume, ");
	if (frame->tf_eflags & PSL_VM)
		printf("vm86, ");
	printf("IOPL = %d\n", (frame->tf_eflags & PSL_IOPL) >> 12);
	printf("current process = ");
	if (curproc) {
		printf("%lu (%s)\n",
		    (u_long)curproc->p_pid, curproc->p_comm ?
		    curproc->p_comm : "");
	} else {
		printf("Idle\n");
	}

#ifdef KDB
	if (kdb_trap(&psl))
		return;
#endif
#ifdef DDB
	if ((debugger_on_panic || db_active) && kdb_trap(type, 0, frame))
		return;
#endif
	printf("trap number = %d\n", type);
	if (type <= MAX_TRAP_MSG)
		panic(trap_msg[type]);
	else
		panic("unknown/reserved trap");
}

/*
 * Double fault handler. Called when a fault occurs while writing
 * a frame for a trap/exception onto the stack. This usually occurs
 * when the stack overflows (such is the case with infinite recursion,
 * for example).
 *
 * XXX Note that the current PTD gets replaced by IdlePTD when the
 * task switch occurs. This means that the stack that was active at
 * the time of the double fault is not available at <kstack> unless
 * the machine was idle when the double fault occurred. The downside
 * of this is that "trace <ebp>" in ddb won't work.
 */
void
dblfault_handler()
{
	printf("\nFatal double fault:\n");
	printf("eip = 0x%x\n", common_tss.tss_eip);
	printf("esp = 0x%x\n", common_tss.tss_esp);
	printf("ebp = 0x%x\n", common_tss.tss_ebp);
#ifdef SMP
	/* two separate prints in case of a trap on an unmapped page */
	printf("cpuid = %d; ", cpuid);
	printf("lapic.id = %08x\n", lapic.id);
#endif
	panic("double fault");
}

/*
 * Compensate for 386 brain damage (missing URKR).
 * This is a little simpler than the pagefault handler in trap() because
 * the page tables have already been faulted in and high addresses
 * are thrown out early for other reasons.
 */
int trapwrite(addr)
	unsigned addr;
{
	struct proc *p;
	vm_offset_t va;
	struct vmspace *vm;
	int rv;

	va = trunc_page((vm_offset_t)addr);
	/*
	 * XXX - MAX is END.  Changed > to >= for temp. fix.
	 */
	if (va >= VM_MAXUSER_ADDRESS)
		return (1);

	p = curproc;
	vm = p->p_vmspace;

	++p->p_lock;

	if (!grow_stack (p, va)) {
		--p->p_lock;
		return (1);
	}

	/*
	 * fault the data page
	 */
	rv = vm_fault(&vm->vm_map, va, VM_PROT_WRITE, VM_FAULT_DIRTY);

	--p->p_lock;

	if (rv != KERN_SUCCESS)
		return 1;

	return (0);
}

/*
 * syscall2 -	MP aware system call request C handler
 *
 * A system call is essentially treated as a trap except that the
 * MP lock is not held on entry or return.  We are responsible for
 * obtaining the MP lock if necessary and for handling ASTs
 * (e.g. a task switch) prior to return.
 *
 * In general, only simple access and manipulation of curproc and
 * the current stack is allowed without having to hold MP lock.
 */
void
syscall2(frame)
	struct trapframe frame;
{
	caddr_t params;
	int i;
	struct sysent *callp;
	struct proc *p = curproc;
	u_quad_t sticks;
	int error;
	int narg;
	int args[8];
	int have_giant = 0;
	u_int code;

	atomic_add_int(&cnt.v_syscall, 1);

#ifdef DIAGNOSTIC
	if (ISPL(frame.tf_cs) != SEL_UPL) {
		mtx_enter(&Giant, MTX_DEF);
		panic("syscall");
		/* NOT REACHED */
	}
#endif

	/*
	 * handle atomicity by looping since interrupts are enabled and the
	 * MP lock is not held.
	 */
	sticks = ((volatile struct proc *)p)->p_sticks;
	while (sticks != ((volatile struct proc *)p)->p_sticks)
		sticks = ((volatile struct proc *)p)->p_sticks;

	p->p_md.md_regs = &frame;
	params = (caddr_t)frame.tf_esp + sizeof(int);
	code = frame.tf_eax;

	if (p->p_sysent->sv_prepsyscall) {
		/*
		 * The prep code is not MP aware.
		 */
		mtx_enter(&Giant, MTX_DEF);
		(*p->p_sysent->sv_prepsyscall)(&frame, args, &code, &params);
		mtx_exit(&Giant, MTX_DEF);
	} else {
		/*
		 * Need to check if this is a 32 bit or 64 bit syscall.
		 * fuword is MP aware.
		 */
		if (code == SYS_syscall) {
			/*
			 * Code is first argument, followed by actual args.
			 */
			code = fuword(params);
			params += sizeof(int);
		} else if (code == SYS___syscall) {
			/*
			 * Like syscall, but code is a quad, so as to maintain
			 * quad alignment for the rest of the arguments.
			 */
			code = fuword(params);
			params += sizeof(quad_t);
		}
	}

	if (p->p_sysent->sv_mask)
		code &= p->p_sysent->sv_mask;

	if (code >= p->p_sysent->sv_size)
		callp = &p->p_sysent->sv_table[0];
	else
		callp = &p->p_sysent->sv_table[code];

	narg = callp->sy_narg & SYF_ARGMASK;

	/*
	 * copyin is MP aware, but the tracing code is not
	 */
	if (params && (i = narg * sizeof(int)) &&
	    (error = copyin(params, (caddr_t)args, (u_int)i))) {
		mtx_enter(&Giant, MTX_DEF);
		have_giant = 1;
#ifdef KTRACE
		if (KTRPOINT(p, KTR_SYSCALL))
			ktrsyscall(p->p_tracep, code, narg, args);
#endif
		goto bad;
	}

	/*
	 * Try to run the syscall without the MP lock if the syscall
	 * is MP safe.  We have to obtain the MP lock no matter what if
	 * we are ktracing
	 */
	if ((callp->sy_narg & SYF_MPSAFE) == 0) {
		mtx_enter(&Giant, MTX_DEF);
		have_giant = 1;
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSCALL)) {
		if (have_giant == 0) {
			mtx_enter(&Giant, MTX_DEF);
			have_giant = 1;
		}
		ktrsyscall(p->p_tracep, code, narg, args);
	}
#endif
	p->p_retval[0] = 0;
	p->p_retval[1] = frame.tf_edx;

	STOPEVENT(p, S_SCE, narg);	/* MP aware */

	error = (*callp->sy_call)(p, args);

	/*
	 * MP SAFE (we may or may not have the MP lock at this point)
	 */
	switch (error) {
	case 0:
		/*
		 * Reinitialize proc pointer `p' as it may be different
		 * if this is a child returning from fork syscall.
		 */
		p = curproc;
		frame.tf_eax = p->p_retval[0];
		frame.tf_edx = p->p_retval[1];
		frame.tf_eflags &= ~PSL_C;
		break;

	case ERESTART:
		/*
		 * Reconstruct pc, assuming lcall $X,y is 7 bytes,
		 * int 0x80 is 2 bytes.  We saved this in tf_err.
		 */
		frame.tf_eip -= frame.tf_err;
		break;

	case EJUSTRETURN:
		break;

	default:
bad:
		if (p->p_sysent->sv_errsize) {
			if (error >= p->p_sysent->sv_errsize)
				error = -1;	/* XXX */
			else
				error = p->p_sysent->sv_errtbl[error];
		}
		frame.tf_eax = error;
		frame.tf_eflags |= PSL_C;
		break;
	}

	/*
	 * Traced syscall.  trapsignal() is not MP aware.
	 */
	if ((frame.tf_eflags & PSL_T) && !(frame.tf_eflags & PSL_VM)) {
		if (have_giant == 0) {
			mtx_enter(&Giant, MTX_DEF);
			have_giant = 1;
		}
		frame.tf_eflags &= ~PSL_T;
		trapsignal(p, SIGTRAP, 0);
	}

	/*
	 * Handle reschedule and other end-of-syscall issues
	 */
	have_giant = userret(p, &frame, sticks, have_giant);

#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSRET)) {
		if (have_giant == 0) {
			mtx_enter(&Giant, MTX_DEF);
			have_giant = 1;
		}
		ktrsysret(p->p_tracep, code, error, p->p_retval[0]);
	}
#endif

	/*
	 * This works because errno is findable through the
	 * register set.  If we ever support an emulation where this
	 * is not the case, this code will need to be revisited.
	 */
	STOPEVENT(p, S_SCX, code);

	/*
	 * Release the MP lock if we had to get it
	 */
	if (have_giant)
		mtx_exit(&Giant, MTX_DEF);

	mtx_assert(&sched_lock, MA_NOTOWNED);
	mtx_assert(&Giant, MA_NOTOWNED);
#ifdef WITNESS
	if (witness_list(p)) {
		panic("system call %s returning with mutex(s) held\n",
		    syscallnames[code]);
	}
#endif
}

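/*
 * ast() - handle an asynchronous system trap on return to user mode:
 * charge any profiling ticks that are owed, post pending SIGVTALRM and
 * SIGPROF signals, and then do the usual userret() processing.
 */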
void
ast(frame)
	struct trapframe frame;
{
	struct proc *p = CURPROC;
	u_quad_t sticks;

	/*
	 * handle atomicity by looping since interrupts are enabled and the
	 * MP lock is not held.
	 */
	sticks = ((volatile struct proc *)p)->p_sticks;
	while (sticks != ((volatile struct proc *)p)->p_sticks)
		sticks = ((volatile struct proc *)p)->p_sticks;

	astoff();
	atomic_add_int(&cnt.v_soft, 1);
	if (p->p_flag & P_OWEUPC) {
		mtx_enter(&Giant, MTX_DEF);
		p->p_flag &= ~P_OWEUPC;
		addupc_task(p, p->p_stats->p_prof.pr_addr,
			    p->p_stats->p_prof.pr_ticks);
	}
	if (p->p_flag & P_ALRMPEND) {
		if (!mtx_owned(&Giant))
			mtx_enter(&Giant, MTX_DEF);
		p->p_flag &= ~P_ALRMPEND;
		psignal(p, SIGVTALRM);
	}
	if (p->p_flag & P_PROFPEND) {
		if (!mtx_owned(&Giant))
			mtx_enter(&Giant, MTX_DEF);
		p->p_flag &= ~P_PROFPEND;
		psignal(p, SIGPROF);
	}
	if (userret(p, &frame, sticks, mtx_owned(&Giant)) != 0)
		mtx_exit(&Giant, MTX_DEF);
}

/*
 * Simplified back end of syscall(), used when returning from fork()
 * directly into user mode.  Giant is not held on entry, and must not
 * be held on return.
 */
void
fork_return(p, frame)
	struct proc *p;
	struct trapframe frame;
{
	int have_giant;

	frame.tf_eax = 0;		/* Child returns zero */
	frame.tf_eflags &= ~PSL_C;	/* success */
	frame.tf_edx = 1;

	have_giant = userret(p, &frame, 0, mtx_owned(&Giant));
#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSRET)) {
		if (have_giant == 0) {
			mtx_enter(&Giant, MTX_DEF);
			have_giant = 1;
		}
		ktrsysret(p->p_tracep, SYS_fork, 0, 0);
	}
#endif
	if (have_giant)
		mtx_exit(&Giant, MTX_DEF);
}