/* trap.c revision 157616 */
1/* $NetBSD: fault.c,v 1.45 2003/11/20 14:44:36 scw Exp $ */ 2 3/*- 4 * Copyright 2004 Olivier Houchard 5 * Copyright 2003 Wasabi Systems, Inc. 6 * All rights reserved. 7 * 8 * Written by Steve C. Woodford for Wasabi Systems, Inc. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed for the NetBSD Project by 21 * Wasabi Systems, Inc. 22 * 4. The name of Wasabi Systems, Inc. may not be used to endorse 23 * or promote products derived from this software without specific prior 24 * written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND 27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC 30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36 * POSSIBILITY OF SUCH DAMAGE. 37 */ 38/*- 39 * Copyright (c) 1994-1997 Mark Brinicombe. 
40 * Copyright (c) 1994 Brini. 41 * All rights reserved. 42 * 43 * This code is derived from software written for Brini by Mark Brinicombe 44 * 45 * Redistribution and use in source and binary forms, with or without 46 * modification, are permitted provided that the following conditions 47 * are met: 48 * 1. Redistributions of source code must retain the above copyright 49 * notice, this list of conditions and the following disclaimer. 50 * 2. Redistributions in binary form must reproduce the above copyright 51 * notice, this list of conditions and the following disclaimer in the 52 * documentation and/or other materials provided with the distribution. 53 * 3. All advertising materials mentioning features or use of this software 54 * must display the following acknowledgement: 55 * This product includes software developed by Brini. 56 * 4. The name of the company nor the name of the author may be used to 57 * endorse or promote products derived from this software without specific 58 * prior written permission. 59 * 60 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED 61 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 62 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 63 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, 64 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES 65 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 66 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 67 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 68 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 69 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 70 * SUCH DAMAGE. 
71 * 72 * RiscBSD kernel project 73 * 74 * fault.c 75 * 76 * Fault handlers 77 * 78 * Created : 28/11/94 79 */ 80 81 82#include "opt_ktrace.h" 83 84#include <sys/cdefs.h> 85__FBSDID("$FreeBSD: head/sys/arm/arm/trap.c 157616 2006-04-09 20:16:47Z cognet $"); 86 87#include <sys/types.h> 88 89#include <sys/param.h> 90#include <sys/systm.h> 91#include <sys/proc.h> 92#include <sys/kernel.h> 93#include <sys/lock.h> 94#include <sys/mutex.h> 95#include <sys/syscall.h> 96#include <sys/sysent.h> 97#include <sys/signalvar.h> 98#include <sys/ktr.h> 99#ifdef KTRACE 100#include <sys/uio.h> 101#include <sys/ktrace.h> 102#endif 103#include <sys/ptrace.h> 104#include <sys/pioctl.h> 105 106#include <vm/vm.h> 107#include <vm/pmap.h> 108#include <vm/vm_kern.h> 109#include <vm/vm_map.h> 110#include <vm/vm_extern.h> 111 112#include <machine/cpuconf.h> 113#include <machine/vmparam.h> 114#include <machine/frame.h> 115#include <machine/katelib.h> 116#include <machine/cpu.h> 117#include <machine/intr.h> 118#include <machine/pcb.h> 119#include <machine/proc.h> 120#include <machine/swi.h> 121 122#include <security/audit/audit.h> 123 124#ifdef KDB 125#include <sys/kdb.h> 126#endif 127 128 129void swi_handler(trapframe_t *); 130void undefinedinstruction(trapframe_t *); 131 132#include <machine/disassem.h> 133#include <machine/machdep.h> 134 135extern char fusubailout[]; 136 137#ifdef DEBUG 138int last_fault_code; /* For the benefit of pmap_fault_fixup() */ 139#endif 140 141#if defined(CPU_ARM7TDMI) 142/* These CPUs may need data/prefetch abort fixups */ 143#define CPU_ABORT_FIXUP_REQUIRED 144#endif 145 146struct ksig { 147 int signb; 148 u_long code; 149}; 150struct data_abort { 151 int (*func)(trapframe_t *, u_int, u_int, struct thread *, struct ksig *); 152 const char *desc; 153}; 154 155static int dab_fatal(trapframe_t *, u_int, u_int, struct thread *, struct ksig *); 156static int dab_align(trapframe_t *, u_int, u_int, struct thread *, struct ksig *); 157static int dab_buserr(trapframe_t *, 
u_int, u_int, struct thread *, struct ksig *); 158 159static const struct data_abort data_aborts[] = { 160 {dab_fatal, "Vector Exception"}, 161 {dab_align, "Alignment Fault 1"}, 162 {dab_fatal, "Terminal Exception"}, 163 {dab_align, "Alignment Fault 3"}, 164 {dab_buserr, "External Linefetch Abort (S)"}, 165 {NULL, "Translation Fault (S)"}, 166 {dab_buserr, "External Linefetch Abort (P)"}, 167 {NULL, "Translation Fault (P)"}, 168 {dab_buserr, "External Non-Linefetch Abort (S)"}, 169 {NULL, "Domain Fault (S)"}, 170 {dab_buserr, "External Non-Linefetch Abort (P)"}, 171 {NULL, "Domain Fault (P)"}, 172 {dab_buserr, "External Translation Abort (L1)"}, 173 {NULL, "Permission Fault (S)"}, 174 {dab_buserr, "External Translation Abort (L2)"}, 175 {NULL, "Permission Fault (P)"} 176}; 177 178/* Determine if a fault came from user mode */ 179#define TRAP_USERMODE(tf) ((tf->tf_spsr & PSR_MODE) == PSR_USR32_MODE) 180 181/* Determine if 'x' is a permission fault */ 182#define IS_PERMISSION_FAULT(x) \ 183 (((1 << ((x) & FAULT_TYPE_MASK)) & \ 184 ((1 << FAULT_PERM_P) | (1 << FAULT_PERM_S))) != 0) 185 186static __inline void 187call_trapsignal(struct thread *td, int sig, u_long code) 188{ 189 ksiginfo_t ksi; 190 191 ksiginfo_init_trap(&ksi); 192 ksi.ksi_signo = sig; 193 ksi.ksi_code = (int)code; 194 trapsignal(td, &ksi); 195} 196 197static __inline int 198data_abort_fixup(trapframe_t *tf, u_int fsr, u_int far, struct thread *td, struct ksig *ksig) 199{ 200#ifdef CPU_ABORT_FIXUP_REQUIRED 201 int error; 202 203 /* Call the cpu specific data abort fixup routine */ 204 error = cpu_dataabt_fixup(tf); 205 if (__predict_true(error != ABORT_FIXUP_FAILED)) 206 return (error); 207 208 /* 209 * Oops, couldn't fix up the instruction 210 */ 211 printf("data_abort_fixup: fixup for %s mode data abort failed.\n", 212 TRAP_USERMODE(tf) ? 
"user" : "kernel"); 213 printf("pc = 0x%08x, opcode 0x%08x, insn = ", tf->tf_pc, 214 *((u_int *)tf->tf_pc)); 215 disassemble(tf->tf_pc); 216 217 /* Die now if this happened in kernel mode */ 218 if (!TRAP_USERMODE(tf)) 219 dab_fatal(tf, fsr, far, td, NULL, ksig); 220 221 return (error); 222#else 223 return (ABORT_FIXUP_OK); 224#endif /* CPU_ABORT_FIXUP_REQUIRED */ 225} 226 227void 228data_abort_handler(trapframe_t *tf) 229{ 230 struct vm_map *map; 231 struct pcb *pcb; 232 struct thread *td; 233 u_int user, far, fsr; 234 vm_prot_t ftype; 235 void *onfault; 236 vm_offset_t va; 237 int error = 0; 238 struct ksig ksig; 239 struct proc *p; 240 241 242 /* Grab FAR/FSR before enabling interrupts */ 243 far = cpu_faultaddress(); 244 fsr = cpu_faultstatus(); 245#if 0 246 printf("data abort: %p (from %p %p)\n", (void*)far, (void*)tf->tf_pc, 247 (void*)tf->tf_svc_lr); 248#endif 249 250 /* Update vmmeter statistics */ 251#if 0 252 vmexp.traps++; 253#endif 254 255 td = curthread; 256 p = td->td_proc; 257 258 PCPU_LAZY_INC(cnt.v_trap); 259 /* Data abort came from user mode? 
*/ 260 user = TRAP_USERMODE(tf); 261 262 if (user) { 263 td->td_pticks = 0; 264 td->td_frame = tf; 265 if (td->td_ucred != td->td_proc->p_ucred) 266 cred_update_thread(td); 267 if (td->td_pflags & TDP_SA) 268 thread_user_enter(td); 269 270 } 271 /* Grab the current pcb */ 272 pcb = td->td_pcb; 273 /* Re-enable interrupts if they were enabled previously */ 274 if (td->td_md.md_spinlock_count == 0) { 275 if (__predict_true(tf->tf_spsr & I32_bit) == 0) 276 enable_interrupts(I32_bit); 277 if (__predict_true(tf->tf_spsr & F32_bit) == 0) 278 enable_interrupts(F32_bit); 279 } 280 281 282 /* Invoke the appropriate handler, if necessary */ 283 if (__predict_false(data_aborts[fsr & FAULT_TYPE_MASK].func != NULL)) { 284 if ((data_aborts[fsr & FAULT_TYPE_MASK].func)(tf, fsr, far, 285 td, &ksig)) { 286 goto do_trapsignal; 287 } 288 goto out; 289 } 290 291 /* 292 * At this point, we're dealing with one of the following data aborts: 293 * 294 * FAULT_TRANS_S - Translation -- Section 295 * FAULT_TRANS_P - Translation -- Page 296 * FAULT_DOMAIN_S - Domain -- Section 297 * FAULT_DOMAIN_P - Domain -- Page 298 * FAULT_PERM_S - Permission -- Section 299 * FAULT_PERM_P - Permission -- Page 300 * 301 * These are the main virtual memory-related faults signalled by 302 * the MMU. 303 */ 304 305 /* fusubailout is used by [fs]uswintr to avoid page faulting */ 306 if (__predict_false(pcb->pcb_onfault == fusubailout)) { 307 tf->tf_r0 = EFAULT; 308 tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault; 309 return; 310 } 311 312 /* 313 * Make sure the Program Counter is sane. We could fall foul of 314 * someone executing Thumb code, in which case the PC might not 315 * be word-aligned. This would cause a kernel alignment fault 316 * further down if we have to decode the current instruction. 317 * XXX: It would be nice to be able to support Thumb at some point. 318 */ 319 if (__predict_false((tf->tf_pc & 3) != 0)) { 320 if (user) { 321 /* 322 * Give the user an illegal instruction signal. 
323 */ 324 /* Deliver a SIGILL to the process */ 325 ksig.signb = SIGILL; 326 ksig.code = 0; 327 goto do_trapsignal; 328 } 329 330 /* 331 * The kernel never executes Thumb code. 332 */ 333 printf("\ndata_abort_fault: Misaligned Kernel-mode " 334 "Program Counter\n"); 335 dab_fatal(tf, fsr, far, td, &ksig); 336 } 337 338 /* See if the cpu state needs to be fixed up */ 339 switch (data_abort_fixup(tf, fsr, far, td, &ksig)) { 340 case ABORT_FIXUP_RETURN: 341 return; 342 case ABORT_FIXUP_FAILED: 343 /* Deliver a SIGILL to the process */ 344 ksig.signb = SIGILL; 345 ksig.code = 0; 346 goto do_trapsignal; 347 default: 348 break; 349 } 350 351 va = trunc_page((vm_offset_t)far); 352 353 /* 354 * It is only a kernel address space fault iff: 355 * 1. user == 0 and 356 * 2. pcb_onfault not set or 357 * 3. pcb_onfault set and not LDRT/LDRBT/STRT/STRBT instruction. 358 */ 359 if (user == 0 && (va >= VM_MIN_KERNEL_ADDRESS || 360 (va < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW)) && 361 __predict_true((pcb->pcb_onfault == NULL || 362 (ReadWord(tf->tf_pc) & 0x05200000) != 0x04200000))) { 363 map = kernel_map; 364 365 /* Was the fault due to the FPE/IPKDB ? */ 366 if (__predict_false((tf->tf_spsr & PSR_MODE)==PSR_UND32_MODE)) { 367 368 /* 369 * Force exit via userret() 370 * This is necessary as the FPE is an extension to 371 * userland that actually runs in a priveledged mode 372 * but uses USR mode permissions for its accesses. 373 */ 374 user = 1; 375 ksig.signb = SIGSEGV; 376 ksig.code = 0; 377 goto do_trapsignal; 378 } 379 } else { 380 map = &td->td_proc->p_vmspace->vm_map; 381 } 382 383 /* 384 * We need to know whether the page should be mapped 385 * as R or R/W. The MMU does not give us the info as 386 * to whether the fault was caused by a read or a write. 387 * 388 * However, we know that a permission fault can only be 389 * the result of a write to a read-only location, so 390 * we can deal with those quickly. 
391 * 392 * Otherwise we need to disassemble the instruction 393 * responsible to determine if it was a write. 394 */ 395 if (IS_PERMISSION_FAULT(fsr)) { 396 ftype = VM_PROT_WRITE; 397 } else { 398 u_int insn = ReadWord(tf->tf_pc); 399 400 if (((insn & 0x0c100000) == 0x04000000) || /* STR/STRB */ 401 ((insn & 0x0e1000b0) == 0x000000b0) || /* STRH/STRD */ 402 ((insn & 0x0a100000) == 0x08000000)) /* STM/CDT */ 403 { 404 ftype = VM_PROT_WRITE; 405 } 406 else 407 if ((insn & 0x0fb00ff0) == 0x01000090) /* SWP */ 408 ftype = VM_PROT_READ | VM_PROT_WRITE; 409 else 410 ftype = VM_PROT_READ; 411 } 412 413 /* 414 * See if the fault is as a result of ref/mod emulation, 415 * or domain mismatch. 416 */ 417#ifdef DEBUG 418 last_fault_code = fsr; 419#endif 420 if (pmap_fault_fixup(vmspace_pmap(td->td_proc->p_vmspace), va, ftype, 421 user)) { 422 goto out; 423 } 424 425 onfault = pcb->pcb_onfault; 426 pcb->pcb_onfault = NULL; 427 if (map != kernel_map) { 428 PROC_LOCK(p); 429 p->p_lock++; 430 PROC_UNLOCK(p); 431 } 432 error = vm_fault(map, va, ftype, (ftype & VM_PROT_WRITE) ? 433 VM_FAULT_DIRTY : VM_FAULT_NORMAL); 434 pcb->pcb_onfault = onfault; 435 436 if (map != kernel_map) { 437 PROC_LOCK(p); 438 p->p_lock--; 439 PROC_UNLOCK(p); 440 } 441 if (__predict_true(error == 0)) 442 goto out; 443 if (user == 0) { 444 if (pcb->pcb_onfault) { 445 tf->tf_r0 = error; 446 tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault; 447 return; 448 } 449 450 printf("\nvm_fault(%p, %x, %x, 0) -> %x\n", map, va, ftype, 451 error); 452 dab_fatal(tf, fsr, far, td, &ksig); 453 } 454 455 456 if (error == ENOMEM) { 457 printf("VM: pid %d (%s), uid %d killed: " 458 "out of swap\n", td->td_proc->p_pid, td->td_proc->p_comm, 459 (td->td_proc->p_ucred) ? 
460 td->td_proc->p_ucred->cr_uid : -1); 461 ksig.signb = SIGKILL; 462 } else { 463 ksig.signb = SIGSEGV; 464 } 465 ksig.code = 0; 466do_trapsignal: 467 call_trapsignal(td, ksig.signb, ksig.code); 468out: 469 /* If returning to user mode, make sure to invoke userret() */ 470 if (user) 471 userret(td, tf); 472} 473 474/* 475 * dab_fatal() handles the following data aborts: 476 * 477 * FAULT_WRTBUF_0 - Vector Exception 478 * FAULT_WRTBUF_1 - Terminal Exception 479 * 480 * We should never see these on a properly functioning system. 481 * 482 * This function is also called by the other handlers if they 483 * detect a fatal problem. 484 * 485 * Note: If 'l' is NULL, we assume we're dealing with a prefetch abort. 486 */ 487static int 488dab_fatal(trapframe_t *tf, u_int fsr, u_int far, struct thread *td, struct ksig *ksig) 489{ 490 const char *mode; 491 492 mode = TRAP_USERMODE(tf) ? "user" : "kernel"; 493 494 disable_interrupts(I32_bit|F32_bit); 495 if (td != NULL) { 496 printf("Fatal %s mode data abort: '%s'\n", mode, 497 data_aborts[fsr & FAULT_TYPE_MASK].desc); 498 printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr); 499 if ((fsr & FAULT_IMPRECISE) == 0) 500 printf("%08x, ", far); 501 else 502 printf("Invalid, "); 503 printf("spsr=%08x\n", tf->tf_spsr); 504 } else { 505 printf("Fatal %s mode prefetch abort at 0x%08x\n", 506 mode, tf->tf_pc); 507 printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr); 508 } 509 510 printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n", 511 tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3); 512 printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n", 513 tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7); 514 printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n", 515 tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11); 516 printf("r12=%08x, ", tf->tf_r12); 517 518 if (TRAP_USERMODE(tf)) 519 printf("usp=%08x, ulr=%08x", 520 tf->tf_usr_sp, tf->tf_usr_lr); 521 else 522 printf("ssp=%08x, slr=%08x", 523 tf->tf_svc_sp, tf->tf_svc_lr); 524 printf(", pc =%08x\n\n", tf->tf_pc); 525 
526#ifdef KDB 527 kdb_trap(fsr, 0, tf); 528#endif 529 panic("Fatal abort"); 530 /*NOTREACHED*/ 531} 532 533/* 534 * dab_align() handles the following data aborts: 535 * 536 * FAULT_ALIGN_0 - Alignment fault 537 * FAULT_ALIGN_0 - Alignment fault 538 * 539 * These faults are fatal if they happen in kernel mode. Otherwise, we 540 * deliver a bus error to the process. 541 */ 542static int 543dab_align(trapframe_t *tf, u_int fsr, u_int far, struct thread *td, struct ksig *ksig) 544{ 545 546 /* Alignment faults are always fatal if they occur in kernel mode */ 547 if (!TRAP_USERMODE(tf)) { 548 if (!td || !td->td_pcb->pcb_onfault) 549 dab_fatal(tf, fsr, far, td, ksig); 550 tf->tf_r0 = EFAULT; 551 tf->tf_pc = (int)td->td_pcb->pcb_onfault; 552 return (0); 553 } 554 555 /* pcb_onfault *must* be NULL at this point */ 556 557 /* See if the cpu state needs to be fixed up */ 558 (void) data_abort_fixup(tf, fsr, far, td, ksig); 559 560 /* Deliver a bus error signal to the process */ 561 ksig->code = 0; 562 ksig->signb = SIGBUS; 563 td->td_frame = tf; 564 565 return (1); 566} 567 568/* 569 * dab_buserr() handles the following data aborts: 570 * 571 * FAULT_BUSERR_0 - External Abort on Linefetch -- Section 572 * FAULT_BUSERR_1 - External Abort on Linefetch -- Page 573 * FAULT_BUSERR_2 - External Abort on Non-linefetch -- Section 574 * FAULT_BUSERR_3 - External Abort on Non-linefetch -- Page 575 * FAULT_BUSTRNL1 - External abort on Translation -- Level 1 576 * FAULT_BUSTRNL2 - External abort on Translation -- Level 2 577 * 578 * If pcb_onfault is set, flag the fault and return to the handler. 579 * If the fault occurred in user mode, give the process a SIGBUS. 580 * 581 * Note: On XScale, FAULT_BUSERR_0, FAULT_BUSERR_1, and FAULT_BUSERR_2 582 * can be flagged as imprecise in the FSR. This causes a real headache 583 * since some of the machine state is lost. In this case, tf->tf_pc 584 * may not actually point to the offending instruction. 
In fact, if 585 * we've taken a double abort fault, it generally points somewhere near 586 * the top of "data_abort_entry" in exception.S. 587 * 588 * In all other cases, these data aborts are considered fatal. 589 */ 590static int 591dab_buserr(trapframe_t *tf, u_int fsr, u_int far, struct thread *td, struct ksig *ksig) 592{ 593 struct pcb *pcb = td->td_pcb; 594 595#ifdef __XSCALE__ 596 if ((fsr & FAULT_IMPRECISE) != 0 && 597 (tf->tf_spsr & PSR_MODE) == PSR_ABT32_MODE) { 598 /* 599 * Oops, an imprecise, double abort fault. We've lost the 600 * r14_abt/spsr_abt values corresponding to the original 601 * abort, and the spsr saved in the trapframe indicates 602 * ABT mode. 603 */ 604 tf->tf_spsr &= ~PSR_MODE; 605 606 /* 607 * We use a simple heuristic to determine if the double abort 608 * happened as a result of a kernel or user mode access. 609 * If the current trapframe is at the top of the kernel stack, 610 * the fault _must_ have come from user mode. 611 */ 612 if (tf != ((trapframe_t *)pcb->un_32.pcb32_sp) - 1) { 613 /* 614 * Kernel mode. We're either about to die a 615 * spectacular death, or pcb_onfault will come 616 * to our rescue. Either way, the current value 617 * of tf->tf_pc is irrelevant. 618 */ 619 tf->tf_spsr |= PSR_SVC32_MODE; 620 if (pcb->pcb_onfault == NULL) 621 printf("\nKernel mode double abort!\n"); 622 } else { 623 /* 624 * User mode. We've lost the program counter at the 625 * time of the fault (not that it was accurate anyway; 626 * it's not called an imprecise fault for nothing). 627 * About all we can do is copy r14_usr to tf_pc and 628 * hope for the best. The process is about to get a 629 * SIGBUS, so it's probably history anyway. 
630 */ 631 tf->tf_spsr |= PSR_USR32_MODE; 632 tf->tf_pc = tf->tf_usr_lr; 633 } 634 } 635 636 /* FAR is invalid for imprecise exceptions */ 637 if ((fsr & FAULT_IMPRECISE) != 0) 638 far = 0; 639#endif /* __XSCALE__ */ 640 641 if (pcb->pcb_onfault) { 642 tf->tf_r0 = EFAULT; 643 tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault; 644 return (0); 645 } 646 647 /* See if the cpu state needs to be fixed up */ 648 (void) data_abort_fixup(tf, fsr, far, td, ksig); 649 650 /* 651 * At this point, if the fault happened in kernel mode, we're toast 652 */ 653 if (!TRAP_USERMODE(tf)) 654 dab_fatal(tf, fsr, far, td, ksig); 655 656 /* Deliver a bus error signal to the process */ 657 ksig->signb = SIGBUS; 658 ksig->code = 0; 659 td->td_frame = tf; 660 661 return (1); 662} 663 664static __inline int 665prefetch_abort_fixup(trapframe_t *tf, struct ksig *ksig) 666{ 667#ifdef CPU_ABORT_FIXUP_REQUIRED 668 int error; 669 670 /* Call the cpu specific prefetch abort fixup routine */ 671 error = cpu_prefetchabt_fixup(tf); 672 if (__predict_true(error != ABORT_FIXUP_FAILED)) 673 return (error); 674 675 /* 676 * Oops, couldn't fix up the instruction 677 */ 678 printf( 679 "prefetch_abort_fixup: fixup for %s mode prefetch abort failed.\n", 680 TRAP_USERMODE(tf) ? "user" : "kernel"); 681 printf("pc = 0x%08x, opcode 0x%08x, insn = ", tf->tf_pc, 682 *((u_int *)tf->tf_pc)); 683 disassemble(tf->tf_pc); 684 685 /* Die now if this happened in kernel mode */ 686 if (!TRAP_USERMODE(tf)) 687 dab_fatal(tf, 0, tf->tf_pc, NULL, ksig); 688 689 return (error); 690#else 691 return (ABORT_FIXUP_OK); 692#endif /* CPU_ABORT_FIXUP_REQUIRED */ 693} 694 695/* 696 * void prefetch_abort_handler(trapframe_t *tf) 697 * 698 * Abort handler called when instruction execution occurs at 699 * a non existent or restricted (access permissions) memory page. 700 * If the address is invalid and we were in SVC mode then panic as 701 * the kernel should never prefetch abort. 
702 * If the address is invalid and the page is mapped then the user process 703 * does no have read permission so send it a signal. 704 * Otherwise fault the page in and try again. 705 */ 706void 707prefetch_abort_handler(trapframe_t *tf) 708{ 709 struct thread *td; 710 struct proc * p; 711 struct vm_map *map; 712 vm_offset_t fault_pc, va; 713 int error = 0; 714 struct ksig ksig; 715 716 717#if 0 718 /* Update vmmeter statistics */ 719 uvmexp.traps++; 720#endif 721#if 0 722 printf("prefetch abort handler: %p %p\n", (void*)tf->tf_pc, 723 (void*)tf->tf_usr_lr); 724#endif 725 726 td = curthread; 727 p = td->td_proc; 728 PCPU_LAZY_INC(cnt.v_trap); 729 730 if (TRAP_USERMODE(tf)) { 731 td->td_frame = tf; 732 if (td->td_ucred != td->td_proc->p_ucred) 733 cred_update_thread(td); 734 if (td->td_proc->p_flag & P_SA) 735 thread_user_enter(td); 736 } 737 fault_pc = tf->tf_pc; 738 if (td->td_md.md_spinlock_count == 0) { 739 if (__predict_true(tf->tf_spsr & I32_bit) == 0) 740 enable_interrupts(I32_bit); 741 if (__predict_true(tf->tf_spsr & F32_bit) == 0) 742 enable_interrupts(F32_bit); 743 } 744 745 746 747 /* See if the cpu state needs to be fixed up */ 748 switch (prefetch_abort_fixup(tf, &ksig)) { 749 case ABORT_FIXUP_RETURN: 750 return; 751 case ABORT_FIXUP_FAILED: 752 /* Deliver a SIGILL to the process */ 753 ksig.signb = SIGILL; 754 ksig.code = 0; 755 td->td_frame = tf; 756 goto do_trapsignal; 757 default: 758 break; 759 } 760 761 /* Prefetch aborts cannot happen in kernel mode */ 762 if (__predict_false(!TRAP_USERMODE(tf))) 763 dab_fatal(tf, 0, tf->tf_pc, NULL, &ksig); 764 td->td_pticks = 0; 765 766 767 /* Ok validate the address, can only execute in USER space */ 768 if (__predict_false(fault_pc >= VM_MAXUSER_ADDRESS || 769 (fault_pc < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW))) { 770 ksig.signb = SIGSEGV; 771 ksig.code = 0; 772 goto do_trapsignal; 773 } 774 775 map = &td->td_proc->p_vmspace->vm_map; 776 va = trunc_page(fault_pc); 777 778 /* 779 * See if the 
pmap can handle this fault on its own... 780 */ 781#ifdef DEBUG 782 last_fault_code = -1; 783#endif 784 if (pmap_fault_fixup(map->pmap, va, VM_PROT_READ, 1)) 785 goto out; 786 787 if (map != kernel_map) { 788 PROC_LOCK(p); 789 p->p_lock++; 790 PROC_UNLOCK(p); 791 } 792 793 error = vm_fault(map, va, VM_PROT_READ | VM_PROT_EXECUTE, 794 VM_FAULT_NORMAL); 795 if (map != kernel_map) { 796 PROC_LOCK(p); 797 p->p_lock--; 798 PROC_UNLOCK(p); 799 } 800 801 if (__predict_true(error == 0)) 802 goto out; 803 804 if (error == ENOMEM) { 805 printf("VM: pid %d (%s), uid %d killed: " 806 "out of swap\n", td->td_proc->p_pid, td->td_proc->p_comm, 807 (td->td_proc->p_ucred) ? 808 td->td_proc->p_ucred->cr_uid : -1); 809 ksig.signb = SIGKILL; 810 } else { 811 ksig.signb = SIGSEGV; 812 } 813 ksig.code = 0; 814 815do_trapsignal: 816 call_trapsignal(td, ksig.signb, ksig.code); 817 818out: 819 userret(td, tf); 820 821} 822 823extern int badaddr_read_1(const uint8_t *, uint8_t *); 824extern int badaddr_read_2(const uint16_t *, uint16_t *); 825extern int badaddr_read_4(const uint32_t *, uint32_t *); 826/* 827 * Tentatively read an 8, 16, or 32-bit value from 'addr'. 828 * If the read succeeds, the value is written to 'rptr' and zero is returned. 829 * Else, return EFAULT. 830 */ 831int 832badaddr_read(void *addr, size_t size, void *rptr) 833{ 834 union { 835 uint8_t v1; 836 uint16_t v2; 837 uint32_t v4; 838 } u; 839 int rv; 840 841 cpu_drain_writebuf(); 842 843 /* Read from the test address. 
*/ 844 switch (size) { 845 case sizeof(uint8_t): 846 rv = badaddr_read_1(addr, &u.v1); 847 if (rv == 0 && rptr) 848 *(uint8_t *) rptr = u.v1; 849 break; 850 851 case sizeof(uint16_t): 852 rv = badaddr_read_2(addr, &u.v2); 853 if (rv == 0 && rptr) 854 *(uint16_t *) rptr = u.v2; 855 break; 856 857 case sizeof(uint32_t): 858 rv = badaddr_read_4(addr, &u.v4); 859 if (rv == 0 && rptr) 860 *(uint32_t *) rptr = u.v4; 861 break; 862 863 default: 864 panic("badaddr: invalid size (%lu)", (u_long) size); 865 } 866 867 /* Return EFAULT if the address was invalid, else zero */ 868 return (rv); 869} 870 871#define MAXARGS 8 872static void 873syscall(struct thread *td, trapframe_t *frame, u_int32_t insn) 874{ 875 struct proc *p = td->td_proc; 876 int code, error; 877 u_int nap, nargs; 878 register_t *ap, *args, copyargs[MAXARGS]; 879 struct sysent *callp; 880 int locked = 0; 881 882 PCPU_LAZY_INC(cnt.v_syscall); 883 td->td_pticks = 0; 884 if (td->td_ucred != td->td_proc->p_ucred) 885 cred_update_thread(td); 886 switch (insn & SWI_OS_MASK) { 887 case 0: /* XXX: we need our own one. 
*/ 888 nap = 4; 889 break; 890 default: 891 call_trapsignal(td, SIGILL, 0); 892 userret(td, frame); 893 return; 894 } 895 code = insn & 0x000fffff; 896 td->td_pticks = 0; 897 ap = &frame->tf_r0; 898 if (code == SYS_syscall) { 899 code = *ap++; 900 901 nap--; 902 } else if (code == SYS___syscall) { 903 code = *ap++; 904 nap -= 2; 905 ap++; 906 } 907 if (p->p_sysent->sv_mask) 908 code &= p->p_sysent->sv_mask; 909 if (code >= p->p_sysent->sv_size) 910 callp = &p->p_sysent->sv_table[0]; 911 else 912 callp = &p->p_sysent->sv_table[code]; 913 nargs = callp->sy_narg & SYF_ARGMASK; 914 memcpy(copyargs, ap, nap * sizeof(register_t)); 915 if (nargs > nap) { 916 error = copyin((void *)frame->tf_usr_sp, copyargs + nap, 917 (nargs - nap) * sizeof(register_t)); 918 if (error) 919 goto bad; 920 } 921 args = copyargs; 922 error = 0; 923#ifdef KTRACE 924 if (KTRPOINT(td, KTR_SYSCALL)) 925 ktrsyscall(code, nargs, args); 926#endif 927 928 CTR4(KTR_SYSC, "syscall enter thread %p pid %d proc %s code %d", td, 929 td->td_proc->p_pid, td->td_proc->p_comm, code); 930 if ((callp->sy_narg & SYF_MPSAFE) == 0) 931 mtx_lock(&Giant); 932 locked = 1; 933 if (error == 0) { 934 td->td_retval[0] = 0; 935 td->td_retval[1] = 0; 936 STOPEVENT(p, S_SCE, (callp->sy_narg & SYF_ARGMASK)); 937 PTRACESTOP_SC(p, td, S_PT_SCE); 938 AUDIT_SYSCALL_ENTER(code, td); 939 error = (*callp->sy_call)(td, args); 940 AUDIT_SYSCALL_EXIT(error, td); 941 KASSERT(td->td_ar == NULL, 942 ("returning from syscall with td_ar set!")); 943 } 944 switch (error) { 945 case 0: 946#ifdef __ARMEB__ 947 if ((insn & 0x000fffff) && 948 (code != SYS_lseek)) { 949 /* 950 * 64-bit return, 32-bit syscall. 
Fixup byte order 951 */ 952 frame->tf_r0 = 0; 953 frame->tf_r1 = td->td_retval[0]; 954 } else { 955 frame->tf_r0 = td->td_retval[0]; 956 frame->tf_r1 = td->td_retval[1]; 957 } 958#else 959 frame->tf_r0 = td->td_retval[0]; 960 frame->tf_r1 = td->td_retval[1]; 961#endif 962 frame->tf_spsr &= ~PSR_C_bit; /* carry bit */ 963 break; 964 965 case ERESTART: 966 /* 967 * Reconstruct the pc to point at the swi. 968 */ 969 frame->tf_pc -= INSN_SIZE; 970 break; 971 case EJUSTRETURN: 972 /* nothing to do */ 973 break; 974 default: 975bad: 976 frame->tf_r0 = error; 977 frame->tf_spsr |= PSR_C_bit; /* carry bit */ 978 break; 979 } 980 if (locked && (callp->sy_narg & SYF_MPSAFE) == 0) 981 mtx_unlock(&Giant); 982 983 984 userret(td, frame); 985 CTR4(KTR_SYSC, "syscall exit thread %p pid %d proc %s code %d", td, 986 td->td_proc->p_pid, td->td_proc->p_comm, code); 987 988 STOPEVENT(p, S_SCX, code); 989 PTRACESTOP_SC(p, td, S_PT_SCX); 990#ifdef KTRACE 991 if (KTRPOINT(td, KTR_SYSRET)) 992 ktrsysret(code, error, td->td_retval[0]); 993#endif 994 mtx_assert(&sched_lock, MA_NOTOWNED); 995 mtx_assert(&Giant, MA_NOTOWNED); 996} 997 998void 999swi_handler(trapframe_t *frame) 1000{ 1001 struct thread *td = curthread; 1002 uint32_t insn; 1003 1004 td->td_frame = frame; 1005 1006 td->td_pticks = 0; 1007 if (td->td_proc->p_flag & P_SA) 1008 thread_user_enter(td); 1009 /* 1010 * Make sure the program counter is correctly aligned so we 1011 * don't take an alignment fault trying to read the opcode. 1012 */ 1013 if (__predict_false(((frame->tf_pc - INSN_SIZE) & 3) != 0)) { 1014 call_trapsignal(td, SIGILL, 0); 1015 userret(td, frame); 1016 return; 1017 } 1018 insn = *(u_int32_t *)(frame->tf_pc - INSN_SIZE); 1019 /* 1020 * Enable interrupts if they were enabled before the exception. 1021 * Since all syscalls *should* come from user mode it will always 1022 * be safe to enable them, but check anyway. 
1023 */ 1024 if (td->td_md.md_spinlock_count == 0) { 1025 if (__predict_true(frame->tf_spsr & I32_bit) == 0) 1026 enable_interrupts(I32_bit); 1027 if (__predict_true(frame->tf_spsr & F32_bit) == 0) 1028 enable_interrupts(F32_bit); 1029 } 1030 1031 syscall(td, frame, insn); 1032} 1033 1034