/* trap.c revision 1.71 */
/*	$OpenBSD: trap.c,v 1.71 2010/12/20 21:32:06 miod Exp $	*/
/*
 * Copyright (c) 2004, Miodrag Vallat.
 * Copyright (c) 1998 Steve Murphree, Jr.
 * Copyright (c) 1996 Nivas Madhur
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Nivas Madhur.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
/*
 * Mach Operating System
 * Copyright (c) 1991 Carnegie Mellon University
 * Copyright (c) 1991 OMRON Corporation
 * All Rights Reserved.
39 * 40 * Permission to use, copy, modify and distribute this software and its 41 * documentation is hereby granted, provided that both the copyright 42 * notice and this permission notice appear in all copies of the 43 * software, derivative works or modified versions, and any portions 44 * thereof, and that both notices appear in supporting documentation. 45 * 46 */ 47 48#include <sys/types.h> 49#include <sys/param.h> 50#include <sys/proc.h> 51#include <sys/signalvar.h> 52#include <sys/user.h> 53#include <sys/syscall.h> 54#include <sys/systm.h> 55#include <sys/ktrace.h> 56 57#include "systrace.h" 58#include <dev/systrace.h> 59 60#include <uvm/uvm_extern.h> 61 62#include <machine/asm_macro.h> 63#include <machine/cmmu.h> 64#include <machine/cpu.h> 65#ifdef M88100 66#include <machine/m88100.h> 67#include <machine/m8820x.h> 68#endif 69#ifdef M88110 70#include <machine/m88110.h> 71#endif 72#include <machine/fpu.h> 73#include <machine/pcb.h> 74#include <machine/psl.h> 75#include <machine/trap.h> 76 77#include <machine/db_machdep.h> 78 79#define SSBREAKPOINT (0xF000D1F8U) /* Single Step Breakpoint */ 80 81#define USERMODE(PSR) (((PSR) & PSR_MODE) == 0) 82#define SYSTEMMODE(PSR) (((PSR) & PSR_MODE) != 0) 83 84__dead void panictrap(int, struct trapframe *); 85__dead void error_fatal(struct trapframe *); 86int double_reg_fixup(struct trapframe *); 87int ss_put_value(struct proc *, vaddr_t, u_int); 88 89extern void regdump(struct trapframe *f); 90 91const char *trap_type[] = { 92 "Reset", 93 "Interrupt Exception", 94 "Instruction Access", 95 "Data Access Exception", 96 "Misaligned Access", 97 "Unimplemented Opcode", 98 "Privilege Violation" 99 "Bounds Check Violation", 100 "Illegal Integer Divide", 101 "Integer Overflow", 102 "Error Exception", 103 "Non-Maskable Exception", 104}; 105 106const int trap_types = sizeof trap_type / sizeof trap_type[0]; 107 108#ifdef M88100 109const char *pbus_exception_type[] = { 110 "Success (No Fault)", 111 "unknown 1", 112 "unknown 2", 113 
"Bus Error", 114 "Segment Fault", 115 "Page Fault", 116 "Supervisor Violation", 117 "Write Violation", 118}; 119#endif 120 121static inline void 122userret(struct proc *p) 123{ 124 int sig; 125 126 /* take pending signals */ 127 while ((sig = CURSIG(p)) != 0) 128 postsig(sig); 129 130 curcpu()->ci_schedstate.spc_curpriority = p->p_priority = p->p_usrpri; 131} 132 133__dead void 134panictrap(int type, struct trapframe *frame) 135{ 136 static int panicing = 0; 137 138 if (panicing++ == 0) { 139#ifdef M88100 140 if (CPU_IS88100) { 141 if (type == 2) { 142 /* instruction exception */ 143 printf("\nInstr access fault (%s) v = %x, " 144 "frame %p\n", 145 pbus_exception_type[ 146 CMMU_PFSR_FAULT(frame->tf_ipfsr)], 147 frame->tf_sxip & XIP_ADDR, frame); 148 } else if (type == 3) { 149 /* data access exception */ 150 printf("\nData access fault (%s) v = %x, " 151 "frame %p\n", 152 pbus_exception_type[ 153 CMMU_PFSR_FAULT(frame->tf_dpfsr)], 154 frame->tf_sxip & XIP_ADDR, frame); 155 } else 156 printf("\nTrap type %d, v = %x, frame %p\n", 157 type, frame->tf_sxip & XIP_ADDR, frame); 158 } 159#endif 160#ifdef M88110 161 if (CPU_IS88110) { 162 printf("\nTrap type %d, v = %x, frame %p\n", 163 type, frame->tf_exip, frame); 164 } 165#endif 166#ifdef DDB 167 regdump(frame); 168#endif 169 } 170 if ((u_int)type < trap_types) 171 panic(trap_type[type]); 172 else 173 panic("trap %d", type); 174 /*NOTREACHED*/ 175} 176 177/* 178 * Handle external interrupts. 179 */ 180void 181interrupt(struct trapframe *frame) 182{ 183 struct cpu_info *ci = curcpu(); 184 185 ci->ci_intrdepth++; 186 md_interrupt_func(frame); 187 ci->ci_intrdepth--; 188} 189 190#ifdef M88110 191/* 192 * Handle non-maskable interrupts. 193 */ 194int 195nmi(struct trapframe *frame) 196{ 197 return md_nmi_func(frame); 198} 199 200/* 201 * Reenable non-maskable interrupts. 
 */
void
nmi_wrapup(struct trapframe *frame)
{
	md_nmi_wrapup_func(frame);
}
#endif

/*
 * Handle asynchronous software traps.
 */
void
ast(struct trapframe *frame)
{
	struct cpu_info *ci = curcpu();
	struct proc *p = ci->ci_curproc;

	uvmexp.softs++;
	p->p_md.md_astpending = 0;
	if (p->p_flag & P_OWEUPC) {
		KERNEL_PROC_LOCK(p);
		ADDUPROF(p);
		KERNEL_PROC_UNLOCK(p);
	}
	if (ci->ci_want_resched)
		preempt(NULL);

	userret(p);
}

#ifdef M88100
/*
 * 88100 trap dispatcher.  Runs with the trapframe of the faulting context;
 * user-mode traps are rebased by T_USER so one switch handles both modes.
 */
void
m88100_trap(u_int type, struct trapframe *frame)
{
	struct proc *p;
	struct vm_map *map;
	vaddr_t va, pcb_onfault;
	vm_prot_t ftype;
	int fault_type, pbus_type;
	u_long fault_code;
	vaddr_t fault_addr;
	struct vmspace *vm;
	union sigval sv;
	int result;
#ifdef DDB
	int s;
	u_int psr;
#endif
	int sig = 0;

	uvmexp.traps++;
	if ((p = curproc) == NULL)
		p = &proc0;

	if (USERMODE(frame->tf_epsr)) {
		type += T_USER;
		p->p_md.md_tf = frame;	/* for ptrace/signals */
	}
	fault_type = SI_NOINFO;
	fault_code = 0;
	fault_addr = frame->tf_sxip & XIP_ADDR;

	switch (type) {
	default:
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

#if defined(DDB)
	case T_KDB_BREAK:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_ENTRY:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
#endif /* DDB */
	case T_ILLFLT:
		printf("Unimplemented opcode!\n");
		panictrap(frame->tf_vector, frame);
		break;

	case T_MISALGNFLT:
		printf("kernel misaligned access exception @ 0x%08x\n",
		    frame->tf_sxip);
		panictrap(frame->tf_vector, frame);
		break;

	case T_INSTFLT:
		/* kernel
		 * mode instruction access fault.
		 * Should never, never happen for a non-paged kernel.
		 */
#ifdef TRAPDEBUG
		pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
		printf("Kernel Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
		    pbus_type, pbus_exception_type[pbus_type],
		    fault_addr, frame, frame->tf_cpu);
#endif
		panictrap(frame->tf_vector, frame);
		break;

	case T_DATAFLT:
		/* kernel mode data fault */

		/* data fault on the user address? (DAS clear = user space) */
		if ((frame->tf_dmt0 & DMT_DAS) == 0) {
			KERNEL_LOCK();
			goto user_fault;
		}

		fault_addr = frame->tf_dma0;
		if (frame->tf_dmt0 & (DMT_WRITE|DMT_LOCKBAR)) {
			ftype = VM_PROT_READ|VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		} else {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		}

		va = trunc_page((vaddr_t)fault_addr);
		if (va == 0) {
			panic("trap: bad kernel access at %x", fault_addr);
		}

		KERNEL_LOCK();
		vm = p->p_vmspace;
		map = kernel_map;

		pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
#ifdef TRAPDEBUG
		printf("Kernel Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
		    pbus_type, pbus_exception_type[pbus_type],
		    fault_addr, frame, frame->tf_cpu);
#endif

		switch (pbus_type) {
		case CMMU_PFSR_SUCCESS:
			/*
			 * The fault was resolved. Call data_access_emulation
			 * to drain the data unit pipe line and reset dmt0
			 * so that trap won't get called again.
			 */
			data_access_emulation((u_int *)frame);
			frame->tf_dpfsr = 0;
			frame->tf_dmt0 = 0;
			KERNEL_UNLOCK();
			return;
		case CMMU_PFSR_SFAULT:
		case CMMU_PFSR_PFAULT:
			/* disable recursive onfault handling during uvm_fault */
			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
				p->p_addr->u_pcb.pcb_onfault = 0;
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			if (result == 0) {
				/*
				 * We could resolve the fault. Call
				 * data_access_emulation to drain the data
				 * unit pipe line and reset dmt0 so that trap
				 * won't get called again.
				 */
				data_access_emulation((u_int *)frame);
				frame->tf_dpfsr = 0;
				frame->tf_dmt0 = 0;
				KERNEL_UNLOCK();
				return;
			}
			break;
		}
#ifdef TRAPDEBUG
		printf("PBUS Fault %d (%s) va = 0x%x\n", pbus_type,
		    pbus_exception_type[pbus_type], va);
#endif
		KERNEL_UNLOCK();
		panictrap(frame->tf_vector, frame);
		/* NOTREACHED */
	case T_INSTFLT+T_USER:
		/* User mode instruction access fault */
		/* FALLTHROUGH */
	case T_DATAFLT+T_USER:
		KERNEL_PROC_LOCK(p);
user_fault:
		if (type == T_INSTFLT + T_USER) {
			pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr);
#ifdef TRAPDEBUG
			printf("User Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
			    pbus_type, pbus_exception_type[pbus_type],
			    fault_addr, frame, frame->tf_cpu);
#endif
		} else {
			fault_addr = frame->tf_dma0;
			pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr);
#ifdef TRAPDEBUG
			printf("User Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n",
			    pbus_type, pbus_exception_type[pbus_type],
			    fault_addr, frame, frame->tf_cpu);
#endif
		}

		if (frame->tf_dmt0 & (DMT_WRITE | DMT_LOCKBAR)) {
			ftype = VM_PROT_READ | VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		} else {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		}

		va = trunc_page((vaddr_t)fault_addr);

		vm = p->p_vmspace;
		map = &vm->vm_map;
		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
			p->p_addr->u_pcb.pcb_onfault = 0;

		/* Call uvm_fault() to resolve non-bus error faults */
		switch (pbus_type) {
		case CMMU_PFSR_SUCCESS:
			result = 0;
			break;
		case CMMU_PFSR_BERROR:
			result = EACCES;
			break;
		default:
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			break;
		}

		p->p_addr->u_pcb.pcb_onfault = pcb_onfault;

		/* faults above the stack reserve area grow the stack */
		if ((caddr_t)va >=
		    vm->vm_maxsaddr) {
			if (result == 0)
				uvm_grow(p, va);
			else if (result == EACCES)
				result = EFAULT;
		}

		/*
		 * This could be a fault caused in copyin*()
		 * while accessing user space.
		 */
		if (result != 0 && pcb_onfault != 0) {
			/* resume at the registered onfault handler */
			frame->tf_snip = pcb_onfault | NIP_V;
			frame->tf_sfip = (pcb_onfault + 4) | FIP_V;
			frame->tf_sxip = 0;
			/*
			 * Continue as if the fault had been resolved, but
			 * do not try to complete the faulting access.
			 */
			frame->tf_dmt0 |= DMT_SKIP;
			result = 0;
		}

		if (result == 0) {
			if (type == T_INSTFLT + T_USER) {
				/*
				 * back up SXIP, SNIP,
				 * clearing the Error bit
				 */
				frame->tf_sfip = frame->tf_snip & ~FIP_E;
				frame->tf_snip = frame->tf_sxip & ~NIP_E;
				frame->tf_ipfsr = 0;
			} else {
				/*
				 * We could resolve the fault. Call
				 * data_access_emulation to drain the data unit
				 * pipe line and reset dmt0 so that trap won't
				 * get called again.
				 */
				data_access_emulation((u_int *)frame);
				frame->tf_dpfsr = 0;
				frame->tf_dmt0 = 0;
			}
		} else {
			sig = result == EACCES ? SIGBUS : SIGSEGV;
			fault_type = result == EACCES ?
			    BUS_ADRERR : SEGV_MAPERR;
		}
		/* kernel faults took KERNEL_LOCK, user faults the proc lock */
		if (type == T_DATAFLT)
			KERNEL_UNLOCK();
		else
			KERNEL_PROC_UNLOCK(p);
		break;
	case T_MISALGNFLT+T_USER:
		/* Fix any misaligned ld.d or st.d instructions */
		sig = double_reg_fixup(frame);
		fault_type = BUS_ADRALN;
		break;
	case T_PRIVINFLT+T_USER:
	case T_ILLFLT+T_USER:
#ifndef DDB
	case T_KDB_BREAK:
	case T_KDB_ENTRY:
#endif
	case T_KDB_BREAK+T_USER:
	case T_KDB_ENTRY+T_USER:
	case T_KDB_TRACE:
	case T_KDB_TRACE+T_USER:
		sig = SIGILL;
		break;
	case T_BNDFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_ZERODIV+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTDIV;
		break;
	case T_OVFFLT+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTOVF;
		break;
	case T_FPEPFLT+T_USER:
		sig = SIGFPE;
		break;
	case T_SIGSYS+T_USER:
		sig = SIGSYS;
		break;
	case T_STEPBPT+T_USER:
#ifdef PTRACE
		/*
		 * This trap is used by the kernel to support single-step
		 * debugging (although any user could generate this trap
		 * which should probably be handled differently). When a
		 * process is continued by a debugger with the PT_STEP
		 * function of ptrace (single step), the kernel inserts
		 * one or two breakpoints in the user process so that only
		 * one instruction (or two in the case of a delayed branch)
		 * is executed.  When this breakpoint is hit, we get the
		 * T_STEPBPT trap.
		 */
		{
			u_int instr;
			vaddr_t pc = PC_REGS(&frame->tf_regs);

			/* read break instruction */
			copyin((caddr_t)pc, &instr, sizeof(u_int));

			/* check and see if we got here by accident */
			if ((p->p_md.md_bp0va != pc &&
			     p->p_md.md_bp1va != pc) ||
			    instr != SSBREAKPOINT) {
				sig = SIGTRAP;
				fault_type = TRAP_TRACE;
				break;
			}

			/* restore original instruction and clear breakpoint */
			if (p->p_md.md_bp0va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp0save);
				p->p_md.md_bp0va = 0;
			}
			if (p->p_md.md_bp1va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp1save);
				p->p_md.md_bp1va = 0;
			}

#if 1
			/* rewind the pipeline to re-execute the restored insn */
			frame->tf_sfip = frame->tf_snip;
			frame->tf_snip = pc | NIP_V;
#endif
			sig = SIGTRAP;
			fault_type = TRAP_BRKPT;
		}
#else
		sig = SIGTRAP;
		fault_type = TRAP_TRACE;
#endif
		break;

	case T_USERBPT+T_USER:
		/*
		 * This trap is meant to be used by debuggers to implement
		 * breakpoint debugging.  When we get this trap, we just
		 * return a signal which gets caught by the debugger.
		 */
		frame->tf_sfip = frame->tf_snip;
		frame->tf_snip = frame->tf_sxip;
		sig = SIGTRAP;
		fault_type = TRAP_BRKPT;
		break;

	}

	/*
	 * If trap from supervisor mode, just return
	 */
	if (type < T_USER)
		return;

	if (sig) {
		sv.sival_ptr = (void *)fault_addr;
		KERNEL_PROC_LOCK(p);
		trapsignal(p, sig, fault_code, fault_type, sv);
		KERNEL_PROC_UNLOCK(p);
		/*
		 * don't want multiple faults - we are going to
		 * deliver signal.
		 */
		frame->tf_dmt0 = 0;
		frame->tf_ipfsr = frame->tf_dpfsr = 0;
	}

	userret(p);
}
#endif /* M88100 */

#ifdef M88110
/*
 * 88110 trap dispatcher; same structure as m88100_trap(), but the 88110
 * reports faults through exip/dsr/isr instead of the 88100 pipeline
 * registers.
 */
void
m88110_trap(u_int type, struct trapframe *frame)
{
	struct proc *p;
	struct vm_map *map;
	vaddr_t va, pcb_onfault;
	vm_prot_t ftype;
	int fault_type;
	u_long fault_code;
	vaddr_t fault_addr;
	struct vmspace *vm;
	union sigval sv;
	int result;
#ifdef DDB
	int s;
	u_int psr;
#endif
	int sig = 0;

	uvmexp.traps++;
	if ((p = curproc) == NULL)
		p = &proc0;

	fault_type = SI_NOINFO;
	fault_code = 0;
	fault_addr = frame->tf_exip & XIP_ADDR;

	/*
	 * 88110 errata #16 (4.2) or #3 (5.1.1):
	 * ``bsr, br, bcnd, jsr and jmp instructions with the .n extension
	 *   can cause the enip value to be incremented by 4 incorrectly
	 *   if the instruction in the delay slot is the first word of a
	 *   page which misses in the mmu and results in a hardware
	 *   tablewalk which encounters an exception or an invalid
	 *   descriptor.  The exip value in this case will point to the
	 *   first word of the page, and the D bit will be set.
	 *
	 *   Note: if the instruction is a jsr.n r1, r1 will be overwritten
	 *   with erroneous data.  Therefore, no recovery is possible. Do
	 *   not allow this instruction to occupy the last word of a page.
	 *
	 *   Suggested fix: recover in general by backing up the exip by 4
	 *   and clearing the delay bit before an rte when the lower 3 hex
	 *   digits of the exip are 001.''
	 */
	if ((frame->tf_exip & PAGE_MASK) == 0x00000001 && type == T_INSTFLT) {
		u_int instr;

		/*
		 * Note that we have initialized fault_addr above, so that
		 * signals provide the correct address if necessary.
		 */
		frame->tf_exip = (frame->tf_exip & ~1) - 4;

		/*
		 * Check the instruction at the (backed up) exip.
		 * If it is a jsr.n, abort.
		 */
		if (!USERMODE(frame->tf_epsr)) {
			instr = *(u_int *)frame->tf_exip;
			if (instr == 0xf400cc01)	/* jsr.n r1 */
				panic("mc88110 errata #16, exip %p enip %p",
				    (frame->tf_exip + 4) | 1, frame->tf_enip);
		} else {
			/* copyin here should not fail */
			if (copyin((const void *)frame->tf_exip, &instr,
			    sizeof instr) == 0 &&
			    instr == 0xf400cc01) {
				uprintf("mc88110 errata #16, exip %p enip %p",
				    (frame->tf_exip + 4) | 1, frame->tf_enip);
				sig = SIGILL;
			}
		}
	}

	if (USERMODE(frame->tf_epsr)) {
		type += T_USER;
		p->p_md.md_tf = frame;	/* for ptrace/signals */
	}

	if (sig != 0)
		goto deliver;

	switch (type) {
	default:
lose:
		panictrap(frame->tf_vector, frame);
		break;
		/*NOTREACHED*/

#ifdef DEBUG
	case T_110_DRM+T_USER:
	case T_110_DRM:
		printf("DMMU read miss: Hardware Table Searches should be enabled!\n");
		goto lose;
	case T_110_DWM+T_USER:
	case T_110_DWM:
		printf("DMMU write miss: Hardware Table Searches should be enabled!\n");
		goto lose;
	case T_110_IAM+T_USER:
	case T_110_IAM:
		printf("IMMU miss: Hardware Table Searches should be enabled!\n");
		goto lose;
#endif

#ifdef DDB
	case T_KDB_TRACE:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_TRACE, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_BREAK:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame);
		set_psr(psr);
		splx(s);
		return;
	case T_KDB_ENTRY:
		s = splhigh();
		set_psr((psr = get_psr()) & ~PSR_IND);
		ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame);
		set_psr(psr);
		/* skip trap instruction */
		m88110_skip_insn(frame);
		splx(s);
		return;
#endif /* DDB */
	case T_ILLFLT:
		/*
		 * The 88110 seems to trigger an instruction fault in
		 * supervisor mode when running the following sequence:
		 *
		 *	bcnd.n cond, reg, 1f
		 *	arithmetic insn
		 *	...
		 *	the same exact arithmetic insn
		 *  1:	another arithmetic insn stalled by the previous one
		 *	...
		 *
		 * The exception is reported with exip pointing to the
		 * branch address.  I don't know, at this point, if there
		 * is any better workaround than the aggressive one
		 * implemented below; I don't see how this could relate to
		 * any of the 88110 errata (although it might be related to
		 * branch prediction).
		 *
		 * For the record, the exact sequence triggering the
		 * spurious exception is:
		 *
		 *	bcnd.n	eq0, r2,  1f
		 *	or	r25, r0,  r22
		 *	bsr	somewhere
		 *	or	r25, r0,  r22
		 *  1:	cmp	r13, r25, r20
		 *
		 * within the same cache line.
		 *
		 * Simply ignoring the exception and returning does not
		 * cause the exception to disappear.  Clearing the
		 * instruction cache works, but on 88110+88410 systems,
		 * the 88410 needs to be invalidated as well.  (note that
		 * the size passed to the flush routines does not matter
		 * since there is no way to flush a subset of the 88110
		 * I$ anyway)
		 */
	    {
		extern void *kernel_text, *etext;

		if (fault_addr >= (vaddr_t)&kernel_text &&
		    fault_addr < (vaddr_t)&etext) {
			cmmu_flush_inst_cache(curcpu()->ci_cpuid,
			    trunc_page(fault_addr), PAGE_SIZE);
			cmmu_flush_cache(curcpu()->ci_cpuid,
			    trunc_page(fault_addr), PAGE_SIZE);
			return;
		}
	    }
		goto lose;
	case T_MISALGNFLT:
		printf("kernel mode misaligned access exception @ 0x%08x\n",
		    frame->tf_exip);
		goto lose;

	case T_INSTFLT:
		/* kernel mode instruction access fault.
		 * Should never, never happen for a non-paged kernel.
		 */
#ifdef TRAPDEBUG
		printf("Kernel Instruction fault exip %x isr %x ilar %x\n",
		    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
#endif
		goto lose;

	case T_DATAFLT:
		/* kernel mode data fault */

		/* data fault on the user address?
		 * (DSR_SU clear means the access was to user space)
		 */
		if ((frame->tf_dsr & CMMU_DSR_SU) == 0) {
			KERNEL_LOCK();
			goto m88110_user_fault;
		}

#ifdef TRAPDEBUG
		printf("Kernel Data access fault exip %x dsr %x dlar %x\n",
		    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
#endif

		fault_addr = frame->tf_dlar;
		if (frame->tf_dsr & CMMU_DSR_RW) {
			/* read access */
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
		} else {
			ftype = VM_PROT_READ|VM_PROT_WRITE;
			fault_code = VM_PROT_WRITE;
		}

		va = trunc_page((vaddr_t)fault_addr);
		if (va == 0) {
			panic("trap: bad kernel access at %x", fault_addr);
		}

		KERNEL_LOCK();
		vm = p->p_vmspace;
		map = kernel_map;

		if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
			/*
			 * On a segment or a page fault, call uvm_fault() to
			 * resolve the fault.
			 */
			if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
				p->p_addr->u_pcb.pcb_onfault = 0;
			result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
			p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			if (result == 0) {
				KERNEL_UNLOCK();
				return;
			}
		}
		KERNEL_UNLOCK();
		goto lose;
	case T_INSTFLT+T_USER:
		/* User mode instruction access fault */
		/* FALLTHROUGH */
	case T_DATAFLT+T_USER:
		KERNEL_PROC_LOCK(p);
m88110_user_fault:
		if (type == T_INSTFLT+T_USER) {
			ftype = VM_PROT_READ;
			fault_code = VM_PROT_READ;
#ifdef TRAPDEBUG
			printf("User Instruction fault exip %x isr %x ilar %x\n",
			    frame->tf_exip, frame->tf_isr, frame->tf_ilar);
#endif
		} else {
			fault_addr = frame->tf_dlar;
			if (frame->tf_dsr & CMMU_DSR_RW) {
				ftype = VM_PROT_READ;
				fault_code = VM_PROT_READ;
			} else {
				ftype = VM_PROT_READ|VM_PROT_WRITE;
				fault_code = VM_PROT_WRITE;
			}
#ifdef TRAPDEBUG
			printf("User Data access fault exip %x dsr %x dlar %x\n",
			    frame->tf_exip, frame->tf_dsr, frame->tf_dlar);
#endif
		}

		va = trunc_page((vaddr_t)fault_addr);

		vm = p->p_vmspace;
		map = &vm->vm_map;
		if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0)
			p->p_addr->u_pcb.pcb_onfault = 0;

		/*
		 * Call uvm_fault() to resolve non-bus error faults
		 * whenever possible.
		 */
		if (type == T_INSTFLT+T_USER) {
			/* instruction faults */
			if (frame->tf_isr &
			    (CMMU_ISR_BE | CMMU_ISR_SP | CMMU_ISR_TBE)) {
				/* bus error, supervisor protection */
				result = EACCES;
			} else
			if (frame->tf_isr & (CMMU_ISR_SI | CMMU_ISR_PI)) {
				/* segment or page fault */
				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			} else {
#ifdef TRAPDEBUG
				printf("Unexpected Instruction fault isr %x\n",
				    frame->tf_isr);
#endif
				/* drop whichever lock was taken on entry */
				if (type == T_DATAFLT)
					KERNEL_UNLOCK();
				else
					KERNEL_PROC_UNLOCK(p);
				panictrap(frame->tf_vector, frame);
			}
		} else {
			/* data faults */
			if (frame->tf_dsr & CMMU_DSR_BE) {
				/* bus error */
				result = EACCES;
			} else
			if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) {
				/* segment or page fault */
				result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
				p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
			} else
			if (frame->tf_dsr & (CMMU_DSR_CP | CMMU_DSR_WA)) {
				/* copyback or write allocate error */
				result = EACCES;
			} else
			if (frame->tf_dsr & CMMU_DSR_WE) {
				/* write fault */
				/* This could be a write protection fault or an
				 * exception to set the used and modified bits
				 * in the pte. Basically, if we got a write
				 * error, then we already have a pte entry that
				 * faulted in from a previous seg fault or page
				 * fault.
				 * Get the pte and check the status of the
				 * modified and valid bits to determine if this
				 * indeed a real write fault.
				 * XXX smurph
				 */
				if (pmap_set_modify(map->pmap, va)) {
#ifdef TRAPDEBUG
					printf("Corrected userland write fault, pmap %p va %p\n",
					    map->pmap, va);
#endif
					result = 0;
				} else {
					/* must be a real wp fault */
#ifdef TRAPDEBUG
					printf("Uncorrected userland write fault, pmap %p va %p\n",
					    map->pmap, va);
#endif
					result = uvm_fault(map, va, VM_FAULT_INVALID, ftype);
					p->p_addr->u_pcb.pcb_onfault = pcb_onfault;
				}
			} else {
#ifdef TRAPDEBUG
				printf("Unexpected Data access fault dsr %x\n",
				    frame->tf_dsr);
#endif
				if (type == T_DATAFLT)
					KERNEL_UNLOCK();
				else
					KERNEL_PROC_UNLOCK(p);
				panictrap(frame->tf_vector, frame);
			}
		}

		/* faults above the stack reserve area grow the stack */
		if ((caddr_t)va >= vm->vm_maxsaddr) {
			if (result == 0)
				uvm_grow(p, va);
			else if (result == EACCES)
				result = EFAULT;
		}
		if (type == T_DATAFLT)
			KERNEL_UNLOCK();
		else
			KERNEL_PROC_UNLOCK(p);

		/*
		 * This could be a fault caused in copyin*()
		 * while accessing user space.
		 */
		if (result != 0 && pcb_onfault != 0) {
			frame->tf_exip = pcb_onfault;
			/*
			 * Continue as if the fault had been resolved.
			 */
			result = 0;
		}

		if (result != 0) {
			sig = result == EACCES ? SIGBUS : SIGSEGV;
			fault_type = result == EACCES ?
			    BUS_ADRERR : SEGV_MAPERR;
		}
		break;
	case T_MISALGNFLT+T_USER:
		/* Fix any misaligned ld.d or st.d instructions */
		sig = double_reg_fixup(frame);
		fault_type = BUS_ADRALN;
		if (sig == 0) {
			/* skip recovered instruction */
			m88110_skip_insn(frame);
			goto userexit;
		}
		break;
	case T_PRIVINFLT+T_USER:
		fault_type = ILL_PRVREG;
		/* FALLTHROUGH */
	case T_ILLFLT+T_USER:
#ifndef DDB
	case T_KDB_BREAK:
	case T_KDB_ENTRY:
	case T_KDB_TRACE:
#endif
	case T_KDB_BREAK+T_USER:
	case T_KDB_ENTRY+T_USER:
	case T_KDB_TRACE+T_USER:
		sig = SIGILL;
		break;
	case T_BNDFLT+T_USER:
		sig = SIGFPE;
		/* skip trap instruction */
		m88110_skip_insn(frame);
		break;
	case T_ZERODIV+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTDIV;
		/* skip trap instruction */
		m88110_skip_insn(frame);
		break;
	case T_OVFFLT+T_USER:
		sig = SIGFPE;
		fault_type = FPE_INTOVF;
		/* skip trap instruction */
		m88110_skip_insn(frame);
		break;
	case T_FPEPFLT+T_USER:
		/* FPU exceptions are handled (and signalled) separately */
		m88110_fpu_exception(frame);
		goto userexit;
	case T_SIGSYS+T_USER:
		sig = SIGSYS;
		break;
	case T_STEPBPT+T_USER:
#ifdef PTRACE
		/*
		 * This trap is used by the kernel to support single-step
		 * debugging (although any user could generate this trap
		 * which should probably be handled differently). When a
		 * process is continued by a debugger with the PT_STEP
		 * function of ptrace (single step), the kernel inserts
		 * one or two breakpoints in the user process so that only
		 * one instruction (or two in the case of a delayed branch)
		 * is executed.  When this breakpoint is hit, we get the
		 * T_STEPBPT trap.
		 */
		{
			u_int instr;
			vaddr_t pc = PC_REGS(&frame->tf_regs);

			/* read break instruction */
			copyin((caddr_t)pc, &instr, sizeof(u_int));

			/* check and see if we got here by accident */
			if ((p->p_md.md_bp0va != pc &&
			     p->p_md.md_bp1va != pc) ||
			    instr != SSBREAKPOINT) {
				sig = SIGTRAP;
				fault_type = TRAP_TRACE;
				break;
			}

			/* restore original instruction and clear breakpoint */
			if (p->p_md.md_bp0va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp0save);
				p->p_md.md_bp0va = 0;
			}
			if (p->p_md.md_bp1va == pc) {
				ss_put_value(p, pc, p->p_md.md_bp1save);
				p->p_md.md_bp1va = 0;
			}

			sig = SIGTRAP;
			fault_type = TRAP_BRKPT;
		}
#else
		sig = SIGTRAP;
		fault_type = TRAP_TRACE;
#endif
		break;
	case T_USERBPT+T_USER:
		/*
		 * This trap is meant to be used by debuggers to implement
		 * breakpoint debugging.  When we get this trap, we just
		 * return a signal which gets caught by the debugger.
		 */
		sig = SIGTRAP;
		fault_type = TRAP_BRKPT;
		break;
	}

	/*
	 * If trap from supervisor mode, just return
	 */
	if (type < T_USER)
		return;

	if (sig) {
deliver:
		sv.sival_ptr = (void *)fault_addr;
		KERNEL_PROC_LOCK(p);
		trapsignal(p, sig, fault_code, fault_type, sv);
		KERNEL_PROC_UNLOCK(p);
	}

userexit:
	userret(p);
}
#endif /* M88110 */

/*
 * Reset and Error exceptions are not recoverable; dump state and panic.
 */
__dead void
error_fatal(struct trapframe *frame)
{
	if (frame->tf_vector == 0)
		printf("\nCPU %d Reset Exception\n", cpu_number());
	else
		printf("\nCPU %d Error Exception\n", cpu_number());

#ifdef DDB
	regdump((struct trapframe*)frame);
#endif
	panic("unrecoverable exception %d", frame->tf_vector);
}

#ifdef M88100
/*
 * 88100 system call handler: decode the syscall number and arguments
 * from the trapframe, run the syscall, and fix up the sxip/snip/sfip
 * pipeline registers according to the result (see comment below).
 */
void
m88100_syscall(register_t code, struct trapframe *tf)
{
	int i, nsys, nap;
	struct sysent *callp;
	struct proc *p = curproc;
	int error;
	register_t args[8], rval[2], *ap;
	int nolock;

	uvmexp.syscalls++;

	callp = p->p_emul->e_sysent;
	nsys = p->p_emul->e_nsysent;

	p->p_md.md_tf = tf;

	/*
	 * For 88k, all the arguments are passed in the registers (r2-r9),
	 * and further arguments (if any) on stack.
	 * For syscall (and __syscall), r2 (and r3) has the actual code.
	 * __syscall takes a quad syscall number, so that other
	 * arguments are at their natural alignments.
	 */
	ap = &tf->tf_r[2];
	nap = 8;	/* r2-r9 */

	switch (code) {
	case SYS_syscall:
		code = *ap++;
		nap--;
		break;
	case SYS___syscall:
		if (callp != sysent)
			break;
		code = ap[_QUAD_LOWWORD];
		ap += 2;
		nap -= 2;
		break;
	}

	if (code < 0 || code >= nsys)
		callp += p->p_emul->e_nosys;	/* illegal syscall number */
	else
		callp += code;

	i = callp->sy_argsize / sizeof(register_t);
	if (i > sizeof(args) / sizeof(register_t))
		panic("syscall nargs");
	if (i > nap) {
		/* remaining arguments are on the user stack */
		bcopy((caddr_t)ap, (caddr_t)args, nap * sizeof(register_t));
		error = copyin((caddr_t)tf->tf_r[31], (caddr_t)(args + nap),
		    (i - nap) * sizeof(register_t));
	} else {
		bcopy((caddr_t)ap, (caddr_t)args, i * sizeof(register_t));
		error = 0;
	}

	if (error != 0)
		goto bad;

#ifdef SYSCALL_DEBUG
	KERNEL_PROC_LOCK(p);
	scdebug_call(p, code, args);
	KERNEL_PROC_UNLOCK(p);
#endif
#ifdef KTRACE
	if (KTRPOINT(p, KTR_SYSCALL)) {
		KERNEL_PROC_LOCK(p);
		ktrsyscall(p, code, callp->sy_argsize, args);
		KERNEL_PROC_UNLOCK(p);
	}
#endif
	rval[0] = 0;
	rval[1] = tf->tf_r[3];
#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE)) {
		KERNEL_PROC_LOCK(p);
		error = systrace_redirect(code, p, args, rval);
		KERNEL_PROC_UNLOCK(p);
	} else
#endif
	{
		nolock = (callp->sy_flags & SY_NOLOCK);
		if (!nolock)
			KERNEL_PROC_LOCK(p);
		error = (*callp->sy_call)(p, args, rval);
		if (!nolock)
			KERNEL_PROC_UNLOCK(p);
	}

	/*
	 * system call will look like:
	 *   or r13, r0, <code>
	 *   tb0 0, r0, <128> <- sxip
	 *   br err <- snip
	 *   jmp r1 <- sfip
	 *  err: or.u r3, r0, hi16(errno)
	 *	st r2, r3, lo16(errno)
	 *	subu r2, r0, 1
	 *	jmp r1
	 *
	 * So, when we take syscall trap, sxip/snip/sfip will be as
	 * shown above.
	 * Given this,
	 * 1.
If the system call returned 0, need to skip nip. 1248 * nip = fip, fip += 4 1249 * (doesn't matter what fip + 4 will be but we will never 1250 * execute this since jmp r1 at nip will change the execution flow.) 1251 * 2. If the system call returned an errno > 0, plug the value 1252 * in r2, and leave nip and fip unchanged. This will have us 1253 * executing "br err" on return to user space. 1254 * 3. If the system call code returned ERESTART, 1255 * we need to rexecute the trap instruction. Back up the pipe 1256 * line. 1257 * fip = nip, nip = xip 1258 * 4. If the system call returned EJUSTRETURN, don't need to adjust 1259 * any pointers. 1260 */ 1261 1262 switch (error) { 1263 case 0: 1264 tf->tf_r[2] = rval[0]; 1265 tf->tf_r[3] = rval[1]; 1266 tf->tf_epsr &= ~PSR_C; 1267 tf->tf_snip = tf->tf_sfip & ~NIP_E; 1268 tf->tf_sfip = tf->tf_snip + 4; 1269 break; 1270 case ERESTART: 1271 tf->tf_epsr &= ~PSR_C; 1272 tf->tf_sfip = tf->tf_snip & ~FIP_E; 1273 tf->tf_snip = tf->tf_sxip & ~NIP_E; 1274 break; 1275 case EJUSTRETURN: 1276 tf->tf_epsr &= ~PSR_C; 1277 break; 1278 default: 1279bad: 1280 if (p->p_emul->e_errno) 1281 error = p->p_emul->e_errno[error]; 1282 tf->tf_r[2] = error; 1283 tf->tf_epsr |= PSR_C; /* fail */ 1284 tf->tf_snip = tf->tf_snip & ~NIP_E; 1285 tf->tf_sfip = tf->tf_sfip & ~FIP_E; 1286 break; 1287 } 1288#ifdef SYSCALL_DEBUG 1289 KERNEL_PROC_LOCK(p); 1290 scdebug_ret(p, code, error, rval); 1291 KERNEL_PROC_UNLOCK(p); 1292#endif 1293 userret(p); 1294#ifdef KTRACE 1295 if (KTRPOINT(p, KTR_SYSRET)) { 1296 KERNEL_PROC_LOCK(p); 1297 ktrsysret(p, code, error, rval[0]); 1298 KERNEL_PROC_UNLOCK(p); 1299 } 1300#endif 1301} 1302#endif /* M88100 */ 1303 1304#ifdef M88110 1305/* Instruction pointers operate differently on mc88110 */ 1306void 1307m88110_syscall(register_t code, struct trapframe *tf) 1308{ 1309 int i, nsys, nap; 1310 struct sysent *callp; 1311 struct proc *p = curproc; 1312 int error; 1313 register_t args[8], rval[2], *ap; 1314 int nolock; 1315 1316 
uvmexp.syscalls++; 1317 1318 callp = p->p_emul->e_sysent; 1319 nsys = p->p_emul->e_nsysent; 1320 1321 p->p_md.md_tf = tf; 1322 1323 /* 1324 * For 88k, all the arguments are passed in the registers (r2-r9), 1325 * and further arguments (if any) on stack. 1326 * For syscall (and __syscall), r2 (and r3) has the actual code. 1327 * __syscall takes a quad syscall number, so that other 1328 * arguments are at their natural alignments. 1329 */ 1330 ap = &tf->tf_r[2]; 1331 nap = 8; /* r2-r9 */ 1332 1333 switch (code) { 1334 case SYS_syscall: 1335 code = *ap++; 1336 nap--; 1337 break; 1338 case SYS___syscall: 1339 if (callp != sysent) 1340 break; 1341 code = ap[_QUAD_LOWWORD]; 1342 ap += 2; 1343 nap -= 2; 1344 break; 1345 } 1346 1347 if (code < 0 || code >= nsys) 1348 callp += p->p_emul->e_nosys; 1349 else 1350 callp += code; 1351 1352 i = callp->sy_argsize / sizeof(register_t); 1353 if (i > sizeof(args) > sizeof(register_t)) 1354 panic("syscall nargs"); 1355 if (i > nap) { 1356 bcopy((caddr_t)ap, (caddr_t)args, nap * sizeof(register_t)); 1357 error = copyin((caddr_t)tf->tf_r[31], (caddr_t)(args + nap), 1358 (i - nap) * sizeof(register_t)); 1359 } else { 1360 bcopy((caddr_t)ap, (caddr_t)args, i * sizeof(register_t)); 1361 error = 0; 1362 } 1363 1364 if (error != 0) 1365 goto bad; 1366 1367#ifdef SYSCALL_DEBUG 1368 KERNEL_PROC_LOCK(p); 1369 scdebug_call(p, code, args); 1370 KERNEL_PROC_UNLOCK(p); 1371#endif 1372#ifdef KTRACE 1373 if (KTRPOINT(p, KTR_SYSCALL)) { 1374 KERNEL_PROC_LOCK(p); 1375 ktrsyscall(p, code, callp->sy_argsize, args); 1376 KERNEL_PROC_UNLOCK(p); 1377 } 1378#endif 1379 rval[0] = 0; 1380 rval[1] = tf->tf_r[3]; 1381#if NSYSTRACE > 0 1382 if (ISSET(p->p_flag, P_SYSTRACE)) { 1383 KERNEL_PROC_LOCK(p); 1384 error = systrace_redirect(code, p, args, rval); 1385 KERNEL_PROC_UNLOCK(p); 1386 } else 1387#endif 1388 { 1389 nolock = (callp->sy_flags & SY_NOLOCK); 1390 if (!nolock) 1391 KERNEL_PROC_LOCK(p); 1392 error = (*callp->sy_call)(p, args, rval); 1393 if (!nolock) 
1394 KERNEL_PROC_UNLOCK(p); 1395 } 1396 1397 /* 1398 * system call will look like: 1399 * or r13, r0, <code> 1400 * tb0 0, r0, <128> <- exip 1401 * br err <- enip 1402 * jmp r1 1403 * err: or.u r3, r0, hi16(errno) 1404 * st r2, r3, lo16(errno) 1405 * subu r2, r0, 1 1406 * jmp r1 1407 * 1408 * So, when we take syscall trap, exip/enip will be as 1409 * shown above. 1410 * Given this, 1411 * 1. If the system call returned 0, need to jmp r1. 1412 * exip += 8 1413 * 2. If the system call returned an errno > 0, increment 1414 * exip += 4 and plug the value in r2. This will have us 1415 * executing "br err" on return to user space. 1416 * 3. If the system call code returned ERESTART, 1417 * we need to rexecute the trap instruction. leave exip as is. 1418 * 4. If the system call returned EJUSTRETURN, just return. 1419 * exip += 4 1420 */ 1421 1422 switch (error) { 1423 case 0: 1424 tf->tf_r[2] = rval[0]; 1425 tf->tf_r[3] = rval[1]; 1426 tf->tf_epsr &= ~PSR_C; 1427 /* skip two instructions */ 1428 m88110_skip_insn(tf); 1429 m88110_skip_insn(tf); 1430 break; 1431 case ERESTART: 1432 /* 1433 * Reexecute the trap. 1434 * exip is already at the trap instruction, so 1435 * there is nothing to do. 
1436 */ 1437 tf->tf_epsr &= ~PSR_C; 1438 break; 1439 case EJUSTRETURN: 1440 tf->tf_epsr &= ~PSR_C; 1441 /* skip one instruction */ 1442 m88110_skip_insn(tf); 1443 break; 1444 default: 1445bad: 1446 if (p->p_emul->e_errno) 1447 error = p->p_emul->e_errno[error]; 1448 tf->tf_r[2] = error; 1449 tf->tf_epsr |= PSR_C; /* fail */ 1450 /* skip one instruction */ 1451 m88110_skip_insn(tf); 1452 break; 1453 } 1454 1455#ifdef SYSCALL_DEBUG 1456 KERNEL_PROC_LOCK(p); 1457 scdebug_ret(p, code, error, rval); 1458 KERNEL_PROC_UNLOCK(p); 1459#endif 1460 userret(p); 1461#ifdef KTRACE 1462 if (KTRPOINT(p, KTR_SYSRET)) { 1463 KERNEL_PROC_LOCK(p); 1464 ktrsysret(p, code, error, rval[0]); 1465 KERNEL_PROC_UNLOCK(p); 1466 } 1467#endif 1468} 1469#endif /* M88110 */ 1470 1471/* 1472 * Set up return-value registers as fork() libc stub expects, 1473 * and do normal return-to-user-mode stuff. 1474 */ 1475void 1476child_return(arg) 1477 void *arg; 1478{ 1479 struct proc *p = arg; 1480 struct trapframe *tf; 1481 1482 tf = (struct trapframe *)USER_REGS(p); 1483 tf->tf_r[2] = 0; 1484 tf->tf_r[3] = 0; 1485 tf->tf_epsr &= ~PSR_C; 1486 /* skip br instruction as in syscall() */ 1487#ifdef M88100 1488 if (CPU_IS88100) { 1489 tf->tf_snip = (tf->tf_sfip & XIP_ADDR) | XIP_V; 1490 tf->tf_sfip = tf->tf_snip + 4; 1491 } 1492#endif 1493#ifdef M88110 1494 if (CPU_IS88110) { 1495 /* skip two instructions */ 1496 m88110_skip_insn(tf); 1497 m88110_skip_insn(tf); 1498 } 1499#endif 1500 1501 KERNEL_PROC_UNLOCK(p); 1502 userret(p); 1503 1504#ifdef KTRACE 1505 if (KTRPOINT(p, KTR_SYSRET)) { 1506 KERNEL_PROC_LOCK(p); 1507 ktrsysret(p, 1508 (p->p_flag & P_PPWAIT) ? 
SYS_vfork : SYS_fork, 0, 0); 1509 KERNEL_PROC_UNLOCK(p); 1510 } 1511#endif 1512} 1513 1514#ifdef PTRACE 1515 1516/* 1517 * User Single Step Debugging Support 1518 */ 1519 1520#include <sys/ptrace.h> 1521 1522vaddr_t ss_branch_taken(u_int, vaddr_t, struct reg *); 1523int ss_get_value(struct proc *, vaddr_t, u_int *); 1524int ss_inst_branch_or_call(u_int); 1525int ss_put_breakpoint(struct proc *, vaddr_t, vaddr_t *, u_int *); 1526 1527#define SYSCALL_INSTR 0xf000d080 /* tb0 0,r0,128 */ 1528 1529int 1530ss_get_value(struct proc *p, vaddr_t addr, u_int *value) 1531{ 1532 struct uio uio; 1533 struct iovec iov; 1534 1535 iov.iov_base = (caddr_t)value; 1536 iov.iov_len = sizeof(u_int); 1537 uio.uio_iov = &iov; 1538 uio.uio_iovcnt = 1; 1539 uio.uio_offset = (off_t)addr; 1540 uio.uio_resid = sizeof(u_int); 1541 uio.uio_segflg = UIO_SYSSPACE; 1542 uio.uio_rw = UIO_READ; 1543 uio.uio_procp = curproc; 1544 return (process_domem(curproc, p, &uio, PT_READ_I)); 1545} 1546 1547int 1548ss_put_value(struct proc *p, vaddr_t addr, u_int value) 1549{ 1550 struct uio uio; 1551 struct iovec iov; 1552 1553 iov.iov_base = (caddr_t)&value; 1554 iov.iov_len = sizeof(u_int); 1555 uio.uio_iov = &iov; 1556 uio.uio_iovcnt = 1; 1557 uio.uio_offset = (off_t)addr; 1558 uio.uio_resid = sizeof(u_int); 1559 uio.uio_segflg = UIO_SYSSPACE; 1560 uio.uio_rw = UIO_WRITE; 1561 uio.uio_procp = curproc; 1562 return (process_domem(curproc, p, &uio, PT_WRITE_I)); 1563} 1564 1565/* 1566 * ss_branch_taken(instruction, pc, regs) 1567 * 1568 * instruction will be a control flow instruction location at address pc. 1569 * Branch taken is supposed to return the address to which the instruction 1570 * would jump if the branch is taken. 1571 * 1572 * This is different from branch_taken() in ddb, as we also need to process 1573 * system calls. 1574 */ 1575vaddr_t 1576ss_branch_taken(u_int inst, vaddr_t pc, struct reg *regs) 1577{ 1578 u_int regno; 1579 1580 /* 1581 * Quick check of the instruction. 
Note that we know we are only 1582 * invoked if ss_inst_branch_or_call() returns TRUE, so we do not 1583 * need to repeat the jpm, jsr and syscall stricter checks here. 1584 */ 1585 switch (inst >> (32 - 5)) { 1586 case 0x18: /* br */ 1587 case 0x19: /* bsr */ 1588 /* signed 26 bit pc relative displacement, shift left 2 bits */ 1589 inst = (inst & 0x03ffffff) << 2; 1590 /* check if sign extension is needed */ 1591 if (inst & 0x08000000) 1592 inst |= 0xf0000000; 1593 return (pc + inst); 1594 1595 case 0x1a: /* bb0 */ 1596 case 0x1b: /* bb1 */ 1597 case 0x1d: /* bcnd */ 1598 /* signed 16 bit pc relative displacement, shift left 2 bits */ 1599 inst = (inst & 0x0000ffff) << 2; 1600 /* check if sign extension is needed */ 1601 if (inst & 0x00020000) 1602 inst |= 0xfffc0000; 1603 return (pc + inst); 1604 1605 case 0x1e: /* jmp or jsr */ 1606 regno = inst & 0x1f; /* get the register value */ 1607 return (regno == 0 ? 0 : regs->r[regno]); 1608 1609 default: /* system call */ 1610 /* 1611 * The regular (pc + 4) breakpoint will match the error 1612 * return. Successful system calls return at (pc + 8), 1613 * so we'll set up a branch breakpoint there. 1614 */ 1615 return (pc + 8); 1616 } 1617} 1618 1619int 1620ss_inst_branch_or_call(u_int ins) 1621{ 1622 /* check high five bits */ 1623 switch (ins >> (32 - 5)) { 1624 case 0x18: /* br */ 1625 case 0x19: /* bsr */ 1626 case 0x1a: /* bb0 */ 1627 case 0x1b: /* bb1 */ 1628 case 0x1d: /* bcnd */ 1629 return (TRUE); 1630 case 0x1e: /* could be jmp or jsr */ 1631 if ((ins & 0xfffff3e0) == 0xf400c000) 1632 return (TRUE); 1633 } 1634 1635 return (FALSE); 1636} 1637 1638int 1639ss_put_breakpoint(struct proc *p, vaddr_t va, vaddr_t *bpva, u_int *bpsave) 1640{ 1641 int rc; 1642 1643 /* Restore previous breakpoint if we did not trigger it. */ 1644 if (*bpva != 0) { 1645 ss_put_value(p, *bpva, *bpsave); 1646 *bpva = 0; 1647 } 1648 1649 /* Save instruction. 
*/ 1650 if ((rc = ss_get_value(p, va, bpsave)) != 0) 1651 return (rc); 1652 1653 /* Store breakpoint instruction at the location now. */ 1654 *bpva = va; 1655 return (ss_put_value(p, va, SSBREAKPOINT)); 1656} 1657 1658int 1659process_sstep(struct proc *p, int sstep) 1660{ 1661 struct reg *sstf = USER_REGS(p); 1662 vaddr_t pc, brpc; 1663 u_int32_t instr; 1664 int rc; 1665 1666 if (sstep == 0) { 1667 /* Restore previous breakpoints if any. */ 1668 if (p->p_md.md_bp0va != 0) { 1669 ss_put_value(p, p->p_md.md_bp0va, p->p_md.md_bp0save); 1670 p->p_md.md_bp0va = 0; 1671 } 1672 if (p->p_md.md_bp1va != 0) { 1673 ss_put_value(p, p->p_md.md_bp1va, p->p_md.md_bp1save); 1674 p->p_md.md_bp1va = 0; 1675 } 1676 1677 return (0); 1678 } 1679 1680 /* 1681 * User was stopped at pc, e.g. the instruction at pc was not executed. 1682 * Fetch what's at the current location. 1683 */ 1684 pc = PC_REGS(sstf); 1685 if ((rc = ss_get_value(p, pc, &instr)) != 0) 1686 return (rc); 1687 1688 /* 1689 * Find if this instruction may cause a branch, and set up a breakpoint 1690 * at the branch location. 1691 */ 1692 if (ss_inst_branch_or_call(instr) || instr == SYSCALL_INSTR) { 1693 brpc = ss_branch_taken(instr, pc, sstf); 1694 1695 /* self-branches are hopeless */ 1696 if (brpc != pc && brpc != 0) { 1697 if ((rc = ss_put_breakpoint(p, brpc, 1698 &p->p_md.md_bp1va, &p->p_md.md_bp1save)) != 0) 1699 return (rc); 1700 } 1701 } 1702 1703 if ((rc = ss_put_breakpoint(p, pc + 4, 1704 &p->p_md.md_bp0va, &p->p_md.md_bp0save)) != 0) 1705 return (rc); 1706 1707 return (0); 1708} 1709 1710#endif /* PTRACE */ 1711 1712#ifdef DIAGNOSTIC 1713void 1714splassert_check(int wantipl, const char *func) 1715{ 1716 int oldipl; 1717 1718 oldipl = getipl(); 1719 1720 if (oldipl < wantipl) { 1721 splassert_fail(wantipl, oldipl, func); 1722 /* 1723 * This will raise the spl, 1724 * in a feeble attempt to reduce further damage. 
1725 */ 1726 (void)raiseipl(wantipl); 1727 } 1728} 1729#endif 1730 1731/* 1732 * ld.d and st.d instructions referencing long aligned but not long long 1733 * aligned addresses will trigger a misaligned address exception. 1734 * 1735 * This routine attempts to recover these (valid) statements, by simulating 1736 * the split form of the instruction. If it fails, it returns the appropriate 1737 * signal number to deliver. 1738 * 1739 * Note that we do not attempt to do anything for .d.usr instructions - the 1740 * kernel never issues such instructions, and they cause a privileged 1741 * instruction exception from userland. 1742 */ 1743int 1744double_reg_fixup(struct trapframe *frame) 1745{ 1746 u_int32_t pc, instr, value; 1747 int regno, store; 1748 vaddr_t addr; 1749 1750 /* 1751 * Decode the faulting instruction. 1752 */ 1753 1754 pc = PC_REGS(&frame->tf_regs); 1755 if (copyin((void *)pc, &instr, sizeof(u_int32_t)) != 0) 1756 return SIGSEGV; 1757 1758 switch (instr & 0xfc00ff00) { 1759 case 0xf4001000: /* ld.d rD, rS1, rS2 */ 1760 addr = frame->tf_r[(instr >> 16) & 0x1f] 1761 + frame->tf_r[(instr & 0x1f)]; 1762 store = 0; 1763 break; 1764 case 0xf4002000: /* st.d rD, rS1, rS2 */ 1765 addr = frame->tf_r[(instr >> 16) & 0x1f] 1766 + frame->tf_r[(instr & 0x1f)]; 1767 store = 1; 1768 break; 1769 default: 1770 switch (instr >> 26) { 1771 case 0x10000000 >> 26: /* ld.d rD, rS, imm16 */ 1772 addr = (instr & 0x0000ffff) + 1773 frame->tf_r[(instr >> 16) & 0x1f]; 1774 store = 0; 1775 break; 1776 case 0x20000000 >> 26: /* st.d rD, rS, imm16 */ 1777 addr = (instr & 0x0000ffff) + 1778 frame->tf_r[(instr >> 16) & 0x1f]; 1779 store = 1; 1780 break; 1781 default: 1782 return SIGBUS; 1783 } 1784 break; 1785 } 1786 1787 /* We only handle long but not long long aligned access here */ 1788 if ((addr & 0x07) != 4) 1789 return SIGBUS; 1790 1791 regno = (instr >> 21) & 0x1f; 1792 1793 if (store) { 1794 /* 1795 * Two word stores. 
1796 */ 1797 if (regno == 0) 1798 value = 0; 1799 else 1800 value = frame->tf_r[regno]; 1801 if (copyout(&value, (void *)addr, sizeof(u_int32_t)) != 0) 1802 return SIGSEGV; 1803 if (regno == 31) 1804 value = 0; 1805 else 1806 value = frame->tf_r[regno + 1]; 1807 if (copyout(&value, (void *)(addr + 4), sizeof(u_int32_t)) != 0) 1808 return SIGSEGV; 1809 } else { 1810 /* 1811 * Two word loads. r0 should be left unaltered, but the 1812 * value should still be fetched even if it is discarded. 1813 */ 1814 if (copyin((void *)addr, &value, sizeof(u_int32_t)) != 0) 1815 return SIGSEGV; 1816 if (regno != 0) 1817 frame->tf_r[regno] = value; 1818 if (copyin((void *)(addr + 4), &value, sizeof(u_int32_t)) != 0) 1819 return SIGSEGV; 1820 if (regno != 31) 1821 frame->tf_r[regno + 1] = value; 1822 } 1823 1824 return 0; 1825} 1826 1827void 1828cache_flush(struct trapframe *tf) 1829{ 1830 struct proc *p = curproc; 1831 struct pmap *pmap; 1832 paddr_t pa; 1833 vaddr_t va; 1834 vsize_t len, count; 1835 1836 p->p_md.md_tf = tf; 1837 1838 pmap = vm_map_pmap(&p->p_vmspace->vm_map); 1839 va = tf->tf_r[2]; 1840 len = tf->tf_r[3]; 1841 1842 if (/* va < VM_MIN_ADDRESS || */ va >= VM_MAXUSER_ADDRESS || 1843 va + len <= va || va + len >= VM_MAXUSER_ADDRESS) 1844 len = 0; 1845 1846 while (len != 0) { 1847 count = min(len, PAGE_SIZE - (va & PAGE_MASK)); 1848 if (pmap_extract(pmap, va, &pa) != FALSE) 1849 dma_cachectl(pa, count, DMA_CACHE_SYNC); 1850 va += count; 1851 len -= count; 1852 } 1853 1854#ifdef M88100 1855 if (CPU_IS88100) { 1856 tf->tf_snip = tf->tf_snip & ~NIP_E; 1857 tf->tf_sfip = tf->tf_sfip & ~FIP_E; 1858 } 1859#endif 1860#ifdef M88110 1861 if (CPU_IS88110) { 1862 /* skip instruction */ 1863 m88110_skip_insn(tf); 1864 } 1865#endif 1866 1867 userret(p); 1868} 1869