/* trap.c revision 1.123 */
1/* $OpenBSD: trap.c,v 1.123 2022/11/02 07:20:07 guenther Exp $ */ 2/* 3 * Copyright (c) 2004, Miodrag Vallat. 4 * Copyright (c) 1998 Steve Murphree, Jr. 5 * Copyright (c) 1996 Nivas Madhur 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Nivas Madhur. 19 * 4. The name of the author may not be used to endorse or promote products 20 * derived from this software without specific prior written permission 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 * 33 */ 34/* 35 * Mach Operating System 36 * Copyright (c) 1991 Carnegie Mellon University 37 * Copyright (c) 1991 OMRON Corporation 38 * All Rights Reserved. 
39 * 40 * Permission to use, copy, modify and distribute this software and its 41 * documentation is hereby granted, provided that both the copyright 42 * notice and this permission notice appear in all copies of the 43 * software, derivative works or modified versions, and any portions 44 * thereof, and that both notices appear in supporting documentation. 45 * 46 */ 47 48#include <sys/param.h> 49#include <sys/proc.h> 50#include <sys/signalvar.h> 51#include <sys/user.h> 52#include <sys/syscall.h> 53#include <sys/systm.h> 54#include <sys/syscall_mi.h> 55 56#include <uvm/uvm_extern.h> 57 58#include <machine/asm_macro.h> 59#include <machine/cmmu.h> 60#include <machine/cpu.h> 61#ifdef M88100 62#include <machine/m88100.h> 63#include <machine/m8820x.h> 64#endif 65#ifdef M88110 66#include <machine/m88110.h> 67#endif 68#include <machine/fpu.h> 69#include <machine/pcb.h> 70#include <machine/psl.h> 71#include <machine/trap.h> 72 73#include <machine/db_machdep.h> 74 75#define SSBREAKPOINT (0xF000D1F8U) /* Single Step Breakpoint */ 76 77#define USERMODE(PSR) (((PSR) & PSR_MODE) == 0) 78#define SYSTEMMODE(PSR) (((PSR) & PSR_MODE) != 0) 79 80void printtrap(int, struct trapframe *); 81__dead void panictrap(int, struct trapframe *); 82__dead void error_fatal(struct trapframe *); 83int double_reg_fixup(struct trapframe *, int); 84int ss_put_value(struct proc *, vaddr_t, u_int); 85 86extern void regdump(struct trapframe *f); 87 88const char *trap_type[] = { 89 "Reset", 90 "Interrupt Exception", 91 "Instruction Access", 92 "Data Access Exception", 93 "Misaligned Access", 94 "Unimplemented Opcode", 95 "Privilege Violation" 96 "Bounds Check Violation", 97 "Illegal Integer Divide", 98 "Integer Overflow", 99 "Error Exception", 100 "Non-Maskable Exception", 101}; 102 103const int trap_types = sizeof trap_type / sizeof trap_type[0]; 104 105#ifdef M88100 106const char *pbus_exception_type[] = { 107 "Success (No Fault)", 108 "unknown 1", 109 "unknown 2", 110 "Bus Error", 111 "Segment 
Fault", 112 "Page Fault", 113 "Supervisor Violation", 114 "Write Violation", 115}; 116#endif 117 118void 119printtrap(int type, struct trapframe *frame) 120{ 121#ifdef M88100 122 if (CPU_IS88100) { 123 if (type == 2) { 124 /* instruction exception */ 125 printf("\nInstr access fault (%s) v = %lx, frame %p\n", 126 pbus_exception_type[ 127 CMMU_PFSR_FAULT(frame->tf_ipfsr)], 128 frame->tf_sxip & XIP_ADDR, frame); 129 } else if (type == 3) { 130 /* data access exception */ 131 printf("\nData access fault (%s) v = %lx, frame %p\n", 132 pbus_exception_type[ 133 CMMU_PFSR_FAULT(frame->tf_dpfsr)], 134 frame->tf_sxip & XIP_ADDR, frame); 135 } else 136 printf("\nTrap type %d, v = %lx, frame %p\n", 137 type, frame->tf_sxip & XIP_ADDR, frame); 138 } 139#endif 140#ifdef M88110 141 if (CPU_IS88110) { 142 printf("\nTrap type %d, v = %lx, frame %p\n", 143 type, frame->tf_exip, frame); 144 } 145#endif 146#ifdef DDB 147 regdump(frame); 148#endif 149} 150 151__dead void 152panictrap(int type, struct trapframe *frame) 153{ 154 static int panicing = 0; 155 156 if (panicing++ == 0) 157 printtrap(type, frame); 158 if ((u_int)type < trap_types) 159 panic("%s", trap_type[type]); 160 else 161 panic("trap %d", type); 162 /*NOTREACHED*/ 163} 164 165/* 166 * Handle external interrupts. 167 */ 168void 169interrupt(struct trapframe *frame) 170{ 171 struct cpu_info *ci = curcpu(); 172 173 ci->ci_intrdepth++; 174 md_interrupt_func(frame); 175 ci->ci_intrdepth--; 176} 177 178#ifdef M88110 179/* 180 * Handle non-maskable interrupts. 181 */ 182int 183nmi(struct trapframe *frame) 184{ 185 return md_nmi_func(frame); 186} 187 188/* 189 * Reenable non-maskable interrupts. 190 */ 191void 192nmi_wrapup(struct trapframe *frame) 193{ 194 md_nmi_wrapup_func(frame); 195} 196#endif 197 198/* 199 * Handle asynchronous software traps. 
200 */ 201void 202ast(struct trapframe *frame) 203{ 204 struct cpu_info *ci = curcpu(); 205 struct proc *p = ci->ci_curproc; 206 207 p->p_md.md_astpending = 0; 208 209 uvmexp.softs++; 210 mi_ast(p, ci->ci_want_resched); 211 userret(p); 212} 213 214#ifdef M88100 215void 216m88100_trap(u_int type, struct trapframe *frame) 217{ 218 struct proc *p; 219 struct vm_map *map; 220 vaddr_t va, pcb_onfault; 221 vm_prot_t access_type; 222 int fault_type, pbus_type; 223 u_long fault_code; 224 vaddr_t fault_addr; 225 struct vmspace *vm; 226 union sigval sv; 227 int result; 228#ifdef DDB 229 int s; 230 u_int psr; 231#endif 232 int sig = 0; 233 234 uvmexp.traps++; 235 if ((p = curproc) == NULL) 236 p = &proc0; 237 238 if (USERMODE(frame->tf_epsr)) { 239 type |= T_USER; 240 p->p_md.md_tf = frame; /* for ptrace/signals */ 241 refreshcreds(p); 242 } 243 fault_type = SI_NOINFO; 244 fault_code = 0; 245 fault_addr = frame->tf_sxip & XIP_ADDR; 246 247 switch (type) { 248 default: 249 case T_ILLFLT: 250lose: 251 panictrap(frame->tf_vector, frame); 252 break; 253 /*NOTREACHED*/ 254 255#if defined(DDB) 256 case T_KDB_BREAK: 257 s = splhigh(); 258 set_psr((psr = get_psr()) & ~PSR_IND); 259 ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame); 260 set_psr(psr); 261 splx(s); 262 return; 263 case T_KDB_ENTRY: 264 s = splhigh(); 265 set_psr((psr = get_psr()) & ~PSR_IND); 266 ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame); 267 set_psr(psr); 268 splx(s); 269 return; 270#endif /* DDB */ 271 case T_MISALGNFLT: 272 printf("kernel misaligned access exception @0x%08lx\n", 273 frame->tf_sxip); 274 goto lose; 275 case T_INSTFLT: 276 /* kernel mode instruction access fault. 277 * Should never, never happen for a non-paged kernel. 
278 */ 279#ifdef TRAPDEBUG 280 pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr); 281 printf("Kernel Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n", 282 pbus_type, pbus_exception_type[pbus_type], 283 fault_addr, frame, frame->tf_cpu); 284#endif 285 goto lose; 286 case T_DATAFLT: 287 /* kernel mode data fault */ 288 289 /* data fault on the user address? */ 290 if ((frame->tf_dmt0 & DMT_DAS) == 0) 291 goto user_fault; 292 293 fault_addr = frame->tf_dma0; 294 if (frame->tf_dmt0 & (DMT_WRITE|DMT_LOCKBAR)) { 295 access_type = PROT_READ | PROT_WRITE; 296 fault_code = PROT_WRITE; 297 } else { 298 access_type = PROT_READ; 299 fault_code = PROT_READ; 300 } 301 302 va = trunc_page((vaddr_t)fault_addr); 303 304 vm = p->p_vmspace; 305 map = kernel_map; 306 307 pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr); 308#ifdef TRAPDEBUG 309 printf("Kernel Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n", 310 pbus_type, pbus_exception_type[pbus_type], 311 fault_addr, frame, frame->tf_cpu); 312#endif 313 314 pcb_onfault = p->p_addr->u_pcb.pcb_onfault; 315 switch (pbus_type) { 316 case CMMU_PFSR_SUCCESS: 317 /* 318 * The fault was resolved. Call data_access_emulation 319 * to drain the data unit pipe line and reset dmt0 320 * so that trap won't get called again. 321 */ 322 p->p_addr->u_pcb.pcb_onfault = 0; 323 KERNEL_LOCK(); 324 data_access_emulation((u_int *)frame); 325 KERNEL_UNLOCK(); 326 p->p_addr->u_pcb.pcb_onfault = pcb_onfault; 327 frame->tf_dmt0 = 0; 328 frame->tf_dpfsr = 0; 329 return; 330 case CMMU_PFSR_SFAULT: 331 case CMMU_PFSR_PFAULT: 332 p->p_addr->u_pcb.pcb_onfault = 0; 333 KERNEL_LOCK(); 334 result = uvm_fault(map, va, 0, access_type); 335 p->p_addr->u_pcb.pcb_onfault = pcb_onfault; 336 if (result == 0) { 337 /* 338 * We could resolve the fault. Call 339 * data_access_emulation to drain the data 340 * unit pipe line and reset dmt0 so that trap 341 * won't get called again. 
342 */ 343 p->p_addr->u_pcb.pcb_onfault = 0; 344 data_access_emulation((u_int *)frame); 345 KERNEL_UNLOCK(); 346 p->p_addr->u_pcb.pcb_onfault = pcb_onfault; 347 frame->tf_dmt0 = 0; 348 frame->tf_dpfsr = 0; 349 return; 350 } else if (pcb_onfault != 0) { 351 KERNEL_UNLOCK(); 352 /* 353 * This could be a fault caused in copyout*() 354 * while accessing kernel space. 355 */ 356 frame->tf_snip = pcb_onfault | NIP_V; 357 frame->tf_sfip = (pcb_onfault + 4) | FIP_V; 358 /* 359 * Continue as if the fault had been resolved, 360 * but do not try to complete the faulting 361 * access. 362 */ 363 frame->tf_dmt0 = 0; 364 frame->tf_dpfsr = 0; 365 return; 366 } 367 KERNEL_UNLOCK(); 368 break; 369 } 370#ifdef TRAPDEBUG 371 printf("PBUS Fault %d (%s) va = 0x%x\n", pbus_type, 372 pbus_exception_type[pbus_type], va); 373#endif 374 goto lose; 375 /* NOTREACHED */ 376 case T_INSTFLT+T_USER: 377 /* User mode instruction access fault */ 378 /* FALLTHROUGH */ 379 case T_DATAFLT+T_USER: 380 if (!uvm_map_inentry(p, &p->p_spinentry, PROC_STACK(p), 381 "[%s]%d/%d sp=%lx inside %lx-%lx: not MAP_STACK\n", 382 uvm_map_inentry_sp, p->p_vmspace->vm_map.sserial)) 383 goto userexit; 384user_fault: 385 if (type == T_INSTFLT + T_USER) { 386 pbus_type = CMMU_PFSR_FAULT(frame->tf_ipfsr); 387#ifdef TRAPDEBUG 388 printf("User Instruction fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n", 389 pbus_type, pbus_exception_type[pbus_type], 390 fault_addr, frame, frame->tf_cpu); 391#endif 392 } else { 393 fault_addr = frame->tf_dma0; 394 pbus_type = CMMU_PFSR_FAULT(frame->tf_dpfsr); 395#ifdef TRAPDEBUG 396 printf("User Data access fault #%d (%s) v = 0x%x, frame 0x%x cpu %p\n", 397 pbus_type, pbus_exception_type[pbus_type], 398 fault_addr, frame, frame->tf_cpu); 399#endif 400 } 401 402 if (frame->tf_dmt0 & (DMT_WRITE | DMT_LOCKBAR)) { 403 access_type = PROT_READ | PROT_WRITE; 404 fault_code = PROT_WRITE; 405 } else { 406 access_type = PROT_READ; 407 fault_code = PROT_READ; 408 } 409 410 va = 
trunc_page((vaddr_t)fault_addr); 411 412 vm = p->p_vmspace; 413 map = &vm->vm_map; 414 if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0) 415 p->p_addr->u_pcb.pcb_onfault = 0; 416 417 /* Call uvm_fault() to resolve non-bus error faults */ 418 switch (pbus_type) { 419 case CMMU_PFSR_SUCCESS: 420 result = 0; 421 break; 422 case CMMU_PFSR_BERROR: 423 result = EACCES; 424 break; 425 default: 426 KERNEL_LOCK(); 427 result = uvm_fault(map, va, 0, access_type); 428 KERNEL_UNLOCK(); 429 if (result == EACCES) 430 result = EFAULT; 431 break; 432 } 433 434 p->p_addr->u_pcb.pcb_onfault = pcb_onfault; 435 436 if (result == 0) { 437 uvm_grow(p, va); 438 439 if (type == T_INSTFLT + T_USER) { 440 m88100_rewind_insn(&(frame->tf_regs)); 441 /* clear the error bit */ 442 frame->tf_sfip &= ~FIP_E; 443 frame->tf_snip &= ~NIP_E; 444 frame->tf_ipfsr = 0; 445 } else { 446 /* 447 * We could resolve the fault. Call 448 * data_access_emulation to drain the data unit 449 * pipe line and reset dmt0 so that trap won't 450 * get called again. 451 */ 452 p->p_addr->u_pcb.pcb_onfault = 0; 453 KERNEL_LOCK(); 454 data_access_emulation((u_int *)frame); 455 KERNEL_UNLOCK(); 456 p->p_addr->u_pcb.pcb_onfault = pcb_onfault; 457 frame->tf_dmt0 = 0; 458 frame->tf_dpfsr = 0; 459 } 460 } else { 461 /* 462 * This could be a fault caused in copyin*() 463 * while accessing user space. 464 */ 465 if (pcb_onfault != 0) { 466 frame->tf_snip = pcb_onfault | NIP_V; 467 frame->tf_sfip = (pcb_onfault + 4) | FIP_V; 468 /* 469 * Continue as if the fault had been resolved, 470 * but do not try to complete the faulting 471 * access. 472 */ 473 frame->tf_dmt0 = 0; 474 frame->tf_dpfsr = 0; 475 } else { 476 sig = result == EACCES ? SIGBUS : SIGSEGV; 477 fault_type = result == EACCES ? 
478 BUS_ADRERR : SEGV_MAPERR; 479 } 480 } 481 break; 482 case T_MISALGNFLT+T_USER: 483 /* Fix any misaligned ld.d or st.d instructions */ 484 sig = double_reg_fixup(frame, T_MISALGNFLT); 485 fault_type = BUS_ADRALN; 486 break; 487 case T_PRIVINFLT+T_USER: 488 case T_ILLFLT+T_USER: 489#ifndef DDB 490 case T_KDB_BREAK: 491 case T_KDB_ENTRY: 492#endif 493 case T_KDB_BREAK+T_USER: 494 case T_KDB_ENTRY+T_USER: 495 case T_KDB_TRACE: 496 case T_KDB_TRACE+T_USER: 497 sig = SIGILL; 498 break; 499 case T_BNDFLT+T_USER: 500 sig = SIGFPE; 501 break; 502 case T_ZERODIV+T_USER: 503 sig = SIGFPE; 504 fault_type = FPE_INTDIV; 505 break; 506 case T_OVFFLT+T_USER: 507 sig = SIGFPE; 508 fault_type = FPE_INTOVF; 509 break; 510 case T_FPEPFLT+T_USER: 511 m88100_fpu_precise_exception(frame); 512 goto userexit; 513 case T_FPEIFLT+T_USER: 514 m88100_fpu_imprecise_exception(frame); 515 goto userexit; 516 case T_SIGSYS+T_USER: 517 sig = SIGSYS; 518 break; 519 case T_STEPBPT+T_USER: 520#ifdef PTRACE 521 /* 522 * This trap is used by the kernel to support single-step 523 * debugging (although any user could generate this trap 524 * which should probably be handled differently). When a 525 * process is continued by a debugger with the PT_STEP 526 * function of ptrace (single step), the kernel inserts 527 * one or two breakpoints in the user process so that only 528 * one instruction (or two in the case of a delayed branch) 529 * is executed. When this breakpoint is hit, we get the 530 * T_STEPBPT trap. 
531 */ 532 { 533 u_int instr; 534 vaddr_t pc = PC_REGS(&frame->tf_regs); 535 536 /* read break instruction */ 537 copyin((caddr_t)pc, &instr, sizeof(u_int)); 538 539 /* check and see if we got here by accident */ 540 if ((p->p_md.md_bp0va != pc && 541 p->p_md.md_bp1va != pc) || 542 instr != SSBREAKPOINT) { 543 sig = SIGTRAP; 544 fault_type = TRAP_TRACE; 545 break; 546 } 547 548 /* restore original instruction and clear breakpoint */ 549 KERNEL_LOCK(); 550 if (p->p_md.md_bp0va == pc) { 551 ss_put_value(p, pc, p->p_md.md_bp0save); 552 p->p_md.md_bp0va = 0; 553 } 554 if (p->p_md.md_bp1va == pc) { 555 ss_put_value(p, pc, p->p_md.md_bp1save); 556 p->p_md.md_bp1va = 0; 557 } 558 KERNEL_UNLOCK(); 559 560 frame->tf_sxip = pc | NIP_V; 561 sig = SIGTRAP; 562 fault_type = TRAP_BRKPT; 563 } 564#else 565 sig = SIGTRAP; 566 fault_type = TRAP_TRACE; 567#endif 568 break; 569 570 case T_USERBPT+T_USER: 571 /* 572 * This trap is meant to be used by debuggers to implement 573 * breakpoint debugging. When we get this trap, we just 574 * return a signal which gets caught by the debugger. 575 */ 576 sig = SIGTRAP; 577 fault_type = TRAP_BRKPT; 578 break; 579 580 } 581 582 /* 583 * If trap from supervisor mode, just return 584 */ 585 if (type < T_USER) 586 return; 587 588 if (sig) { 589 sv.sival_ptr = (void *)fault_addr; 590 trapsignal(p, sig, fault_code, fault_type, sv); 591 /* 592 * don't want multiple faults - we are going to 593 * deliver signal. 
594 */ 595 frame->tf_dmt0 = 0; 596 frame->tf_ipfsr = frame->tf_dpfsr = 0; 597 } 598 599userexit: 600 userret(p); 601} 602#endif /* M88100 */ 603 604#ifdef M88110 605void 606m88110_trap(u_int type, struct trapframe *frame) 607{ 608 struct proc *p; 609 struct vm_map *map; 610 vaddr_t va, pcb_onfault; 611 vm_prot_t access_type; 612 int fault_type; 613 u_long fault_code; 614 vaddr_t fault_addr; 615 struct vmspace *vm; 616 union sigval sv; 617 int result; 618#ifdef DDB 619 int s; 620 u_int psr; 621#endif 622 int sig = 0; 623 624 uvmexp.traps++; 625 if ((p = curproc) == NULL) 626 p = &proc0; 627 628 fault_type = SI_NOINFO; 629 fault_code = 0; 630 fault_addr = frame->tf_exip & XIP_ADDR; 631 632 /* 633 * 88110 errata #16 (4.2) or #3 (5.1.1): 634 * ``bsr, br, bcnd, jsr and jmp instructions with the .n extension 635 * can cause the enip value to be incremented by 4 incorrectly 636 * if the instruction in the delay slot is the first word of a 637 * page which misses in the mmu and results in a hardware 638 * tablewalk which encounters an exception or an invalid 639 * descriptor. The exip value in this case will point to the 640 * first word of the page, and the D bit will be set. 641 * 642 * Note: if the instruction is a jsr.n r1, r1 will be overwritten 643 * with erroneous data. Therefore, no recovery is possible. Do 644 * not allow this instruction to occupy the last word of a page. 645 * 646 * Suggested fix: recover in general by backing up the exip by 4 647 * and clearing the delay bit before an rte when the lower 3 hex 648 * digits of the exip are 001.'' 649 */ 650 if ((frame->tf_exip & PAGE_MASK) == 0x00000001 && type == T_INSTFLT) { 651 u_int instr; 652 653 /* 654 * Note that we have initialized fault_addr above, so that 655 * signals provide the correct address if necessary. 656 */ 657 frame->tf_exip = (frame->tf_exip & ~1) - 4; 658 659 /* 660 * Check the instruction at the (backed up) exip. 661 * If it is a jsr.n, abort. 
662 */ 663 if (!USERMODE(frame->tf_epsr)) { 664 instr = *(u_int *)fault_addr; 665 if (instr == 0xf400cc01) 666 panic("mc88110 errata #16, exip 0x%lx enip 0x%lx", 667 (frame->tf_exip + 4) | 1, frame->tf_enip); 668 } else { 669 /* copyin here should not fail */ 670 if (copyin((const void *)frame->tf_exip, &instr, 671 sizeof instr) == 0 && 672 instr == 0xf400cc01) { 673 uprintf("mc88110 errata #16, exip 0x%lx enip 0x%lx", 674 (frame->tf_exip + 4) | 1, frame->tf_enip); 675 sig = SIGILL; 676 } 677 } 678 } 679 680 if (USERMODE(frame->tf_epsr)) { 681 type |= T_USER; 682 p->p_md.md_tf = frame; /* for ptrace/signals */ 683 refreshcreds(p); 684 } 685 686 if (sig != 0) 687 goto deliver; 688 689 switch (type) { 690 default: 691lose: 692 panictrap(frame->tf_vector, frame); 693 break; 694 /*NOTREACHED*/ 695 696#ifdef DEBUG 697 case T_110_DRM+T_USER: 698 case T_110_DRM: 699 printf("DMMU read miss: Hardware Table Searches should be enabled!\n"); 700 goto lose; 701 case T_110_DWM+T_USER: 702 case T_110_DWM: 703 printf("DMMU write miss: Hardware Table Searches should be enabled!\n"); 704 goto lose; 705 case T_110_IAM+T_USER: 706 case T_110_IAM: 707 printf("IMMU miss: Hardware Table Searches should be enabled!\n"); 708 goto lose; 709#endif 710 711#ifdef DDB 712 case T_KDB_TRACE: 713 s = splhigh(); 714 set_psr((psr = get_psr()) & ~PSR_IND); 715 ddb_break_trap(T_KDB_TRACE, (db_regs_t*)frame); 716 set_psr(psr); 717 splx(s); 718 return; 719 case T_KDB_BREAK: 720 s = splhigh(); 721 set_psr((psr = get_psr()) & ~PSR_IND); 722 ddb_break_trap(T_KDB_BREAK, (db_regs_t*)frame); 723 set_psr(psr); 724 splx(s); 725 return; 726 case T_KDB_ENTRY: 727 s = splhigh(); 728 set_psr((psr = get_psr()) & ~PSR_IND); 729 ddb_entry_trap(T_KDB_ENTRY, (db_regs_t*)frame); 730 set_psr(psr); 731 /* skip trap instruction */ 732 m88110_skip_insn(frame); 733 splx(s); 734 return; 735#endif /* DDB */ 736 case T_ILLFLT: 737 /* 738 * The 88110 seems to trigger an instruction fault in 739 * supervisor mode when running the 
following sequence: 740 * 741 * bcnd.n cond, reg, 1f 742 * arithmetic insn 743 * ... 744 * the same exact arithmetic insn 745 * 1: another arithmetic insn stalled by the previous one 746 * ... 747 * 748 * The exception is reported with exip pointing to the 749 * branch address. I don't know, at this point, if there 750 * is any better workaround than the aggressive one 751 * implemented below; I don't see how this could relate to 752 * any of the 88110 errata (although it might be related to 753 * branch prediction). 754 * 755 * For the record, the exact sequence triggering the 756 * spurious exception is: 757 * 758 * bcnd.n eq0, r2, 1f 759 * or r25, r0, r22 760 * bsr somewhere 761 * or r25, r0, r22 762 * 1: cmp r13, r25, r20 763 * 764 * within the same cache line. 765 * 766 * Simply ignoring the exception and returning does not 767 * cause the exception to disappear. Clearing the 768 * instruction cache works, but on 88110+88410 systems, 769 * the 88410 needs to be invalidated as well. (note that 770 * the size passed to the flush routines does not matter 771 * since there is no way to flush a subset of the 88110 772 * I$ anyway) 773 */ 774 { 775 extern void *kernel_text, *etext; 776 777 if (fault_addr >= (vaddr_t)&kernel_text && 778 fault_addr < (vaddr_t)&etext) { 779 cmmu_icache_inv(curcpu()->ci_cpuid, 780 trunc_page(fault_addr), PAGE_SIZE); 781 cmmu_cache_wbinv(curcpu()->ci_cpuid, 782 trunc_page(fault_addr), PAGE_SIZE); 783 return; 784 } 785 } 786 goto lose; 787 case T_MISALGNFLT: 788 printf("kernel misaligned access exception @%p\n", 789 (void *)frame->tf_exip); 790 goto lose; 791 case T_INSTFLT: 792 /* kernel mode instruction access fault. 793 * Should never, never happen for a non-paged kernel. 794 */ 795#ifdef TRAPDEBUG 796 printf("Kernel Instruction fault exip %x isr %x ilar %x\n", 797 frame->tf_exip, frame->tf_isr, frame->tf_ilar); 798#endif 799 goto lose; 800 801 case T_DATAFLT: 802 /* kernel mode data fault */ 803 804 /* data fault on the user address? 
*/ 805 if ((frame->tf_dsr & CMMU_DSR_SU) == 0) 806 goto m88110_user_fault; 807 808#ifdef TRAPDEBUG 809 printf("Kernel Data access fault exip %x dsr %x dlar %x\n", 810 frame->tf_exip, frame->tf_dsr, frame->tf_dlar); 811#endif 812 813 fault_addr = frame->tf_dlar; 814 if (frame->tf_dsr & CMMU_DSR_RW) { 815 access_type = PROT_READ; 816 fault_code = PROT_READ; 817 } else { 818 access_type = PROT_READ | PROT_WRITE; 819 fault_code = PROT_WRITE; 820 } 821 822 va = trunc_page((vaddr_t)fault_addr); 823 824 vm = p->p_vmspace; 825 map = kernel_map; 826 827 if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) { 828 /* 829 * On a segment or a page fault, call uvm_fault() to 830 * resolve the fault. 831 */ 832 if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0) 833 p->p_addr->u_pcb.pcb_onfault = 0; 834 KERNEL_LOCK(); 835 result = uvm_fault(map, va, 0, access_type); 836 KERNEL_UNLOCK(); 837 p->p_addr->u_pcb.pcb_onfault = pcb_onfault; 838 /* 839 * This could be a fault caused in copyout*() 840 * while accessing kernel space. 841 */ 842 if (result != 0 && pcb_onfault != 0) { 843 frame->tf_exip = pcb_onfault; 844 /* 845 * Continue as if the fault had been resolved. 846 */ 847 result = 0; 848 } 849 if (result == 0) 850 return; 851 } 852 goto lose; 853 case T_INSTFLT+T_USER: 854 /* User mode instruction access fault */ 855 /* FALLTHROUGH */ 856 case T_DATAFLT+T_USER: 857 if (!uvm_map_inentry(p, &p->p_spinentry, PROC_STACK(p), 858 "[%s]%d/%d sp=%lx inside %lx-%lx: not MAP_STACK\n", 859 uvm_map_inentry_sp, p->p_vmspace->vm_map.sserial)) 860 goto userexit; 861m88110_user_fault: 862 if (type == T_INSTFLT+T_USER) { 863 access_type = PROT_READ; 864 fault_code = PROT_READ; 865#ifdef TRAPDEBUG 866 printf("User Instruction fault exip %x isr %x ilar %x\n", 867 frame->tf_exip, frame->tf_isr, frame->tf_ilar); 868#endif 869 } else { 870 fault_addr = frame->tf_dlar; 871 /* 872 * Unlike the 88100, there is no specific bit telling 873 * us this is the read part of an xmem operation. 
874 * However, if the WE (Write Exception) bit is set, 875 * then obviously this is not a read fault. 876 * But the value of this bit can not be relied upon 877 * if either PI or SI are set... 878 */ 879 if ((frame->tf_dsr & CMMU_DSR_RW) != 0 && 880 ((frame->tf_dsr & (CMMU_DSR_PI|CMMU_DSR_SI)) != 0 || 881 (frame->tf_dsr & CMMU_DSR_WE) == 0)) { 882 access_type = PROT_READ; 883 fault_code = PROT_READ; 884 } else { 885 access_type = PROT_READ | PROT_WRITE; 886 fault_code = PROT_WRITE; 887 } 888#ifdef TRAPDEBUG 889 printf("User Data access fault exip %x dsr %x dlar %x\n", 890 frame->tf_exip, frame->tf_dsr, frame->tf_dlar); 891#endif 892 } 893 894 va = trunc_page((vaddr_t)fault_addr); 895 896 vm = p->p_vmspace; 897 map = &vm->vm_map; 898 if ((pcb_onfault = p->p_addr->u_pcb.pcb_onfault) != 0) 899 p->p_addr->u_pcb.pcb_onfault = 0; 900 901 /* 902 * Call uvm_fault() to resolve non-bus error faults 903 * whenever possible. 904 */ 905 if (type == T_INSTFLT+T_USER) { 906 /* instruction faults */ 907 if (frame->tf_isr & 908 (CMMU_ISR_BE | CMMU_ISR_SP | CMMU_ISR_TBE)) { 909 /* bus error, supervisor protection */ 910 result = EACCES; 911 } else 912 if (frame->tf_isr & (CMMU_ISR_SI | CMMU_ISR_PI)) { 913 /* segment or page fault */ 914 KERNEL_LOCK(); 915 result = uvm_fault(map, va, 0, access_type); 916 KERNEL_UNLOCK(); 917 if (result == EACCES) 918 result = EFAULT; 919 } else { 920#ifdef TRAPDEBUG 921 printf("Unexpected Instruction fault isr %x\n", 922 frame->tf_isr); 923#endif 924 goto lose; 925 } 926 } else { 927 /* data faults */ 928 if (frame->tf_dsr & CMMU_DSR_BE) { 929 /* bus error */ 930 result = EACCES; 931 } else 932 if (frame->tf_dsr & (CMMU_DSR_SI | CMMU_DSR_PI)) { 933 /* segment or page fault */ 934 KERNEL_LOCK(); 935 result = uvm_fault(map, va, 0, access_type); 936 KERNEL_UNLOCK(); 937 if (result == EACCES) 938 result = EFAULT; 939 } else 940 if (frame->tf_dsr & (CMMU_DSR_CP | CMMU_DSR_WA)) { 941 /* copyback or write allocate error */ 942 result = EACCES; 943 } else 
944 if (frame->tf_dsr & CMMU_DSR_WE) { 945 /* write fault */ 946 /* This could be a write protection fault or an 947 * exception to set the used and modified bits 948 * in the pte. Basically, if we got a write 949 * error, then we already have a pte entry that 950 * faulted in from a previous seg fault or page 951 * fault. 952 * Get the pte and check the status of the 953 * modified and valid bits to determine if this 954 * indeed a real write fault. XXX smurph 955 */ 956 if (pmap_set_modify(map->pmap, va)) { 957#ifdef TRAPDEBUG 958 printf("Corrected userland write fault, pmap %p va %p\n", 959 map->pmap, va); 960#endif 961 result = 0; 962 } else { 963 /* must be a real wp fault */ 964#ifdef TRAPDEBUG 965 printf("Uncorrected userland write fault, pmap %p va %p\n", 966 map->pmap, va); 967#endif 968 result = uvm_fault(map, va, 0, access_type); 969 if (result == EACCES) 970 result = EFAULT; 971 } 972 } else { 973#ifdef TRAPDEBUG 974 printf("Unexpected Data access fault dsr %x\n", 975 frame->tf_dsr); 976#endif 977 goto lose; 978 } 979 } 980 p->p_addr->u_pcb.pcb_onfault = pcb_onfault; 981 982 if (result == 0) 983 uvm_grow(p, va); 984 985 /* 986 * This could be a fault caused in copyin*() 987 * while accessing user space. 988 */ 989 if (result != 0 && pcb_onfault != 0) { 990 frame->tf_exip = pcb_onfault; 991 /* 992 * Continue as if the fault had been resolved. 993 */ 994 result = 0; 995 } 996 997 if (result != 0) { 998 sig = result == EACCES ? SIGBUS : SIGSEGV; 999 fault_type = result == EACCES ? 
1000 BUS_ADRERR : SEGV_MAPERR; 1001 } 1002 break; 1003 case T_MISALGNFLT+T_USER: 1004 /* Fix any misaligned ld.d or st.d instructions */ 1005 sig = double_reg_fixup(frame, T_MISALGNFLT); 1006 fault_type = BUS_ADRALN; 1007 if (sig == 0) { 1008 /* skip recovered instruction */ 1009 m88110_skip_insn(frame); 1010 goto userexit; 1011 } 1012 break; 1013 case T_ILLFLT+T_USER: 1014 /* Fix any ld.d or st.d instruction with an odd register */ 1015 sig = double_reg_fixup(frame, T_ILLFLT); 1016 fault_type = ILL_PRVREG; 1017 if (sig == 0) { 1018 /* skip recovered instruction */ 1019 m88110_skip_insn(frame); 1020 goto userexit; 1021 } 1022 break; 1023 case T_PRIVINFLT+T_USER: 1024 fault_type = ILL_PRVREG; 1025 /* FALLTHROUGH */ 1026#ifndef DDB 1027 case T_KDB_BREAK: 1028 case T_KDB_ENTRY: 1029 case T_KDB_TRACE: 1030#endif 1031 case T_KDB_BREAK+T_USER: 1032 case T_KDB_ENTRY+T_USER: 1033 case T_KDB_TRACE+T_USER: 1034 sig = SIGILL; 1035 break; 1036 case T_BNDFLT+T_USER: 1037 sig = SIGFPE; 1038 /* skip trap instruction */ 1039 m88110_skip_insn(frame); 1040 break; 1041 case T_ZERODIV+T_USER: 1042 sig = SIGFPE; 1043 fault_type = FPE_INTDIV; 1044 /* skip trap instruction */ 1045 m88110_skip_insn(frame); 1046 break; 1047 case T_OVFFLT+T_USER: 1048 sig = SIGFPE; 1049 fault_type = FPE_INTOVF; 1050 /* skip trap instruction */ 1051 m88110_skip_insn(frame); 1052 break; 1053 case T_FPEPFLT+T_USER: 1054 m88110_fpu_exception(frame); 1055 goto userexit; 1056 case T_SIGSYS+T_USER: 1057 sig = SIGSYS; 1058 break; 1059 case T_STEPBPT+T_USER: 1060#ifdef PTRACE 1061 /* 1062 * This trap is used by the kernel to support single-step 1063 * debugging (although any user could generate this trap 1064 * which should probably be handled differently). 
When a 1065 * process is continued by a debugger with the PT_STEP 1066 * function of ptrace (single step), the kernel inserts 1067 * one or two breakpoints in the user process so that only 1068 * one instruction (or two in the case of a delayed branch) 1069 * is executed. When this breakpoint is hit, we get the 1070 * T_STEPBPT trap. 1071 */ 1072 { 1073 u_int instr; 1074 vaddr_t pc = PC_REGS(&frame->tf_regs); 1075 1076 /* read break instruction */ 1077 copyin((caddr_t)pc, &instr, sizeof(u_int)); 1078 1079 /* check and see if we got here by accident */ 1080 if ((p->p_md.md_bp0va != pc && 1081 p->p_md.md_bp1va != pc) || 1082 instr != SSBREAKPOINT) { 1083 sig = SIGTRAP; 1084 fault_type = TRAP_TRACE; 1085 break; 1086 } 1087 1088 /* restore original instruction and clear breakpoint */ 1089 KERNEL_LOCK(); 1090 if (p->p_md.md_bp0va == pc) { 1091 ss_put_value(p, pc, p->p_md.md_bp0save); 1092 p->p_md.md_bp0va = 0; 1093 } 1094 if (p->p_md.md_bp1va == pc) { 1095 ss_put_value(p, pc, p->p_md.md_bp1save); 1096 p->p_md.md_bp1va = 0; 1097 } 1098 KERNEL_UNLOCK(); 1099 1100 sig = SIGTRAP; 1101 fault_type = TRAP_BRKPT; 1102 } 1103#else 1104 sig = SIGTRAP; 1105 fault_type = TRAP_TRACE; 1106#endif 1107 break; 1108 case T_USERBPT+T_USER: 1109 /* 1110 * This trap is meant to be used by debuggers to implement 1111 * breakpoint debugging. When we get this trap, we just 1112 * return a signal which gets caught by the debugger. 
1113 */ 1114 sig = SIGTRAP; 1115 fault_type = TRAP_BRKPT; 1116 break; 1117 } 1118 1119 /* 1120 * If trap from supervisor mode, just return 1121 */ 1122 if (type < T_USER) 1123 return; 1124 1125 if (sig) { 1126deliver: 1127 sv.sival_ptr = (void *)fault_addr; 1128 trapsignal(p, sig, fault_code, fault_type, sv); 1129 } 1130 1131userexit: 1132 userret(p); 1133} 1134#endif /* M88110 */ 1135 1136__dead void 1137error_fatal(struct trapframe *frame) 1138{ 1139 if (frame->tf_vector == 0) 1140 printf("\nCPU %d Reset Exception\n", cpu_number()); 1141 else 1142 printf("\nCPU %d Error Exception\n", cpu_number()); 1143 1144#ifdef DDB 1145 regdump((struct trapframe*)frame); 1146#endif 1147 panic("unrecoverable exception %ld", frame->tf_vector); 1148} 1149 1150#ifdef M88100 1151void 1152m88100_syscall(register_t code, struct trapframe *tf) 1153{ 1154 int i, nap; 1155 const struct sysent *callp; 1156 struct proc *p = curproc; 1157 int error; 1158 register_t args[8] __aligned(8); 1159 register_t rval[2] __aligned(8); 1160 register_t *ap; 1161 1162 uvmexp.syscalls++; 1163 1164 p->p_md.md_tf = tf; 1165 1166 /* 1167 * For 88k, all the arguments are passed in the registers (r2-r9), 1168 * and further arguments (if any) on stack. 1169 * For syscall (and __syscall), r2 (and r3) has the actual code. 1170 * __syscall takes a quad syscall number, so that other 1171 * arguments are at their natural alignments. 
	 */
	ap = &tf->tf_r[2];
	nap = 8; /* r2-r9 */

	/* indirect syscall numbers consume one (or two) argument slots */
	switch (code) {
	case SYS_syscall:
		code = *ap++;
		nap--;
		break;
	case SYS___syscall:
		code = ap[_QUAD_LOWWORD];
		ap += 2;
		nap -= 2;
		break;
	}

	/* out-of-range codes fall back to the SYS_syscall (indirect) entry */
	callp = sysent;
	if (code < 0 || code >= SYS_MAXSYSCALL)
		callp += SYS_syscall;
	else
		callp += code;

	i = callp->sy_argsize / sizeof(register_t);
	if (i > sizeof(args) / sizeof(register_t))
		panic("syscall nargs");
	if (i > nap) {
		/* remaining arguments live on the user stack (r31) */
		bcopy((caddr_t)ap, (caddr_t)args, nap * sizeof(register_t));
		if ((error = copyin((caddr_t)tf->tf_r[31], args + nap,
		    (i - nap) * sizeof(register_t))))
			goto bad;
	} else
		bcopy((caddr_t)ap, (caddr_t)args, i * sizeof(register_t));

	rval[0] = 0;
	rval[1] = tf->tf_r[3];

	error = mi_syscall(p, code, callp, args, rval);

	/*
	 * system call will look like:
	 *   or r13, r0, <code>
	 *   tb0 0, r0, <128> <- sxip
	 *   br err <- snip
	 *   jmp r1 <- sfip
	 *  err: or.u r3, r0, hi16(errno)
	 *	st r2, r3, lo16(errno)
	 *	subu r2, r0, 1
	 *	jmp r1
	 *
	 * So, when we take syscall trap, sxip/snip/sfip will be as
	 * shown above.
	 * Given this,
	 * 1. If the system call returned 0, need to skip nip.
	 *	nip = fip, fip += 4
	 *    (doesn't matter what fip + 4 will be but we will never
	 *    execute this since jmp r1 at nip will change the execution flow.)
	 * 2. If the system call returned an errno > 0, plug the value
	 *    in r2, and leave nip and fip unchanged. This will have us
	 *    executing "br err" on return to user space.
	 * 3. If the system call code returned ERESTART,
	 *    we need to rexecute the trap instruction. Back up the pipe
	 *    line.
	 *     fip = nip, nip = xip
	 * 4. If the system call returned EJUSTRETURN, don't need to adjust
	 *    any pointers.
	 */

	switch (error) {
	case 0:
		/* success: return values in r2/r3, carry clear, skip "br err" */
		tf->tf_r[2] = rval[0];
		tf->tf_r[3] = rval[1];
		tf->tf_epsr &= ~PSR_C;
		tf->tf_snip = tf->tf_sfip & ~NIP_E;
		tf->tf_sfip = tf->tf_snip + 4;
		break;
	case ERESTART:
		/* back the pipeline up so the trap instruction reruns */
		m88100_rewind_insn(&(tf->tf_regs));
		/* clear the error bit */
		tf->tf_sfip &= ~FIP_E;
		tf->tf_snip &= ~NIP_E;
		break;
	case EJUSTRETURN:
		/* leave the pipeline registers untouched */
		break;
	default:
	bad:
		/* failure: errno in r2, carry set, fall into "br err" */
		tf->tf_r[2] = error;
		tf->tf_epsr |= PSR_C;	/* fail */
		tf->tf_snip = tf->tf_snip & ~NIP_E;
		tf->tf_sfip = tf->tf_sfip & ~FIP_E;
		break;
	}

	mi_syscall_return(p, code, error, rval);
}
#endif /* M88100 */

#ifdef M88110
/*
 * System call handler for the 88110 processor.
 * Instruction pointers operate differently on mc88110, hence the separate
 * pipeline fixup logic (see the large comment further down).
 */
void
m88110_syscall(register_t code, struct trapframe *tf)
{
	int i, nap;
	const struct sysent *callp;
	struct proc *p = curproc;
	int error;
	register_t args[8] __aligned(8);
	register_t rval[2] __aligned(8);
	register_t *ap;

	uvmexp.syscalls++;

	p->p_md.md_tf = tf;

	/*
	 * For 88k, all the arguments are passed in the registers (r2-r9),
	 * and further arguments (if any) on stack.
	 * For syscall (and __syscall), r2 (and r3) has the actual code.
	 * __syscall takes a quad syscall number, so that other
	 * arguments are at their natural alignments.
	 */
	ap = &tf->tf_r[2];
	nap = 8; /* r2-r9 */

	/* indirect syscall numbers consume one (or two) argument slots */
	switch (code) {
	case SYS_syscall:
		code = *ap++;
		nap--;
		break;
	case SYS___syscall:
		code = ap[_QUAD_LOWWORD];
		ap += 2;
		nap -= 2;
		break;
	}

	/* out-of-range codes fall back to the SYS_syscall (indirect) entry */
	callp = sysent;
	if (code < 0 || code >= SYS_MAXSYSCALL)
		callp += SYS_syscall;
	else
		callp += code;

	i = callp->sy_argsize / sizeof(register_t);
	if (i > sizeof(args) / sizeof(register_t))
		panic("syscall nargs");
	if (i > nap) {
		/* remaining arguments live on the user stack (r31) */
		bcopy((caddr_t)ap, (caddr_t)args, nap * sizeof(register_t));
		if ((error = copyin((caddr_t)tf->tf_r[31], args + nap,
		    (i - nap) * sizeof(register_t))))
			goto bad;
	} else
		bcopy((caddr_t)ap, (caddr_t)args, i * sizeof(register_t));

	rval[0] = 0;
	rval[1] = tf->tf_r[3];

	error = mi_syscall(p, code, callp, args, rval);

	/*
	 * system call will look like:
	 *   or r13, r0, <code>
	 *   tb0 0, r0, <128> <- exip
	 *   br err <- enip
	 *   jmp r1
	 *  err: or.u r3, r0, hi16(errno)
	 *	st r2, r3, lo16(errno)
	 *	subu r2, r0, 1
	 *	jmp r1
	 *
	 * So, when we take syscall trap, exip/enip will be as
	 * shown above.
	 * Given this,
	 * 1. If the system call returned 0, need to jmp r1.
	 *    exip += 8
	 * 2. If the system call returned an errno > 0, increment
	 *    exip += 4 and plug the value in r2. This will have us
	 *    executing "br err" on return to user space.
	 * 3. If the system call code returned ERESTART,
	 *    we need to rexecute the trap instruction. leave exip as is.
	 * 4. If the system call returned EJUSTRETURN, just return.
	 *    exip += 4
	 */

	switch (error) {
	case 0:
		/* success: return values in r2/r3, carry clear */
		tf->tf_r[2] = rval[0];
		tf->tf_r[3] = rval[1];
		tf->tf_epsr &= ~PSR_C;
		/* skip two instructions */
		m88110_skip_insn(tf);
		m88110_skip_insn(tf);
		break;
	case ERESTART:
		/*
		 * Reexecute the trap.
		 * exip is already at the trap instruction, so
		 * there is nothing to do.
		 */
		break;
	case EJUSTRETURN:
		/* skip one instruction */
		m88110_skip_insn(tf);
		break;
	default:
	bad:
		/* failure: errno in r2, carry set, fall into "br err" */
		tf->tf_r[2] = error;
		tf->tf_epsr |= PSR_C;	/* fail */
		/* skip one instruction */
		m88110_skip_insn(tf);
		break;
	}

	mi_syscall_return(p, code, error, rval);
}
#endif /* M88110 */

/*
 * Set up return-value registers as fork() libc stub expects,
 * and do normal return-to-user-mode stuff.
 */
void
child_return(arg)
	void *arg;
{
	struct proc *p = arg;
	struct trapframe *tf;

	/* pretend fork() returned zero with no error */
	tf = (struct trapframe *)USER_REGS(p);
	tf->tf_r[2] = 0;
	tf->tf_epsr &= ~PSR_C;
	/* skip br instruction as in syscall() */
#ifdef M88100
	if (CPU_IS88100) {
		tf->tf_snip = (tf->tf_sfip & XIP_ADDR) | XIP_V;
		tf->tf_sfip = tf->tf_snip + 4;
	}
#endif
#ifdef M88110
	if (CPU_IS88110) {
		/* skip two instructions */
		m88110_skip_insn(tf);
		m88110_skip_insn(tf);
	}
#endif

	KERNEL_UNLOCK();

	mi_child_return(p);
}

#ifdef PTRACE

/*
 * User Single Step Debugging Support
 */

#include <sys/ptrace.h>

vaddr_t	ss_branch_taken(u_int, vaddr_t, struct reg *);
int	ss_get_value(struct proc *, vaddr_t, u_int *);
int	ss_inst_branch_or_call(u_int);
int	ss_put_breakpoint(struct proc *, vaddr_t, vaddr_t *, u_int *);

#define	SYSCALL_INSTR	0xf000d080	/* tb0 0,r0,128 */

/*
 * Read an instruction word from the traced process' address space via the
 * ptrace machinery.  Returns 0 on success, otherwise the process_domem()
 * error.
 */
int
ss_get_value(struct proc *p, vaddr_t addr, u_int *value)
{
	struct uio uio;
	struct iovec iov;

	iov.iov_base = (caddr_t)value;
	iov.iov_len = sizeof(u_int);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)addr;
	uio.uio_resid = sizeof(u_int);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_procp
 = curproc;
	return (process_domem(curproc, p->p_p, &uio, PT_READ_I));
}

/*
 * Write an instruction word into the traced process' address space via the
 * ptrace machinery.  Returns 0 on success, otherwise the process_domem()
 * error.
 */
int
ss_put_value(struct proc *p, vaddr_t addr, u_int value)
{
	struct uio uio;
	struct iovec iov;

	iov.iov_base = (caddr_t)&value;
	iov.iov_len = sizeof(u_int);
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = (off_t)addr;
	uio.uio_resid = sizeof(u_int);
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_procp = curproc;
	return (process_domem(curproc, p->p_p, &uio, PT_WRITE_I));
}

/*
 * ss_branch_taken(instruction, pc, regs)
 *
 * instruction will be a control flow instruction location at address pc.
 * Branch taken is supposed to return the address to which the instruction
 * would jump if the branch is taken.
 *
 * This is different from branch_taken() in ddb, as we also need to process
 * system calls.
 */
vaddr_t
ss_branch_taken(u_int inst, vaddr_t pc, struct reg *regs)
{
	u_int regno;

	/*
	 * Quick check of the instruction. Note that we know we are only
	 * invoked if ss_inst_branch_or_call() returns TRUE, so we do not
	 * need to repeat the jmp, jsr and syscall stricter checks here.
	 */
	switch (inst >> (32 - 5)) {
	case 0x18:	/* br */
	case 0x19:	/* bsr */
		/* signed 26 bit pc relative displacement, shift left 2 bits */
		inst = (inst & 0x03ffffff) << 2;
		/* check if sign extension is needed */
		if (inst & 0x08000000)
			inst |= 0xf0000000;
		return (pc + inst);

	case 0x1a:	/* bb0 */
	case 0x1b:	/* bb1 */
	case 0x1d:	/* bcnd */
		/* signed 16 bit pc relative displacement, shift left 2 bits */
		inst = (inst & 0x0000ffff) << 2;
		/* check if sign extension is needed */
		if (inst & 0x00020000)
			inst |= 0xfffc0000;
		return (pc + inst);

	case 0x1e:	/* jmp or jsr */
		regno = inst & 0x1f;	/* get the register value */
		/* jumps through r0 return 0, which the caller discards */
		return (regno == 0 ? 0 : regs->r[regno]);

	default:	/* system call */
		/*
		 * The regular (pc + 4) breakpoint will match the error
		 * return. Successful system calls return at (pc + 8),
		 * so we'll set up a branch breakpoint there.
		 */
		return (pc + 8);
	}
}

/*
 * Check whether an instruction is a control flow (branch, jump or call)
 * instruction, by inspecting its major opcode.
 */
int
ss_inst_branch_or_call(u_int ins)
{
	/* check high five bits */
	switch (ins >> (32 - 5)) {
	case 0x18: /* br */
	case 0x19: /* bsr */
	case 0x1a: /* bb0 */
	case 0x1b: /* bb1 */
	case 0x1d: /* bcnd */
		return (TRUE);
	case 0x1e: /* could be jmp or jsr */
		if ((ins & 0xfffff3e0) == 0xf400c000)
			return (TRUE);
	}

	return (FALSE);
}

/*
 * Install a single-step breakpoint at va, remembering the overwritten
 * instruction in *bpsave and the breakpoint address in *bpva.
 * Returns 0 on success, otherwise the ss_get_value()/ss_put_value() error.
 */
int
ss_put_breakpoint(struct proc *p, vaddr_t va, vaddr_t *bpva, u_int *bpsave)
{
	int rc;

	/* Restore previous breakpoint if we did not trigger it. */
	if (*bpva != 0) {
		ss_put_value(p, *bpva, *bpsave);
		*bpva = 0;
	}

	/* Save instruction. */
	if ((rc = ss_get_value(p, va, bpsave)) != 0)
		return (rc);

	/* Store breakpoint instruction at the location now.
 */
	*bpva = va;
	return (ss_put_value(p, va, SSBREAKPOINT));
}

/*
 * Set up (sstep != 0) or remove (sstep == 0) the single-step breakpoints
 * for the given process.  Returns 0 on success, otherwise an error from
 * the breakpoint accessors.
 */
int
process_sstep(struct proc *p, int sstep)
{
	struct reg *sstf = USER_REGS(p);
	vaddr_t pc, brpc;
	u_int32_t instr;
	int rc;

	if (sstep == 0) {
		/* Restore previous breakpoints if any. */
		if (p->p_md.md_bp0va != 0) {
			ss_put_value(p, p->p_md.md_bp0va, p->p_md.md_bp0save);
			p->p_md.md_bp0va = 0;
		}
		if (p->p_md.md_bp1va != 0) {
			ss_put_value(p, p->p_md.md_bp1va, p->p_md.md_bp1save);
			p->p_md.md_bp1va = 0;
		}

		return (0);
	}

	/*
	 * User was stopped at pc, e.g. the instruction at pc was not executed.
	 * Fetch what's at the current location.
	 */
	pc = PC_REGS(sstf);
	if ((rc = ss_get_value(p, pc, &instr)) != 0)
		return (rc);

	/*
	 * Find if this instruction may cause a branch, and set up a breakpoint
	 * at the branch location.
	 */
	if (ss_inst_branch_or_call(instr) || instr == SYSCALL_INSTR) {
		brpc = ss_branch_taken(instr, pc, sstf);

		/* self-branches are hopeless */
		if (brpc != pc && brpc != 0) {
			if ((rc = ss_put_breakpoint(p, brpc,
			    &p->p_md.md_bp1va, &p->p_md.md_bp1save)) != 0)
				return (rc);
		}
	}

	/* breakpoint right after the current instruction */
	if ((rc = ss_put_breakpoint(p, pc + 4,
	    &p->p_md.md_bp0va, &p->p_md.md_bp0save)) != 0)
		return (rc);

	return (0);
}

#endif	/* PTRACE */

#ifdef DIAGNOSTIC
/*
 * Verify that the current interrupt priority level is at least wantipl,
 * reporting the mismatch and raising the level if not.
 */
void
splassert_check(int wantipl, const char *func)
{
	int oldipl;

	oldipl = getipl();

	if (oldipl < wantipl) {
		splassert_fail(wantipl, oldipl, func);
		/*
		 * This will raise the spl,
		 * in a feeble attempt to reduce further damage.
		 */
		(void)splraise(wantipl);
	}
}
#endif

/*
 * ld.d and st.d instructions referencing long aligned but not long long
 * aligned addresses will trigger a misaligned address exception.
 *
 * This routine attempts to recover these (valid) statements, by simulating
 * the split form of the instruction. If it fails, it returns the appropriate
 * signal number to deliver.
 *
 * Note that we do not attempt to do anything for .d.usr instructions - the
 * kernel never issues such instructions, and they cause a privileged
 * instruction exception from userland.
 */
int
double_reg_fixup(struct trapframe *frame, int fault)
{
	u_int32_t pc, instr, value;
	int regno, store;
	vaddr_t addr;

	/*
	 * Decode the faulting instruction.
	 */

	pc = PC_REGS(&frame->tf_regs);
	if (copyin((void *)pc, &instr, sizeof(u_int32_t)) != 0)
		return SIGSEGV;

	/* triadic (register + register) forms first... */
	switch (instr & 0xfc00ff00) {
	case 0xf4001000:	/* ld.d rD, rS1, rS2 */
		addr = frame->tf_r[(instr >> 16) & 0x1f]
		    + frame->tf_r[(instr & 0x1f)];
		store = 0;
		break;
	case 0xf4002000:	/* st.d rD, rS1, rS2 */
		addr = frame->tf_r[(instr >> 16) & 0x1f]
		    + frame->tf_r[(instr & 0x1f)];
		store = 1;
		break;
	default:
		/* ...then (register + immediate) forms */
		switch (instr >> 26) {
		case 0x10000000 >> 26:	/* ld.d rD, rS, imm16 */
			addr = (instr & 0x0000ffff) +
			    frame->tf_r[(instr >> 16) & 0x1f];
			store = 0;
			break;
		case 0x20000000 >> 26:	/* st.d rD, rS, imm16 */
			addr = (instr & 0x0000ffff) +
			    frame->tf_r[(instr >> 16) & 0x1f];
			store = 1;
			break;
		default:
			/* not a ld.d/st.d instruction after all */
			return SIGBUS;
		}
		break;
	}

	regno = (instr >> 21) & 0x1f;

	switch (fault) {
	case T_MISALGNFLT:
		/* We only handle long but not long long aligned access here */
		if ((addr & 0x07) != 4)
			return SIGBUS;
		break;
	case T_ILLFLT:
		/* We only handle odd register pair number here */
		if ((regno & 0x01) == 0)
			return SIGILL;
		/* We only handle long aligned access here */
		if ((addr & 0x03) != 0)
			return SIGBUS;
		break;
	}

	if (store) {
		/*
		 * Two word stores.
		 */
		if (regno == 0)
			value = 0;
		else
			value = frame->tf_r[regno];
		if (copyout(&value, (void *)addr, sizeof(u_int32_t)) != 0)
			return SIGSEGV;
		if (regno == 31)
			value = 0;
		else
			value = frame->tf_r[regno + 1];
		if (copyout(&value, (void *)(addr + 4), sizeof(u_int32_t)) != 0)
			return SIGSEGV;
	} else {
		/*
		 * Two word loads. r0 should be left unaltered, but the
		 * value should still be fetched even if it is discarded.
		 */
		if (copyin((void *)addr, &value, sizeof(u_int32_t)) != 0)
			return SIGSEGV;
		if (regno != 0)
			frame->tf_r[regno] = value;
		if (copyin((void *)(addr + 4), &value, sizeof(u_int32_t)) != 0)
			return SIGSEGV;
		if (regno != 31)
			frame->tf_r[regno + 1] = value;
	}

	return 0;
}

/*
 * Sync the data cache over the user address range given in r2 (start) and
 * r3 (length) of the trapframe, then fix the pipeline registers so that
 * execution resumes past the trap instruction.  Out-of-range or wrapping
 * ranges are silently ignored.
 */
void
cache_flush(struct trapframe *tf)
{
	struct proc *p = curproc;
	struct pmap *pmap;
	paddr_t pa;
	vaddr_t va;
	vsize_t len, count;

	p->p_md.md_tf = tf;

	pmap = vm_map_pmap(&p->p_vmspace->vm_map);
	va = tf->tf_r[2];
	len = tf->tf_r[3];

	/* reject ranges outside the user address space, or wrapping around */
	if (va < VM_MIN_ADDRESS || va >= VM_MAXUSER_ADDRESS ||
	    va + len <= va || va + len >= VM_MAXUSER_ADDRESS)
		len = 0;

	/* walk the range page by page, syncing whatever is actually mapped */
	while (len != 0) {
		count = min(len, PAGE_SIZE - (va & PAGE_MASK));
		if (pmap_extract(pmap, va, &pa) != FALSE)
			dma_cachectl(pa, count, DMA_CACHE_SYNC);
		va += count;
		len -= count;
	}

#ifdef M88100
	if (CPU_IS88100) {
		/* clear the error bit */
		tf->tf_sfip &= ~FIP_E;
		tf->tf_snip &= ~NIP_E;
	}
#endif
#ifdef M88110
	if (CPU_IS88110) {
		/* skip instruction */
		m88110_skip_insn(tf);
	}
#endif

	userret(p);
}