1/* 2 * arch/sh/kernel/traps_64.c 3 * 4 * Copyright (C) 2000, 2001 Paolo Alberelli 5 * Copyright (C) 2003, 2004 Paul Mundt 6 * Copyright (C) 2003, 2004 Richard Curnow 7 * 8 * This file is subject to the terms and conditions of the GNU General Public 9 * License. See the file "COPYING" in the main directory of this archive 10 * for more details. 11 */ 12#include <linux/sched.h> 13#include <linux/kernel.h> 14#include <linux/string.h> 15#include <linux/errno.h> 16#include <linux/ptrace.h> 17#include <linux/timer.h> 18#include <linux/mm.h> 19#include <linux/smp.h> 20#include <linux/init.h> 21#include <linux/delay.h> 22#include <linux/spinlock.h> 23#include <linux/kallsyms.h> 24#include <linux/interrupt.h> 25#include <linux/sysctl.h> 26#include <linux/module.h> 27#include <asm/system.h> 28#include <asm/uaccess.h> 29#include <asm/io.h> 30#include <asm/atomic.h> 31#include <asm/processor.h> 32#include <asm/pgtable.h> 33#include <asm/fpu.h> 34 35#undef DEBUG_EXCEPTION 36#ifdef DEBUG_EXCEPTION 37/* implemented in ../lib/dbg.c */ 38extern void show_excp_regs(char *fname, int trapnr, int signr, 39 struct pt_regs *regs); 40#else 41#define show_excp_regs(a, b, c, d) 42#endif 43 44static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name, 45 unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk); 46 47#define DO_ERROR(trapnr, signr, str, name, tsk) \ 48asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \ 49{ \ 50 do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \ 51} 52 53spinlock_t die_lock; 54 55void die(const char * str, struct pt_regs * regs, long err) 56{ 57 console_verbose(); 58 spin_lock_irq(&die_lock); 59 printk("%s: %lx\n", str, (err & 0xffffff)); 60 show_regs(regs); 61 spin_unlock_irq(&die_lock); 62 do_exit(SIGSEGV); 63} 64 65static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err) 66{ 67 if (!user_mode(regs)) 68 die(str, regs, err); 69} 
/*
 * Kernel-mode fault recovery: if the faulting PC has an entry in the
 * exception table, redirect execution to the fixup handler; otherwise die.
 * User-mode faults are ignored here (the caller has already signalled).
 */
static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
{
	if (!user_mode(regs)) {
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->pc);
		if (fixup) {
			regs->pc = fixup->fixup;
			return;
		}
		die(str, regs, err);
	}
}

DO_ERROR(13, SIGILL, "illegal slot instruction", illegal_slot_inst, current)
DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)


/* Implement misaligned load/store handling for kernel (and optionally for user
   mode too). Limitation : only SHmedia mode code is handled - there is no
   handling at all for misaligned accesses occurring in SHcompact code yet. */

static int misaligned_fixup(struct pt_regs *regs);

/* Misaligned read trap (trapnr 7): try to emulate the access, otherwise
 * raise SIGSEGV via the generic unhandled-exception path. */
asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
{
	if (misaligned_fixup(regs) < 0) {
		do_unhandled_exception(7, SIGSEGV, "address error(load)",
				"do_address_error_load",
				error_code, regs, current);
	}
	return;
}

/* Misaligned write trap (trapnr 8): same scheme as the load variant. */
asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
{
	if (misaligned_fixup(regs) < 0) {
		do_unhandled_exception(8, SIGSEGV, "address error(store)",
				"do_address_error_store",
				error_code, regs, current);
	}
	return;
}

#if defined(CONFIG_SH64_ID2815_WORKAROUND)

/* Per-opcode classification stored as 2-bit fields in shmedia_opcode_table. */
#define OPCODE_INVALID      0
#define OPCODE_USER_VALID   1
#define OPCODE_PRIV_VALID   2

/* getcon/putcon - requires checking which control register is referenced. */
#define OPCODE_CTRL_REG     3

/* Table of valid opcodes for SHmedia mode.
   Form a 10-bit value by concatenating the major/minor opcodes i.e.
   opcode[31:26,20:16].  The 6 MSBs of this value index into the following
   array.  The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
   LSBs==4'b0000 etc).
 */
static unsigned long shmedia_opcode_table[64] = {
	0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
	0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
	0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
	0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
};

/*
 * Reserved-instruction trap handler with the ID2815 silicon-defect
 * workaround: a spurious RESINST can apparently be raised on a branch from
 * SHcompact into SHmedia code (NOTE(review): inferred from the comments
 * below — confirm against the defect erratum).  If the trapped opcode is in
 * fact valid for the current privilege level, simply return so the RTE
 * restarts it; otherwise fall through to normal RESINST processing.
 */
void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
{

	unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
	unsigned long pc, aligned_pc;
	int get_user_error;
	int trapnr = 12;
	int signr = SIGILL;
	char *exception_name = "reserved_instruction";

	pc = regs->pc;
	if ((pc & 3) == 1) {
		/* SHmedia : check for defect.  This requires executable vmas
		   to be readable too. */
		aligned_pc = pc & ~3;
		if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
			get_user_error = -EFAULT;
		} else {
			get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
		}
		if (get_user_error >= 0) {
			unsigned long index, shift;
			unsigned long major, minor, combined;
			unsigned long reserved_field;
			reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
			major = (opcode >> 26) & 0x3f;
			minor = (opcode >> 16) & 0xf;
			combined = (major << 4) | minor;
			index = major;
			shift = minor << 1;
			if (reserved_field == 0) {
				int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
				switch (opcode_state) {
				case OPCODE_INVALID:
					/* Trap. */
					break;
				case OPCODE_USER_VALID:
					/* Restart the instruction : the branch to the instruction will now be from an RTE
					   not from SHcompact so the silicon defect won't be triggered. */
					return;
				case OPCODE_PRIV_VALID:
					if (!user_mode(regs)) {
						/* Should only ever get here if a module has
						   SHcompact code inside it.  If so, the same fix up is needed. */
						return; /* same reason */
					}
					/* Otherwise, user mode trying to execute a privileged instruction -
					   fall through to trap. */
					break;
				case OPCODE_CTRL_REG:
					/* If in privileged mode, return as above. */
					if (!user_mode(regs)) return;
					/* In user mode ... */
					if (combined == 0x9f) { /* GETCON */
						unsigned long regno = (opcode >> 20) & 0x3f;
						/* regs 62+ appear to be the user-accessible
						   control regs — TODO confirm vs SH-5 ISA */
						if (regno >= 62) {
							return;
						}
						/* Otherwise, reserved or privileged control register, => trap */
					} else if (combined == 0x1bf) { /* PUTCON */
						unsigned long regno = (opcode >> 4) & 0x3f;
						if (regno >= 62) {
							return;
						}
						/* Otherwise, reserved or privileged control register, => trap */
					} else {
						/* Trap */
					}
					break;
				default:
					/* Fall through to trap. */
					break;
				}
			}
			/* fall through to normal resinst processing */
		} else {
			/* Error trying to read opcode.  This typically means a
			   real fault, not a RESINST any more.  So change the
			   codes. */
			trapnr = 87;
			exception_name = "address error (exec)";
			signr = SIGSEGV;
		}
	}

	do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
}

#else /* CONFIG_SH64_ID2815_WORKAROUND */

DO_ERROR(12, SIGILL, "reserved instruction", reserved_inst, current)

#endif /* CONFIG_SH64_ID2815_WORKAROUND */

/* Called with interrupts disabled */
asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
{
	show_excp_regs(__func__, -1, -1, regs);
	die_if_kernel("exception", regs, ex);
}

/* Unknown trapa immediate: log the syscall-ID decoding hint and return
 * -ENOSYS (fatal if it came from kernel mode). */
int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
{
	/* Syscall debug */
	printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId);

	die_if_kernel("unknown trapa", regs, scId);

	return -ENOSYS;
}

/* Backtrace via the sh64 unwinder; needs CONFIG_KALLSYMS to be useful. */
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
#ifdef CONFIG_KALLSYMS
	extern void sh64_unwind(struct pt_regs *regs);
	struct pt_regs *regs;

	regs = tsk ?
		tsk->thread.kregs : NULL;

	sh64_unwind(regs);
#else
	printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
#endif
}

void show_task(unsigned long *sp)
{
	show_stack(NULL, sp);
}

void dump_stack(void)
{
	show_task(NULL);
}
/* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */
EXPORT_SYMBOL(dump_stack);

/*
 * Common tail for unhandled exceptions: record the error in the task,
 * signal a user-mode offender, and for kernel mode either take an
 * exception-table fixup or die.
 */
static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
		unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
{
	show_excp_regs(fn_name, trapnr, signr, regs);
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = trapnr;

	if (user_mode(regs))
		force_sig(signr, tsk);

	die_if_no_fixup(str, regs, error_code);
}

/*
 * Fetch the 32-bit instruction word at 'pc'.  Returns 0 on success with
 * *result_opcode filled in, or a negative error.  Only SHmedia PCs
 * (low bits == 2'b01) are handled; SHcompact and misaligned PCs fault.
 */
static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
{
	int get_user_error;
	unsigned long aligned_pc;
	unsigned long opcode;

	if ((pc & 3) == 1) {
		/* SHmedia */
		aligned_pc = pc & ~3;
		if (from_user_mode) {
			if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
				get_user_error = -EFAULT;
			} else {
				get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
				*result_opcode = opcode;
			}
			return get_user_error;
		} else {
			/* If the fault was in the kernel, we can either read
			 * this directly, or if not, we fault.
			 */
			*result_opcode = *(unsigned long *) aligned_pc;
			return 0;
		}
	} else if ((pc & 1) == 0) {
		/* SHcompact */
		/* TODO : provide handling for this. We don't really support
		   user-mode SHcompact yet, and for a kernel fault, this would
		   have to come from a module built for SHcompact.
		 */
		return -EFAULT;
	} else {
		/* misaligned */
		return -EFAULT;
	}
}

/* Is 'a' a properly sign-extended 32-bit effective address? */
static int address_is_sign_extended(__u64 a)
{
	__u64 b;
#if (NEFF == 32)
	b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
	return (b == a) ? 1 : 0;
#else
#error "Sign extend check only works for NEFF==32"
#endif
}

/*
 * Decode the effective address of a load/store opcode, either
 * base + (scaled, sign-extended 10-bit displacement) or base + index
 * register, and validate it.  Returns 0 with *address set, -1 on a bad
 * (non-sign-extended or out-of-range user) address.
 */
static int generate_and_check_address(struct pt_regs *regs,
				__u32 opcode,
				int displacement_not_indexed,
				int width_shift,
				__u64 *address)
{
	/* return -1 for fault, 0 for OK */

	__u64 base_address, addr;
	int basereg;

	basereg = (opcode >> 20) & 0x3f;
	base_address = regs->regs[basereg];
	if (displacement_not_indexed) {
		__s64 displacement;
		displacement = (opcode >> 10) & 0x3ff;
		displacement = ((displacement << 54) >> 54); /* sign extend */
		addr = (__u64)((__s64)base_address + (displacement << width_shift));
	} else {
		__u64 offset;
		int offsetreg;
		offsetreg = (opcode >> 10) & 0x3f;
		offset = regs->regs[offsetreg];
		addr = base_address + offset;
	}

	/* Check sign extended */
	if (!address_is_sign_extended(addr)) {
		return -1;
	}

	/* Check accessible.  For misaligned access in the kernel, assume the
	   address is always accessible (and if not, just fault when the
	   load/store gets done.) */
	if (user_mode(regs)) {
		if (addr >= TASK_SIZE) {
			return -1;
		}
		/* Do access_ok check later - it depends on whether it's a load or a store.
		 */
	}

	*address = addr;
	return 0;
}

/* Report throttles / enable flag, exported via the sh64.unaligned_fixup
 * sysctl hierarchy at the bottom of this file. */
static int user_mode_unaligned_fixup_count = 10;
static int user_mode_unaligned_fixup_enable = 1;
static int kernel_mode_unaligned_fixup_count = 32;

/* Byte-wise 16-bit kernel load, optionally sign-extending into *result. */
static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
{
	unsigned short x;
	unsigned char *p, *q;
	p = (unsigned char *) (int) address;
	q = (unsigned char *) &x;
	q[0] = p[0];
	q[1] = p[1];

	if (do_sign_extend) {
		*result = (__u64)(__s64) *(short *) &x;
	} else {
		*result = (__u64) x;
	}
}

/* Byte-wise 16-bit kernel store of the low half-word of 'value'. */
static void misaligned_kernel_word_store(__u64 address, __u64 value)
{
	unsigned short x;
	unsigned char *p, *q;
	p = (unsigned char *) (int) address;
	q = (unsigned char *) &x;

	x = (__u16) value;
	p[0] = q[0];
	p[1] = q[1];
}

/*
 * Emulate a misaligned integer load of 2^width_shift bytes into the
 * destination register.  User mode goes through __copy_user; kernel mode
 * uses byte copies (16-bit) or the ldlo/ldhi instruction pairs.
 */
static int misaligned_load(struct pt_regs *regs,
			   __u32 opcode,
			   int displacement_not_indexed,
			   int width_shift,
			   int do_sign_extend)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int destreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	destreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;

		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
		switch (width_shift) {
		case 1:
			if (do_sign_extend) {
				regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
			} else {
				regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
			}
			break;
		case 2:
			regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
			break;
		case 3:
			regs->regs[destreg] = buffer;
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	} else {
		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
		__u64 lo, hi;

		switch (width_shift) {
		case 1:
			misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
			break;
		case 2:
			/* ldlo/ldhi pair assembles an unaligned 32-bit value */
			asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
			asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
			regs->regs[destreg] = lo | hi;
			break;
		case 3:
			asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
			asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
			regs->regs[destreg] = lo | hi;
			break;

		default:
			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	}

	return 0;

}

/*
 * Emulate a misaligned integer store of 2^width_shift bytes from the
 * source register.  Mirror image of misaligned_load().
 */
static int misaligned_store(struct pt_regs *regs,
			    __u32 opcode,
			    int displacement_not_indexed,
			    int width_shift)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int srcreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	srcreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;

		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		switch (width_shift) {
		case 1:
			*(__u16 *) &buffer = (__u16) regs->regs[srcreg];
			break;
		case 2:
			*(__u32 *) &buffer = (__u32) regs->regs[srcreg];
			break;
		case 3:
			buffer = regs->regs[srcreg];
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}

		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
	} else {
		/* kernel mode - we can take short cuts since if we fault,
		   it's a genuine bug */
		__u64 val = regs->regs[srcreg];

		switch (width_shift) {
		case 1:
			misaligned_kernel_word_store(address, val);
			break;
		case 2:
			/* stlo/sthi pair performs the unaligned 32-bit store */
			asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
			asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
			break;
		case 3:
			asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
			asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
			break;

		default:
			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	}

	return 0;

}

/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
   error. */

/*
 * Emulate a misaligned FPU load (single, double, or paired-single) into
 * the FP register file.  User mode only; a kernel-mode FPU misalignment
 * is fatal.
 */
static int misaligned_fpu_load(struct pt_regs *regs,
			   __u32 opcode,
			   int displacement_not_indexed,
			   int width_shift,
			   int do_paired_load)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int destreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	destreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;
		__u32 buflo, bufhi;

		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
		/* 'current' may be the current owner of the FPU state, so
		   context switch the registers into memory so they can be
		   indexed by register number.
		 */
		if (last_task_used_math == current) {
			/* Flush live FPU state to thread.xstate and mark the
			   FPU disabled so the saved copy stays authoritative. */
			enable_fpu();
			save_fpu(current);
			disable_fpu();
			last_task_used_math = NULL;
			regs->sr |= SR_FD;
		}

		buflo = *(__u32*) &buffer;
		bufhi = *(1 + (__u32*) &buffer);

		switch (width_shift) {
		case 2:
			current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
			break;
		case 3:
			if (do_paired_load) {
				current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
				current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
			} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
				/* Double: halves swap on little-endian */
				current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi;
				current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo;
#else
				current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
				current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
#endif
			}
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
		return 0;
	} else {
		die ("Misaligned FPU load inside kernel", regs, 0);
		return -1;
	}


}

/*
 * Emulate a misaligned FPU store from the FP register file.  User mode
 * only, mirroring misaligned_fpu_load().
 */
static int misaligned_fpu_store(struct pt_regs *regs,
			   __u32 opcode,
			   int displacement_not_indexed,
			   int width_shift,
			   int do_paired_load)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int srcreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0) {
		return error;
	}

	srcreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;
		/* Initialise these to NaNs. */
		__u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;

		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
			return -1;
		}

		/* 'current' may be the current owner of the FPU state, so
		   context switch the registers into memory so they can be
		   indexed by register number.
*/ 668 if (last_task_used_math == current) { 669 enable_fpu(); 670 save_fpu(current); 671 disable_fpu(); 672 last_task_used_math = NULL; 673 regs->sr |= SR_FD; 674 } 675 676 switch (width_shift) { 677 case 2: 678 buflo = current->thread.xstate->hardfpu.fp_regs[srcreg]; 679 break; 680 case 3: 681 if (do_paired_load) { 682 buflo = current->thread.xstate->hardfpu.fp_regs[srcreg]; 683 bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1]; 684 } else { 685#if defined(CONFIG_CPU_LITTLE_ENDIAN) 686 bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg]; 687 buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1]; 688#else 689 buflo = current->thread.xstate->hardfpu.fp_regs[srcreg]; 690 bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1]; 691#endif 692 } 693 break; 694 default: 695 printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n", 696 width_shift, (unsigned long) regs->pc); 697 break; 698 } 699 700 *(__u32*) &buffer = buflo; 701 *(1 + (__u32*) &buffer) = bufhi; 702 if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) { 703 return -1; /* fault */ 704 } 705 return 0; 706 } else { 707 die ("Misaligned FPU load inside kernel", regs, 0); 708 return -1; 709 } 710} 711 712static int misaligned_fixup(struct pt_regs *regs) 713{ 714 unsigned long opcode; 715 int error; 716 int major, minor; 717 718 if (!user_mode_unaligned_fixup_enable) 719 return -1; 720 721 error = read_opcode(regs->pc, &opcode, user_mode(regs)); 722 if (error < 0) { 723 return error; 724 } 725 major = (opcode >> 26) & 0x3f; 726 minor = (opcode >> 16) & 0xf; 727 728 if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) { 729 --user_mode_unaligned_fixup_count; 730 /* Only do 'count' worth of these reports, to remove a potential DoS against syslog */ 731 printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n", 732 current->comm, task_pid_nr(current), (__u32)regs->pc, opcode); 733 } else if (!user_mode(regs) && 
		   (kernel_mode_unaligned_fixup_count > 0)) {
		--kernel_mode_unaligned_fixup_count;
		if (in_interrupt()) {
			/* No meaningful task context to name in an interrupt */
			printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
			       (__u32)regs->pc, opcode);
		} else {
			printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
			       current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
		}
	}


	/* Dispatch on the major opcode (instruction bits 31:26); the minor
	   field selects within the indexed-access groups. */
	switch (major) {
	case (0x84>>2): /* LD.W */
		error = misaligned_load(regs, opcode, 1, 1, 1);
		break;
	case (0xb0>>2): /* LD.UW */
		error = misaligned_load(regs, opcode, 1, 1, 0);
		break;
	case (0x88>>2): /* LD.L */
		error = misaligned_load(regs, opcode, 1, 2, 1);
		break;
	case (0x8c>>2): /* LD.Q */
		error = misaligned_load(regs, opcode, 1, 3, 0);
		break;

	case (0xa4>>2): /* ST.W */
		error = misaligned_store(regs, opcode, 1, 1);
		break;
	case (0xa8>>2): /* ST.L */
		error = misaligned_store(regs, opcode, 1, 2);
		break;
	case (0xac>>2): /* ST.Q */
		error = misaligned_store(regs, opcode, 1, 3);
		break;

	case (0x40>>2): /* indexed loads */
		switch (minor) {
		case 0x1: /* LDX.W */
			error = misaligned_load(regs, opcode, 0, 1, 1);
			break;
		case 0x5: /* LDX.UW */
			error = misaligned_load(regs, opcode, 0, 1, 0);
			break;
		case 0x2: /* LDX.L */
			error = misaligned_load(regs, opcode, 0, 2, 1);
			break;
		case 0x3: /* LDX.Q */
			error = misaligned_load(regs, opcode, 0, 3, 0);
			break;
		default:
			error = -1;
			break;
		}
		break;

	case (0x60>>2): /* indexed stores */
		switch (minor) {
		case 0x1: /* STX.W */
			error = misaligned_store(regs, opcode, 0, 1);
			break;
		case 0x2: /* STX.L */
			error = misaligned_store(regs, opcode, 0, 2);
			break;
		case 0x3: /* STX.Q */
			error = misaligned_store(regs, opcode, 0, 3);
			break;
		default:
			error = -1;
			break;
		}
		break;

	case (0x94>>2): /* FLD.S
			*/
		error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
		break;
	case (0x98>>2): /* FLD.P */
		error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
		break;
	case (0x9c>>2): /* FLD.D */
		error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
		break;
	case (0x1c>>2): /* floating indexed loads */
		switch (minor) {
		case 0x8: /* FLDX.S */
			error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
			break;
		case 0xd: /* FLDX.P */
			error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
			break;
		case 0x9: /* FLDX.D */
			error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
			break;
		default:
			error = -1;
			break;
		}
		break;
	/* The following three call misaligned_fpu_store(); the comments
	   previously mislabelled them FLD.* */
	case (0xb4>>2): /* FST.S */
		error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
		break;
	case (0xb8>>2): /* FST.P */
		error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
		break;
	case (0xbc>>2): /* FST.D */
		error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
		break;
	case (0x3c>>2): /* floating indexed stores */
		switch (minor) {
		case 0x8: /* FSTX.S */
			error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
			break;
		case 0xd: /* FSTX.P */
			error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
			break;
		case 0x9: /* FSTX.D */
			error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
			break;
		default:
			error = -1;
			break;
		}
		break;

	default:
		/* Fault */
		error = -1;
		break;
	}

	if (error < 0) {
		return error;
	} else {
		regs->pc += 4; /* Skip the instruction that's just been emulated */
		return 0;
	}

}

/* sysctl knobs under sh64/unaligned_fixup/: report throttles and the
 * user-mode fixup enable flag. */
static ctl_table unaligned_table[] = {
	{
		.procname	= "kernel_reports",
		.data		= &kernel_mode_unaligned_fixup_count,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "user_reports",
		.data		= &user_mode_unaligned_fixup_count,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "user_enable",
		.data		= &user_mode_unaligned_fixup_enable,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{}
};

/* Directory node: sh64/unaligned_fixup */
static ctl_table unaligned_root[] = {
	{
		.procname	= "unaligned_fixup",
		.mode		= 0555,
		.child		= unaligned_table
	},
	{}
};

/* Top-level directory node: sh64 */
static ctl_table sh64_root[] = {
	{
		.procname	= "sh64",
		.mode		= 0555,
		.child		= unaligned_root
	},
	{}
};
static struct ctl_table_header *sysctl_header;

/* Register the sh64/unaligned_fixup sysctl hierarchy at boot. */
static int __init init_sysctl(void)
{
	sysctl_header = register_sysctl_table(sh64_root);
	return 0;
}

__initcall(init_sysctl);


/*
 * Debug-module interrupt: read and report the DM exception-cause register
 * via its physical address, dump system state, then clear all DEBUGINT
 * causes.
 */
asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
{
	u64 peek_real_address_q(u64 addr);
	u64 poke_real_address_q(u64 addr, u64 val);
	unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
	unsigned long long exp_cause;
	/* It's not worth ioremapping the debug module registers for the amount
	   of access we make to them - just go direct to their physical
	   addresses. */
	exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
	if (exp_cause & ~4) {
		/* Bit 2 is apparently the expected cause; anything else is
		   reported — TODO confirm against the debug-module spec. */
		printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
			(unsigned long)(exp_cause & 0xffffffff));
	}
	show_state();
	/* Clear all DEBUGINT causes */
	poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}

void __cpuinit per_cpu_trap_init(void)
{
	/* Nothing to do for now, VBR initialization later. */
}