/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 * Copyright (C) 2001 MIPS Technologies, Inc.
 * Copyright (C) 2002, 2007 Maciej W. Rozycki
 *
 * Low-level exception and interrupt entry points for MIPS.  Contains the
 * general exception vectors, the vectored-interrupt prototype that gets
 * copied and patched at runtime, EJTAG/NMI debug entry points, and the
 * BUILD_HANDLER macro family that stamps out the per-exception handlers.
 * Throughout this file k0/k1 ($26/$27) are the kernel scratch registers
 * and $28 holds the current thread_info pointer (see TI_* offsets).
 */
#include <linux/init.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/cacheops.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
#include <asm/war.h>
#include <asm/page.h>
#include <asm/thread_info.h>

#ifdef CONFIG_MIPS_MT_SMTC
/*
 * Position-independent panic: load the message address and tail-jump to
 * panic() through AT so no absolute relocation lands in the vector.  The
 * "9: b 9b" catches a return that should never happen.
 * NOTE(review): "8f" is presumably the string label emitted by TEXT(msg)
 * (from <asm/asm.h>) -- confirm against that macro's definition.
 */
#define PANIC_PIC(msg)					\
		.set	push;				\
		.set	nomicromips;			\
		.set	reorder;			\
		PTR_LA	a0,8f;				\
		.set	noat;				\
		PTR_LA	AT, panic;			\
		jr	AT;				\
9:		b	9b;				\
		.set	pop;				\
		TEXT(msg)
#endif

	__INIT

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
#if R5432_CP0_INTERRUPT_WAR
	mfc0	k0, CP0_INDEX		# R5432 erratum workaround: dummy CP0 read
#endif
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c		# ExcCode (Cause bits 6:2) = code * 4
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1		# double: exception_handlers[] entries are 8 bytes
#endif
	PTR_L	k0, exception_handlers(k1)
	jr	k0			# dispatch to handle_* for this ExcCode
	.set	pop
	END(except_vec3_generic)

#if (cpu_has_vce != 0)
/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	mips3
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2		# ExcCode 31 = VCED (data VCE), pre-scaled by 4
	andi	k1, k1, 0x7c		# extract ExcCode * 4
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	li	k0, 14<<2		# delay slot: ExcCode 14 = VCEI (instruction VCE)
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1		# delay slot: 8-byte table entries on 64-bit
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0			# not a VCE: normal table dispatch

	/*
	 * Big shit, we now may have two dirty primary cache lines for the same
	 * physical address. We can safely invalidate the line pointed to by
	 * c0_badvaddr because after return from this exception handler the
	 * load / store will be re-executed.
	 */
handle_vced:
	MFC0	k0, CP0_BADVADDR
	li	k1, -4					# Is this ...
	and	k0, k1					# ... really needed?
	mtc0	zero, CP0_TAGLO			# zero tag = invalid line
	cache	Index_Store_Tag_D, (k0)		# invalidate primary D-cache line
	cache	Hit_Writeback_Inv_SD, (k0)	# flush matching secondary line
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count			# bump /proc VCED statistics counter
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret					# retry the faulting load/store

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)	# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count			# bump /proc VCEI statistics counter
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)
#endif	/* cpu_has_vce */

	__FINIT

	.align	5	/* 32 byte rollback region */
/*
 * Idle loop: sleep in WAIT unless a reschedule is already pending.  If an
 * interrupt arrives inside the 32-byte "rollback region" below, the
 * rollback prologue (see BUILD_ROLLBACK_PROLOGUE) rewinds EPC to the start
 * of the region so the TI_FLAGS check is re-run before sleeping again --
 * closing the race between testing _TIF_NEED_RESCHED and executing WAIT.
 */
LEAF(r4k_wait)
	.set	push
	.set	noreorder
	/* start of rollback region */
	LONG_L	t0, TI_FLAGS($28)	# $28 = current thread_info
	nop
	andi	t0, _TIF_NEED_RESCHED
	bnez	t0, 1f			# work pending: skip the WAIT
	nop				# delay slot
	nop				# padding keeps WAIT inside the region
	nop
#ifdef CONFIG_CPU_MICROMIPS
	nop				# microMIPS insns may be 16-bit; extra
	nop				# padding to preserve the region layout
	nop
	nop
#endif
	.set	mips3
	wait
	/* end of rollback region (the region size must be power of two) */
1:
	jr	ra
	nop				# delay slot
	.set	pop
	END(r4k_wait)

	/*
	 * Interrupt-entry prologue: if EPC lies anywhere inside r4k_wait's
	 * 32-byte rollback region, rewind EPC to the region start before
	 * taking the interrupt (see r4k_wait above).
	 */
	.macro	BUILD_ROLLBACK_PROLOGUE handler
	FEXPORT(rollback_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
	PTR_LA	k1, r4k_wait
	ori	k0, 0x1f	/* 32 byte rollback region */
	xori	k0, 0x1f	# k0 = EPC rounded down to 32-byte boundary
	bne	k0, k1, 9f	# not inside r4k_wait's region: leave EPC alone
	MTC0	k0, CP0_EPC	# delay slot: executed either way; harmless if no match
9:
	.set	pop
	.endm

	.align	5
BUILD_ROLLBACK_PROLOGUE handle_int
NESTED(handle_int, PT_SIZE, sp)
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and ignore this interrupt for now if so.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state. If an interrupt is taken
	 * after interrupts are disabled but before the state is updated
	 * it will appear to restore_all that it is incorrectly returning with
	 * interrupts disabled
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	and	k0, ST0_IEP	# R3000-style: "previous" IE bit in the status stack
	bnez	k0, 1f

	mfc0	k0, CP0_EPC	# IRQs were off at the interrupted insn: bail out
	.set	noreorder
	j	k0
	rfe			# delay slot: restore pre-exception status
#else
	and	k0, ST0_IE
	bnez	k0, 1f

	eret			# IRQs were off: return and retry later
#endif
1:
	.set	pop
#endif
	SAVE_ALL
	CLI
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)	# save previous pt_regs pointer...
	LONG_S	sp, TI_REGS($28)	# ...and publish ours (restored by ret_from_irq)
	PTR_LA	ra, ret_from_irq	# dispatcher "returns" into ret_from_irq
	PTR_LA	v0, plat_irq_dispatch
	jr	v0
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(handle_int)

	__INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead. The jump instruction will be replaced
 * at the initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)

/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_ejtag_debug)

	__FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler
 */
BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME
	SAVE_AT
	.set	push
	.set	noreorder
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * To keep from blindly blocking *all* interrupts
	 * during service by SMTC kernel, we also want to
	 * pass the IM value to be cleared.
	 */
FEXPORT(except_vec_vi_mori)
	ori	a0, $0, 0		# patched: a0 = IM mask for this vector
#endif /* CONFIG_MIPS_MT_SMTC */
FEXPORT(except_vec_vi_lui)
	lui	v0, 0			/* Patched */
	j	except_vec_vi_handler
FEXPORT(except_vec_vi_ori)
	ori	v0, 0			/* Patched */
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)

/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler which is passed in $v0
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC has an interesting problem that interrupts are level-triggered,
	 * and the CLI macro will clear EXL, potentially causing a duplicate
	 * interrupt service invocation. So we need to clear the associated
	 * IM bit of Status prior to doing CLI, and restore it after the
	 * service routine has been invoked - we must assume that the
	 * service routine will have cleared the state, and any active
	 * level represents a new or otherwised unserviced event...
	 */
	mfc0	t1, CP0_STATUS
	and	t0, a0, t1		# t0 = IM bits (from patched mori) currently set
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
	mfc0	t2, CP0_TCCONTEXT
	or	t2, t0, t2		# remember masked IM bits in TCContext backstop
	mtc0	t2, CP0_TCCONTEXT
#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
	xor	t1, t1, t0		# clear those IM bits in Status
	mtc0	t1, CP0_STATUS
	_ehb				# execution hazard barrier before CLI sees Status
#endif /* CONFIG_MIPS_MT_SMTC */
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0			# TRACE_IRQS_OFF calls out; v0/a0 are
#ifdef CONFIG_MIPS_MT_SMTC
	move	s1, a0			# caller-saved, so park them in s-regs
#endif
	TRACE_IRQS_OFF
#ifdef CONFIG_MIPS_MT_SMTC
	move	a0, s1
#endif
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)	# same pt_regs bookkeeping as handle_int
	LONG_S	sp, TI_REGS($28)
	PTR_LA	ra, ret_from_irq
	jr	v0			# jump to the patched-in per-vector handler
	END(except_vec_vi_handler)

/*
 * EJTAG debug exception handler.
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE		# stash k0 in the dedicated debug save register
	mfc0	k0, CP0_DEBUG

	sll	k0, k0, 30		# Check for SDBBP.
	bgez	k0, ejtag_return	# Debug.DBp (bit 1) clear: not a sw breakpoint

	PTR_LA	k0, ejtag_debug_buffer	# k1 has no DESAVE slot: spill it to memory
	LONG_S	k1, 0(k0)
	SAVE_ALL
	move	a0, sp
	jal	ejtag_exception_handler
	RESTORE_ALL
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)

ejtag_return:
	MFC0	k0, CP0_DESAVE
	.set	mips32
	deret				# return from debug mode
	.set pop
	END(ejtag_debug_handler)

/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
	.previous

	__INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do a
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_nmi)

	__FINIT

NESTED(nmi_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	SAVE_ALL
	move	a0, sp			# nmi_exception_handler(struct pt_regs *)
	jal	nmi_exception_handler
	RESTORE_ALL
	.set	mips3
	eret
	.set	pop
	END(nmi_handler)

	/*
	 * __build_clear_<name>: per-exception fixup emitted by __BUILD_HANDLER
	 * right after SAVE_ALL, before calling the C handler.
	 */
	.macro	__build_clear_none
	.endm

	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI			# handler runs with interrupts enabled
	.endm

	.macro	__build_clear_cli
	CLI			# handler runs with interrupts disabled
	TRACE_IRQS_OFF
	.endm

	.macro	__build_clear_fpe
	.set	push
	/* gas fails to assemble cfc1 for some archs (octeon).*/ \
	.set	mips1
	cfc1	a1, fcr31		# a1 = FP control/status, passed to do_fpe
	li	a2, ~(0x3f << 12)	# clear the six FCSR cause bits...
	and	a2, a1
	ctc1	a2, fcr31		# ...so the FPE cannot immediately re-raise
	.set	pop
	TRACE_IRQS_ON
	STI
	.endm

	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)	# record faulting address in pt_regs
	KMODE
	.endm

	.macro	__BUILD_silent exception
	.endm

	/* Gas tries to parse the PRINT argument as a string containing
	   string escapes and emits bogus warnings if it believes to
	   recognize an unknown escape code. So make the arguments
	   start with an n and gas will believe \n is ok ... */
	.macro	__BUILD_verbose	nexception
	LONG_L	a1, PT_EPC(sp)
#ifdef CONFIG_32BIT
	PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	PRINT("Got \nexception at %016lx\012")
#endif
	.endm

	.macro	__BUILD_count exception
	LONG_L	t0,exception_count_\exception
	LONG_ADDIU	t0, 1
	LONG_S	t0,exception_count_\exception
	/*
	 * NOTE(review): the loads/stores above use exception_count_\exception
	 * (with underscore) but this .comm defines exception_count\exception
	 * (without) -- looks inconsistent; no instantiation in this file uses
	 * the "count" verbosity, so it is latent. Verify before relying on it.
	 */
	.comm	exception_count\exception, 8, 8
	.endm

	/*
	 * Emit handle_<exception>: save all registers, run the "clear" fixup,
	 * optionally print, then tail into do_<handler>(pt_regs *) with the
	 * return path routed through ret_from_exception.
	 */
	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
	NESTED(handle_\exception, PT_SIZE, sp)
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__BUILD_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp
	PTR_LA	ra, ret_from_exception
	j	do_\handler
	END(handle_\exception)
	.endm

	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER \exception \handler \clear \verbose _int
	.endm

	BUILD_HANDLER adel ade ade silent		/* #4  */
	BUILD_HANDLER ades ade ade silent		/* #5  */
	BUILD_HANDLER ibe be cli silent			/* #6  */
	BUILD_HANDLER dbe be cli silent			/* #7  */
	BUILD_HANDLER bp bp sti silent			/* #9  */
	BUILD_HANDLER ri ri sti silent			/* #10 */
	BUILD_HANDLER cpu cpu sti silent		/* #11 */
	BUILD_HANDLER ov ov sti silent			/* #12 */
	BUILD_HANDLER tr tr sti silent			/* #13 */
	BUILD_HANDLER fpe fpe fpe silent		/* #15 */
	BUILD_HANDLER mdmx mdmx sti silent		/* #22 */
#ifdef CONFIG_HARDWARE_WATCHPOINTS
	/*
	 * For watch, interrupts will be enabled after the watch
	 * registers are read.
	 */
	BUILD_HANDLER watch watch cli silent		/* #23 */
#else
	BUILD_HANDLER watch watch sti verbose		/* #23 */
#endif
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24 */
	BUILD_HANDLER mt mt sti silent			/* #25 */
	BUILD_HANDLER dsp dsp sti silent		/* #26 */
	BUILD_HANDLER reserved reserved sti verbose	/* others */

	.align	5
	/*
	 * RI fast path for VIVT caches: handle_ri_rdhwr below reads the
	 * faulting instruction through its virtual address, which is only
	 * safe if the TLB actually maps EPC.  Probe for it first; if there
	 * is no entry, fall back to the full handle_ri slow path.
	 */
	LEAF(handle_ri_rdhwr_vivt)
#ifdef CONFIG_MIPS_MT_SMTC
	PANIC_PIC("handle_ri_rdhwr_vivt called")
#else
	.set	push
	.set	noat
	.set	noreorder
	/* check if TLB contains a entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, 0xff	/* ASID_MASK */
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, PAGE_SHIFT + 1	# round EPC down to an even page pair
	PTR_SLL	k0, PAGE_SHIFT + 1	# (VPN2 granularity for the probe)
	or	k1, k0			# EntryHi = VPN2 | current ASID
	MTC0	k1, CP0_ENTRYHI
	mtc0_tlbw_hazard
	tlbp
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX		# Index < 0 means no matching TLB entry
	.set	pop
	bltz	k1, handle_ri	/* slow path */
	/* fall thru */
#endif
	END(handle_ri_rdhwr_vivt)

	/*
	 * Fast emulation of "rdhwr v1,$29" (read userlocal/TLS pointer) for
	 * CPUs that trap it as Reserved Instruction: fetch the faulting
	 * instruction, verify it is exactly that rdhwr encoding, load the
	 * thread's TP value into v1 and skip the instruction.  Anything else
	 * goes to the generic handle_ri.
	 */
	LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
	/* uMIPS:     0x007d6b3c: rdhwr v1,$29 -- in MIPS16e it is */
	/*	      ADDIUSP $16,0x7d; LI $3,0x3c and never RI. LY22 */
	MFC0	k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || \
    defined(CONFIG_CPU_MIPS64_R2)
	and	k0, k1, 1		# EPC bit 0 set => microMIPS mode
	beqz	k0, 1f
	xor	k1, k0			# delay slot: clear the ISA-mode bit
	lhu	k0, (k1)		# fetch 32-bit microMIPS insn as two
	lhu	k1, 2(k1)		# halfwords (alignment-safe)
	ins	k1, k0, 16, 16		# k1 = full instruction word
	lui	k0, 0x007d		# expected microMIPS rdhwr v1,$29
	b	docheck
	ori	k0, 0x6b3c		# delay slot
1:
	lui	k0, 0x7c03		# expected MIPS32 rdhwr v1,$29
	lw	k1, (k1)		# fetch the faulting instruction
	ori	k0, 0xe83b
#else
	andi	k0, k1, 1
	bnez	k0, handle_ri		# odd EPC without microMIPS: punt
	lui	k0, 0x7c03		# delay slot
	lw	k1, (k1)
	ori	k0, 0xe83b
#endif
	.set	reorder
docheck:
	bne	k0, k1, handle_ri	/* if not ours */

isrdhwr:
	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	ori	k1, _THREAD_MASK	# round k1 down to the thread_info base
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)	# v1 = TLS pointer, the rdhwr result
	LONG_ADDIU	k0, 4		# step EPC past the emulated insn
	jr	k0
	rfe				# delay slot: R3000-style return
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4	/* stall on $k0 */
#else
	.set	at=v1			# daddi workaround may expand through AT
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC		# return past the emulated instruction
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK	# round k1 down to the thread_info base
	xori	k1, _THREAD_MASK
	LONG_L	v1, TI_TP_VALUE(k1)	# v1 = TLS pointer, the rdhwr result
	.set	mips3
	eret
	.set	mips0
#endif
	.set	pop
	END(handle_ri_rdhwr)

#ifdef CONFIG_64BIT
/* A temporary overflow handler used by check_daddi(). */

	__INIT

	BUILD_HANDLER daddi_ov daddi_ov none silent	/* #12 */
#endif