/*
 * arch/xtensa/kernel/entry.S
 *
 * Low-level exception handling
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2004-2005 by Tensilica Inc.
 *
 * Chris Zankel <chris@zankel.net>
 *
 */

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/signal.h>
#include <asm/tlbflush.h>

/* Unimplemented features. */

#undef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
#undef KERNEL_STACK_OVERFLOW_CHECK
#undef PREEMPTIBLE_KERNEL
#undef ALLOCA_EXCEPTION_IN_IRAM

/* Not well tested.
 *
 * - fast_coprocessor
 */

/*
 * Macro to find first bit set in WINDOWBASE from the left + 1
 *
 * 100....0 -> 1
 * 010....0 -> 2
 * 000....1 -> WSBITS
 */

        .macro ffs_ws bit mask

#if XCHAL_HAVE_NSA
        nsau    \bit, \mask                     # 32-WSBITS ... 31 (32 iff 0)
        addi    \bit, \bit, WSBITS - 32 + 1     # topmost bit set -> return 1
#else
        movi    \bit, WSBITS
#if WSBITS > 16
        _bltui  \mask, 0x10000, 99f
        addi    \bit, \bit, -16
        extui   \mask, \mask, 16, 16
#endif
#if WSBITS > 8
99:     _bltui  \mask, 0x100, 99f
        addi    \bit, \bit, -8
        srli    \mask, \mask, 8
#endif
99:     _bltui  \mask, 0x10, 99f
        addi    \bit, \bit, -4
        srli    \mask, \mask, 4
99:     _bltui  \mask, 0x4, 99f
        addi    \bit, \bit, -2
        srli    \mask, \mask, 2
99:     _bltui  \mask, 0x2, 99f
        addi    \bit, \bit, -1
99:

#endif
        .endm

/* ----------------- DEFAULT FIRST LEVEL EXCEPTION HANDLERS ----------------- */

/*
 * First-level exception handler for user exceptions.
 * Save some special registers, extra states and all registers in the AR
 * register file that were in use in the user task, and jump to the common
 * exception code.
 * We save SAR (used to calculate WMASK), and WB and WS (we don't have to
 * save them for kernel exceptions).
 *
 * Entry condition for user_exception:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original value in depc
 *   a3:        dispatch table
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave1:  a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _user_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _user_exception might be at an odd address. Don't use call0..call12
 */

ENTRY(user_exception)

        /* Save a2, a3, and depc, restore excsave_1 and set SP. */

        xsr     a3, EXCSAVE_1
        rsr     a0, DEPC
        s32i    a1, a2, PT_AREG1
        s32i    a0, a2, PT_AREG2
        s32i    a3, a2, PT_AREG3
        mov     a1, a2

        .globl _user_exception
_user_exception:

        /* Save SAR and turn off single stepping */

        movi    a2, 0
        rsr     a3, SAR
        xsr     a2, ICOUNTLEVEL
        s32i    a3, a1, PT_SAR
        s32i    a2, a1, PT_ICOUNTLEVEL

        /* Rotate ws so that the current windowbase is at bit0. */
        /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */
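        /* For illustration, assuming WSBITS == 8: with WINDOWBASE == 3 and
         * WINDOWSTART == 0b01001000, the ssr/slli/src/srli sequence below
         * rotates WINDOWSTART right by WINDOWBASE and yields 0b00001001,
         * i.e. the current frame ends up at bit 0 as in the picture above.
         */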
        rsr     a2, WINDOWBASE
        rsr     a3, WINDOWSTART
        ssr     a2
        s32i    a2, a1, PT_WINDOWBASE
        s32i    a3, a1, PT_WINDOWSTART
        slli    a2, a3, 32-WSBITS
        src     a2, a3, a2
        srli    a2, a2, 32-WSBITS
        s32i    a2, a1, PT_WMASK        # needed for restoring registers

        /* Save only live registers. */

        _bbsi.l a2, 1, 1f
        s32i    a4, a1, PT_AREG4
        s32i    a5, a1, PT_AREG5
        s32i    a6, a1, PT_AREG6
        s32i    a7, a1, PT_AREG7
        _bbsi.l a2, 2, 1f
        s32i    a8, a1, PT_AREG8
        s32i    a9, a1, PT_AREG9
        s32i    a10, a1, PT_AREG10
        s32i    a11, a1, PT_AREG11
        _bbsi.l a2, 3, 1f
        s32i    a12, a1, PT_AREG12
        s32i    a13, a1, PT_AREG13
        s32i    a14, a1, PT_AREG14
        s32i    a15, a1, PT_AREG15
        _bnei   a2, 1, 1f               # only one valid frame?

        /* Only one valid frame, skip saving regs. */

        j       2f

        /* Save the remaining registers.
         * We have to save all registers up to the first '1' from
         * the right, except the current frame (bit 0).
         * Assume a2 is:  001001000110001
         * All register frames starting from the top field to the marked '1'
         * must be saved.
         */

1:      addi    a3, a2, -1              # eliminate '1' in bit 0: yyyyxxww0
        neg     a3, a3                  # yyyyxxww0 -> YYYYXXWW1+1
        and     a3, a3, a2              # max. only one bit is set

        /* Find number of frames to save */

        ffs_ws  a0, a3                  # number of frames to the '1' from left

        /* Store information into WMASK:
         * bits 0..3: xxx1 masked lower 4 bits of the rotated windowstart,
         * bits 4...: number of valid 4-register frames
         */

        slli    a3, a0, 4               # number of frames to save in bits 8..4
        extui   a2, a2, 0, 4            # mask for the first 16 registers
        or      a2, a3, a2
        s32i    a2, a1, PT_WMASK        # needed when we restore the reg-file
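        /* For illustration: if ffs_ws yields N above, the stored WMASK is
         * (N << 4) | (rotated windowstart & 0xf), so the exit path can
         * recover both the number of saved 4-register frames (bits 4...)
         * and the live-window mask (bits 0..3) from a single word.
         */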
        /* Save 4 registers at a time */

1:      rotw    -1
        s32i    a0, a5, PT_AREG_END - 16
        s32i    a1, a5, PT_AREG_END - 12
        s32i    a2, a5, PT_AREG_END - 8
        s32i    a3, a5, PT_AREG_END - 4
        addi    a0, a4, -1
        addi    a1, a5, -16
        _bnez   a0, 1b

        /* WINDOWBASE still in SAR! */

        rsr     a2, SAR                 # original WINDOWBASE
        movi    a3, 1
        ssl     a2
        sll     a3, a3
        wsr     a3, WINDOWSTART         # set corresponding WINDOWSTART bit
        wsr     a2, WINDOWBASE          # and WINDOWBASE
        rsync

        /* We are back to the original stack pointer (a1) */

2:
#if XCHAL_EXTRA_SA_SIZE

        /* For user exceptions, save the extra state into the user's TCB.
         * Note: We must assume that xchal_extra_store_funcbody destroys a2..a15
         */

        GET_CURRENT(a2,a1)
        addi    a2, a2, THREAD_CP_SAVE
        xchal_extra_store_funcbody
#endif

        /* Now, jump to the common exception handler. */

        j       common_exception


/*
 * First-level exception handler for kernel exceptions
 * Save special registers and the live window frame.
 * Note: Even though we change the stack pointer, we don't have to do a
 *       MOVSP here, as we do that when we return from the exception.
 *       (See comment in the kernel exception exit code)
 *
 * Entry condition for kernel_exception:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in DEPC
 *   a3:        dispatch table
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Entry condition for _kernel_exception:
 *
 *   a0-a3 and depc have been saved to PT_AREG0...PT_AREG3 and PT_DEPC
 *   excsave has been restored, and
 *   stack pointer (a1) has been set.
 *
 * Note: _kernel_exception might be at an odd address. Don't use call0..call12
 */

ENTRY(kernel_exception)

        /* Save a0, a2, a3, DEPC and set SP. */

        xsr     a3, EXCSAVE_1           # restore a3, excsave_1
        rsr     a0, DEPC                # get a2
        s32i    a1, a2, PT_AREG1
        s32i    a0, a2, PT_AREG2
        s32i    a3, a2, PT_AREG3
        mov     a1, a2

        .globl _kernel_exception
_kernel_exception:

        /* Save SAR and turn off single stepping */

        movi    a2, 0
        rsr     a3, SAR
        xsr     a2, ICOUNTLEVEL
        s32i    a3, a1, PT_SAR
        s32i    a2, a1, PT_ICOUNTLEVEL

        /* Rotate ws so that the current windowbase is at bit0. */
        /* Assume ws = xxwww1yyyy. Rotate ws right, so that a2 = yyyyxxwww1 */

        rsr     a2, WINDOWBASE          # don't need to save these, we only
        rsr     a3, WINDOWSTART         # need shifted windowstart: windowmask
        ssr     a2
        slli    a2, a3, 32-WSBITS
        src     a2, a3, a2
        srli    a2, a2, 32-WSBITS
        s32i    a2, a1, PT_WMASK        # needed for kernel_exception_exit

        /* Save only the live window-frame */

        _bbsi.l a2, 1, 1f
        s32i    a4, a1, PT_AREG4
        s32i    a5, a1, PT_AREG5
        s32i    a6, a1, PT_AREG6
        s32i    a7, a1, PT_AREG7
        _bbsi.l a2, 2, 1f
        s32i    a8, a1, PT_AREG8
        s32i    a9, a1, PT_AREG9
        s32i    a10, a1, PT_AREG10
        s32i    a11, a1, PT_AREG11
        _bbsi.l a2, 3, 1f
        s32i    a12, a1, PT_AREG12
        s32i    a13, a1, PT_AREG13
        s32i    a14, a1, PT_AREG14
        s32i    a15, a1, PT_AREG15

1:

#ifdef KERNEL_STACK_OVERFLOW_CHECK

        /* Stack overflow check, for debugging */
        extui   a2, a1, TASK_SIZE_BITS,XX
        movi    a3, SIZE??
        _bge    a2, a3, out_of_stack_panic

#endif

/*
 * This is the common exception handler.
 * We get here from the user exception handler or simply by falling through
 * from the kernel exception handler.
 * Save the remaining special registers, switch to kernel mode, and jump
 * to the second-level exception handler.
 *
 */

common_exception:

        /* Save some registers, disable loops and clear the syscall flag. */

        rsr     a2, DEBUGCAUSE
        rsr     a3, EPC_1
        s32i    a2, a1, PT_DEBUGCAUSE
        s32i    a3, a1, PT_PC

        movi    a2, -1
        rsr     a3, EXCVADDR
        s32i    a2, a1, PT_SYSCALL
        movi    a2, 0
        s32i    a3, a1, PT_EXCVADDR
        xsr     a2, LCOUNT
        s32i    a2, a1, PT_LCOUNT

        /* It is now safe to restore the EXC_TABLE_FIXUP variable. */

        rsr     a0, EXCCAUSE
        movi    a3, 0
        rsr     a2, EXCSAVE_1
        s32i    a0, a1, PT_EXCCAUSE
        s32i    a3, a2, EXC_TABLE_FIXUP

        /* All unrecoverable states are saved on stack, now, and a1 is valid,
         * so we can allow exceptions and interrupts (*) again.
         * Set PS(EXCM = 0, UM = 0, RING = 0, OWB = 0, WOE = 1, INTLEVEL = X)
         *
         * (*) We only allow interrupts if PS.INTLEVEL was not set to 1 before
         *     (interrupts disabled) and if this exception is not an interrupt.
         */
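        /* The interrupt test below assumes the level-1 interrupt EXCCAUSE
         * code is 4: a0 holds EXCCAUSE - 4, so moveqz forces a3 to 1
         * (PS.INTLEVEL stays at 1) exactly when this exception is a
         * level-1 interrupt.
         */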
        rsr     a3, PS
        addi    a0, a0, -4
        movi    a2, 1
        extui   a3, a3, 0, 1            # a3 = PS.INTLEVEL[0]
        moveqz  a3, a2, a0              # a3 = 1 iff interrupt exception
        movi    a2, 1 << PS_WOE_BIT
        or      a3, a3, a2
        rsr     a0, EXCCAUSE
        xsr     a3, PS

        s32i    a3, a1, PT_PS           # save ps

        /* Save LBEG, LEND */

        rsr     a2, LBEG
        rsr     a3, LEND
        s32i    a2, a1, PT_LBEG
        s32i    a3, a1, PT_LEND

        /* Go to second-level dispatcher. Set up parameters to pass to the
         * exception handler and call the exception handler.
         */

        movi    a4, exc_table
        mov     a6, a1                  # pass stack frame
        mov     a7, a0                  # pass EXCCAUSE
        addx4   a4, a0, a4
        l32i    a4, a4, EXC_TABLE_DEFAULT       # load handler

        /* Call the second-level handler */

        callx4  a4

        /* Jump here for exception exit */

common_exception_return:

        /* Jump if we are returning from kernel exceptions. */

1:      l32i    a3, a1, PT_PS
        _bbsi.l a3, PS_UM_BIT, 2f
        j       kernel_exception_exit

        /* Specific to a user exception exit:
         * We need to check some flags for signal handling and rescheduling,
         * and have to restore WB and WS, extra states, and all registers
         * in the register file that were in use in the user task.
         */

2:      wsr     a3, PS          /* disable interrupts */

        /* Check for signals (keep interrupts disabled while we read TI_FLAGS)
         * Note: PS.INTLEVEL = 0, PS.EXCM = 1
         */

        GET_THREAD_INFO(a2,a1)
        l32i    a4, a2, TI_FLAGS

        /* Enable interrupts again.
         * Note: When we get here, we certainly have handled any interrupts.
         * (Hint: There is only one user exception frame on stack)
         */

        movi    a3, 1 << PS_WOE_BIT

        _bbsi.l a4, TIF_NEED_RESCHED, 3f
        _bbci.l a4, TIF_SIGPENDING, 4f

#ifndef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
        l32i    a4, a1, PT_DEPC
        bgeui   a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 4f
#endif

        /* Reenable interrupts and call do_signal() */

        wsr     a3, PS
        movi    a4, do_signal   # int do_signal(struct pt_regs*, sigset_t*)
        mov     a6, a1
        movi    a7, 0
        callx4  a4
        j       1b

3:      /* Reenable interrupts and reschedule */

        wsr     a3, PS
        movi    a4, schedule    # void schedule (void)
        callx4  a4
        j       1b

        /* Restore the state of the task and return from the exception. */

4:      /* a2 holds GET_CURRENT(a2,a1)  */

#if XCHAL_EXTRA_SA_SIZE

        /* For user exceptions, restore the extra state from the user's TCB. */

        /* Note: a2 still contains GET_CURRENT(a2,a1) */
        addi    a2, a2, THREAD_CP_SAVE
        xchal_extra_load_funcbody


#endif /* XCHAL_EXTRA_SA_SIZE */


        /* Switch to the user thread WINDOWBASE. Save SP temporarily in DEPC */

        l32i    a2, a1, PT_WINDOWBASE
        l32i    a3, a1, PT_WINDOWSTART
        wsr     a1, DEPC                # use DEPC as temp storage
        wsr     a3, WINDOWSTART         # restore WINDOWSTART
        ssr     a2                      # preserve user's WB in the SAR
        wsr     a2, WINDOWBASE          # switch to user's saved WB
        rsync
        rsr     a1, DEPC                # restore stack pointer
        l32i    a2, a1, PT_WMASK        # register frames saved (in bits 4...9)
        rotw    -1                      # we restore a4..a7
        _bltui  a6, 16, 1f              # only have to restore current window?

        /* The working registers are a0 and a3. We are restoring to
         * a4..a7. Be careful not to destroy what we have just restored.
         * Note: wmask has the format YYYYM:
         *       Y: number of registers saved in groups of 4
         *       M: 4 bit mask of first 16 registers
         */
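        /* For illustration: wmask == 0x21 means Y == 2 (the loop below runs
         * twice and restores two 4-register frames) and M == 0b0001 (no
         * _bbsi.l bit set, so a4..a15 of the live window are restored later
         * at common_exception_exit).
         */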
        mov     a2, a6
        mov     a3, a5

2:      rotw    -1                      # a0..a3 become a4..a7
        addi    a3, a7, -4*4            # next iteration
        addi    a2, a6, -16             # decrementing Y in WMASK
        l32i    a4, a3, PT_AREG_END + 0
        l32i    a5, a3, PT_AREG_END + 4
        l32i    a6, a3, PT_AREG_END + 8
        l32i    a7, a3, PT_AREG_END + 12
        _bgeui  a2, 16, 2b

        /* Clear unrestored registers (don't leak anything to user-land) */

1:      rsr     a0, WINDOWBASE
        rsr     a3, SAR
        sub     a3, a0, a3
        beqz    a3, 2f
        extui   a3, a3, 0, WBBITS

1:      rotw    -1
        addi    a3, a7, -1
        movi    a4, 0
        movi    a5, 0
        movi    a6, 0
        movi    a7, 0
        bgei    a3, 1, 1b

        /* We are back where we were when we started.
         * Note: a2 still contains WMASK (if we've returned to the original
         *       frame where we had loaded a2), or at least the lower 4 bits
         *       (if we have restored WSBITS-1 frames).
         */

2:      j       common_exception_exit

        /* This is the kernel exception exit.
         * We avoided doing a MOVSP when we entered the exception, but we
         * have to do it here.
         */

kernel_exception_exit:

        /* Disable interrupts (a3 holds PT_PS) */

        wsr     a3, PS

#ifdef PREEMPTIBLE_KERNEL

#ifdef CONFIG_PREEMPT

        /*
         * Note: We've just returned from a call4, so we have
         * at least 4 additional registers.
         */

        /* Check current_thread_info->preempt_count */

        GET_THREAD_INFO(a2)
        l32i    a3, a2, TI_PREEMPT
        bnez    a3, 1f

        l32i    a2, a2, TI_FLAGS

1:

#endif

#endif

        /* Check if we have to do a movsp.
         *
         * We only have to do a movsp if the previous window-frame has
         * been spilled to the *temporary* exception stack instead of the
         * task's stack. This is the case if the corresponding bit in
         * WINDOWSTART for the previous window-frame was set before
         * (not spilled) but is zero now (spilled).
         * If this bit is zero, all other bits except the one for the
         * current window frame are also zero. So, we can use a simple test:
         * 'and' WINDOWSTART and WINDOWSTART-1:
         *
         *  (XXXXXX1[0]* - 1) AND XXXXXX1[0]* = XXXXXX0[0]*
         *
         * The result is zero only if one bit was set.
         *
         * (Note: We might have gone through several task switches before
         *        we come back to the current task, so WINDOWBASE might be
         *        different from the time the exception occurred.)
         */

        /* Test WINDOWSTART before and after the exception.
         * We actually have WMASK, so we only have to test if it is 1 or not.
         */

        l32i    a2, a1, PT_WMASK
        _beqi   a2, 1, common_exception_exit    # spilled before exception, jump

        /* Test WINDOWSTART now. If spilled, do the movsp */

        rsr     a3, WINDOWSTART
        addi    a0, a3, -1
        and     a3, a3, a0
        _bnez   a3, common_exception_exit

        /* Do a movsp (we returned from a call4, so we have at least a0..a7) */

        addi    a0, a1, -16
        l32i    a3, a0, 0
        l32i    a4, a0, 4
        s32i    a3, a1, PT_SIZE+0
        s32i    a4, a1, PT_SIZE+4
        l32i    a3, a0, 8
        l32i    a4, a0, 12
        s32i    a3, a1, PT_SIZE+8
        s32i    a4, a1, PT_SIZE+12
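        /* The four words copied above are the caller's a0..a3 spill area,
         * located in the 16 bytes directly below the stack pointer; copying
         * them to just above the pt_regs area does the job MOVSP would
         * normally have done.
         */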
        /* Common exception exit.
         * We restore the special register and the current window frame, and
         * return from the exception.
         *
         * Note: We expect a2 to hold PT_WMASK
         */

common_exception_exit:

        _bbsi.l a2, 1, 1f
        l32i    a4, a1, PT_AREG4
        l32i    a5, a1, PT_AREG5
        l32i    a6, a1, PT_AREG6
        l32i    a7, a1, PT_AREG7
        _bbsi.l a2, 2, 1f
        l32i    a8, a1, PT_AREG8
        l32i    a9, a1, PT_AREG9
        l32i    a10, a1, PT_AREG10
        l32i    a11, a1, PT_AREG11
        _bbsi.l a2, 3, 1f
        l32i    a12, a1, PT_AREG12
        l32i    a13, a1, PT_AREG13
        l32i    a14, a1, PT_AREG14
        l32i    a15, a1, PT_AREG15

        /* Restore PC, SAR */

1:      l32i    a2, a1, PT_PC
        l32i    a3, a1, PT_SAR
        wsr     a2, EPC_1
        wsr     a3, SAR

        /* Restore LBEG, LEND, LCOUNT */

        l32i    a2, a1, PT_LBEG
        l32i    a3, a1, PT_LEND
        wsr     a2, LBEG
        l32i    a2, a1, PT_LCOUNT
        wsr     a3, LEND
        wsr     a2, LCOUNT

        /* We control single stepping through the ICOUNTLEVEL register. */

        l32i    a2, a1, PT_ICOUNTLEVEL
        movi    a3, -2
        wsr     a2, ICOUNTLEVEL
        wsr     a3, ICOUNT

        /* Check if it was double exception. */

        l32i    a0, a1, PT_DEPC
        l32i    a3, a1, PT_AREG3
        l32i    a2, a1, PT_AREG2
        _bgeui  a0, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

        /* Restore a0...a3 and return */

        l32i    a0, a1, PT_AREG0
        l32i    a1, a1, PT_AREG1
        rfe

1:      wsr     a0, DEPC
        l32i    a0, a1, PT_AREG0
        l32i    a1, a1, PT_AREG1
        rfde

/*
 * Debug exception handler.
 *
 * Currently, we don't support KGDB, so only user applications can be debugged.
 *
 * When we get here, a0 is trashed and saved to excsave[debuglevel]
 */

ENTRY(debug_exception)

        rsr     a0, EPS + XCHAL_DEBUGLEVEL
        bbsi.l  a0, PS_EXCM_BIT, 1f     # exception mode

        /* Set EPC_1 and EXCCAUSE */

        wsr     a2, DEPC                # save a2 temporarily
        rsr     a2, EPC + XCHAL_DEBUGLEVEL
        wsr     a2, EPC_1

        movi    a2, EXCCAUSE_MAPPED_DEBUG
        wsr     a2, EXCCAUSE

        /* Restore PS to the value before the debug exc but with PS.EXCM set. */

        movi    a2, 1 << PS_EXCM_BIT
        or      a2, a0, a2
        movi    a0, debug_exception     # restore a3, debug jump vector
        wsr     a2, PS
        xsr     a0, EXCSAVE + XCHAL_DEBUGLEVEL

        /* Switch to kernel/user stack, restore jump vector, and save a0 */

        bbsi.l  a2, PS_UM_BIT, 2f       # jump if user mode

        addi    a2, a1, -16-PT_SIZE     # assume kernel stack
        s32i    a0, a2, PT_AREG0
        movi    a0, 0
        s32i    a1, a2, PT_AREG1
        s32i    a0, a2, PT_DEPC         # mark it as a regular exception
        xsr     a0, DEPC
        s32i    a3, a2, PT_AREG3
        s32i    a0, a2, PT_AREG2
        mov     a1, a2
        j       _kernel_exception

2:      rsr     a2, EXCSAVE_1
        l32i    a2, a2, EXC_TABLE_KSTK  # load kernel stack pointer
        s32i    a0, a2, PT_AREG0
        movi    a0, 0
        s32i    a1, a2, PT_AREG1
        s32i    a0, a2, PT_DEPC
        xsr     a0, DEPC
        s32i    a3, a2, PT_AREG3
        s32i    a0, a2, PT_AREG2
        mov     a1, a2
        j       _user_exception

        /* Debug exception while in exception mode. */
1:      j       1b


/*
 * We get here in case of an unrecoverable exception.
 * The only thing we can do is to be nice and print a panic message.
 * We only produce a single stack frame for panic, so ???
 *
 *
 * Entry conditions:
 *
 *   - a0 contains the caller address; original value saved in excsave1.
 *   - the original a0 contains a valid return address (backtrace) or 0.
 *   - a2 contains a valid stackpointer
 *
 * Notes:
 *
 *   - If the stack pointer could be invalid, the caller has to set up a
 *     dummy stack pointer (e.g. the stack of the init_task)
 *
 *   - If the return address could be invalid, the caller has to set it
 *     to 0, so the backtrace would stop.
 *
 */
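        /* The handler below resets the register window to a single frame
         * (WINDOWSTART == 1, WINDOWBASE == 0), re-enables window overflow
         * (PS.WOE) at interrupt level 1, and points a1 into init_task as
         * the dummy stack described above before calling panic().
         */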
        .align 4
unrecoverable_text:
        .ascii "Unrecoverable error in exception handler\0"

ENTRY(unrecoverable_exception)

        movi    a0, 1
        movi    a1, 0

        wsr     a0, WINDOWSTART
        wsr     a1, WINDOWBASE
        rsync

        movi    a1, (1 << PS_WOE_BIT) | 1
        wsr     a1, PS
        rsync

        movi    a1, init_task
        movi    a0, 0
        addi    a1, a1, PT_REGS_OFFSET

        movi    a4, panic
        movi    a6, unrecoverable_text

        callx4  a4

1:      j       1b


/* -------------------------- FAST EXCEPTION HANDLERS ----------------------- */

/*
 * Fast-handler for alloca exceptions
 *
 *  The ALLOCA handler is entered when user code executes the MOVSP
 *  instruction and the caller's frame is not in the register file.
 *  In this case, the caller frame's a0..a3 are on the stack just
 *  below sp (a1), and this handler moves them.
 *
 *  For "MOVSP <ar>,<as>" without destination register a1, this routine
 *  simply moves the value from <as> to <ar> without moving the save area.
 *
 * Entry condition:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in DEPC
 *   a3:        dispatch table
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

#if XCHAL_HAVE_BE
#define _EXTUI_MOVSP_SRC(ar)    extui ar, ar, 4, 4
#define _EXTUI_MOVSP_DST(ar)    extui ar, ar, 0, 4
#else
#define _EXTUI_MOVSP_SRC(ar)    extui ar, ar, 0, 4
#define _EXTUI_MOVSP_DST(ar)    extui ar, ar, 4, 4
#endif

ENTRY(fast_alloca)

        /* We shouldn't be in a double exception. */

        l32i    a0, a2, PT_DEPC
        _bgeui  a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double

        rsr     a0, DEPC                # get a2
        s32i    a4, a2, PT_AREG4        # save a4 and
        s32i    a0, a2, PT_AREG2        # a2 to stack

        /* Exit critical section. */

        movi    a0, 0
        s32i    a0, a3, EXC_TABLE_FIXUP

        /* Restore a3, excsave_1 */

        xsr     a3, EXCSAVE_1           # make sure excsave_1 is valid for dbl.
        rsr     a4, EPC_1               # get exception address
        s32i    a3, a2, PT_AREG3        # save a3 to stack
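        /* The code below decodes the trapping MOVSP instruction itself to
         * find the source and destination registers: byte 1 of the opcode
         * holds the source field and byte 0 the destination field, and the
         * nibble position within each byte depends on the endianness (see
         * the _EXTUI_MOVSP_SRC/_EXTUI_MOVSP_DST macros above).
         */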
#ifdef ALLOCA_EXCEPTION_IN_IRAM
#error iram not supported
#else
        /* Note: l8ui not allowed in IRAM/IROM!! */
        l8ui    a0, a4, 1               # read as(src) from MOVSP instruction
#endif
        movi    a3, .Lmovsp_src
        _EXTUI_MOVSP_SRC(a0)            # extract source register number
        addx8   a3, a0, a3
        jx      a3

.Lunhandled_double:
        wsr     a0, EXCSAVE_1
        movi    a0, unrecoverable_exception
        callx0  a0

        .align 8
.Lmovsp_src:
        l32i    a3, a2, PT_AREG0;       _j 1f;  .align 8
        mov     a3, a1;                 _j 1f;  .align 8
        l32i    a3, a2, PT_AREG2;       _j 1f;  .align 8
        l32i    a3, a2, PT_AREG3;       _j 1f;  .align 8
        l32i    a3, a2, PT_AREG4;       _j 1f;  .align 8
        mov     a3, a5;                 _j 1f;  .align 8
        mov     a3, a6;                 _j 1f;  .align 8
        mov     a3, a7;                 _j 1f;  .align 8
        mov     a3, a8;                 _j 1f;  .align 8
        mov     a3, a9;                 _j 1f;  .align 8
        mov     a3, a10;                _j 1f;  .align 8
        mov     a3, a11;                _j 1f;  .align 8
        mov     a3, a12;                _j 1f;  .align 8
        mov     a3, a13;                _j 1f;  .align 8
        mov     a3, a14;                _j 1f;  .align 8
        mov     a3, a15;                _j 1f;  .align 8

1:

#ifdef ALLOCA_EXCEPTION_IN_IRAM
#error iram not supported
#else
        l8ui    a0, a4, 0               # read ar(dst) from MOVSP instruction
#endif
        addi    a4, a4, 3               # step over movsp
        _EXTUI_MOVSP_DST(a0)            # extract destination register
        wsr     a4, EPC_1               # save new epc_1

        _bnei   a0, 1, 1f               # no 'movsp a1, ax': jump

        /* Move the save area. This implies the use of the L32E
         * and S32E instructions, because this move must be done with
         * the user's PS.RING privilege levels, not with ring 0
         * (kernel's) privileges currently active with PS.EXCM
         * set. Note that we have still registered a fixup routine with the
         * double exception vector in case a double exception occurs.
         */

        /* a0,a4:avail a1:old user stack a2:exc. stack a3:new user stack. */

        l32e    a0, a1, -16
        l32e    a4, a1, -12
        s32e    a0, a3, -16
        s32e    a4, a3, -12
        l32e    a0, a1, -8
        l32e    a4, a1, -4
        s32e    a0, a3, -8
        s32e    a4, a3, -4

        /* Restore stack-pointer and all the other saved registers. */

        mov     a1, a3

        l32i    a4, a2, PT_AREG4
        l32i    a3, a2, PT_AREG3
        l32i    a0, a2, PT_AREG0
        l32i    a2, a2, PT_AREG2
        rfe

        /* MOVSP <at>,<as> was invoked with <at> != a1.
         * Because the stack pointer is not being modified,
         * we should be able to just modify the pointer
         * without moving any save area.
         * The processor only traps these occurrences if the
         * caller window isn't live, so unfortunately we can't
         * use this as an alternate trap mechanism.
         * So we just do the move.  This requires that we
         * resolve the destination register, not just the source,
         * so there's some extra work.
         * (PERHAPS NOT REALLY NEEDED, BUT CLEANER...)
         */

        /* a0 dst-reg, a1 user-stack, a2 stack, a3 value of src reg. */

1:      movi    a4, .Lmovsp_dst
        addx8   a4, a0, a4
        jx      a4

        .align 8
.Lmovsp_dst:
        s32i    a3, a2, PT_AREG0;       _j 1f;  .align 8
        mov     a1, a3;                 _j 1f;  .align 8
        s32i    a3, a2, PT_AREG2;       _j 1f;  .align 8
        s32i    a3, a2, PT_AREG3;       _j 1f;  .align 8
        s32i    a3, a2, PT_AREG4;       _j 1f;  .align 8
        mov     a5, a3;                 _j 1f;  .align 8
        mov     a6, a3;                 _j 1f;  .align 8
        mov     a7, a3;                 _j 1f;  .align 8
        mov     a8, a3;                 _j 1f;  .align 8
        mov     a9, a3;                 _j 1f;  .align 8
        mov     a10, a3;                _j 1f;  .align 8
        mov     a11, a3;                _j 1f;  .align 8
        mov     a12, a3;                _j 1f;  .align 8
        mov     a13, a3;                _j 1f;  .align 8
        mov     a14, a3;                _j 1f;  .align 8
        mov     a15, a3;                _j 1f;  .align 8

1:      l32i    a4, a2, PT_AREG4
        l32i    a3, a2, PT_AREG3
        l32i    a0, a2, PT_AREG0
        l32i    a2, a2, PT_AREG2
        rfe


/*
 * fast system calls.
 *
 * WARNING:  The kernel doesn't save the entire user context before
 * handling a fast system call.  These functions are small and short,
 * usually offering some functionality not available to user tasks.
 *
 * BE CAREFUL TO PRESERVE THE USER'S CONTEXT.
 *
 * Entry condition:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in DEPC
 *   a3:        dispatch table
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: a3
 */
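        /* Dispatch, identical for the kernel and user entry points below:
         * DEPC still holds the original a2, i.e. the syscall number.  A
         * value of 0 selects the register-spill fast path, __NR_xtensa the
         * atomic operations, and anything else falls back to the regular
         * exception handlers.
         */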
ENTRY(fast_syscall_kernel)

        /* Skip syscall. */

        rsr     a0, EPC_1
        addi    a0, a0, 3
        wsr     a0, EPC_1

        l32i    a0, a2, PT_DEPC
        bgeui   a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

        rsr     a0, DEPC                # get syscall-nr
        _beqz   a0, fast_syscall_spill_registers
        _beqi   a0, __NR_xtensa, fast_syscall_xtensa

        j       kernel_exception

ENTRY(fast_syscall_user)

        /* Skip syscall. */

        rsr     a0, EPC_1
        addi    a0, a0, 3
        wsr     a0, EPC_1

        l32i    a0, a2, PT_DEPC
        bgeui   a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_syscall_unrecoverable

        rsr     a0, DEPC                # get syscall-nr
        _beqz   a0, fast_syscall_spill_registers
        _beqi   a0, __NR_xtensa, fast_syscall_xtensa

        j       user_exception

ENTRY(fast_syscall_unrecoverable)

        /* Restore all states. */

        l32i    a0, a2, PT_AREG0        # restore a0
        xsr     a2, DEPC                # restore a2, depc
        rsr     a3, EXCSAVE_1

        wsr     a0, EXCSAVE_1
        movi    a0, unrecoverable_exception
        callx0  a0



/*
 * sysxtensa syscall handler
 *
 * int sysxtensa (SYS_XTENSA_ATOMIC_SET,     ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_ADD,     ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_EXG_ADD, ptr, val,    unused);
 * int sysxtensa (SYS_XTENSA_ATOMIC_CMP_SWP, ptr, oldval, newval);
 * a2            a6                          a3   a4      a5
 *
 * Entry condition:
 *
 *   a0:        a2 (syscall-nr), original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in a0 and DEPC
 *   a3:        dispatch table, original in excsave_1
 *   a4..a15:   unchanged
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 *
 * Note: we don't have to save a2; a2 holds the return value
 *
 * We use the two macros TRY and CATCH:
 *
 * TRY   adds an entry to the __ex_table fixup table for the immediately
 *       following instruction.
 *
 * CATCH catches any exception that occurred at one of the preceding TRY
 *       statements and continues from there
 *
 * Usage: TRY    l32i    a0, a1, 0
 *               <other code>
 *        done:  rfe
 *        CATCH  <set return code>
 *               j done
 */

#define TRY                                                     \
        .section __ex_table, "a";                               \
        .word   66f, 67f;                                       \
        .text;                                                  \
66:

#define CATCH                                                   \
67:

ENTRY(fast_syscall_xtensa)

        xsr     a3, EXCSAVE_1           # restore a3, excsave1

        s32i    a7, a2, PT_AREG7        # we need an additional register
        movi    a7, 4                   # sizeof(unsigned int)
        access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp

        addi    a6, a6, -1              # assuming SYS_XTENSA_ATOMIC_SET = 1
        _bgeui  a6, SYS_XTENSA_COUNT - 1, .Lill
        _bnei   a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp

        /* Fall through for ATOMIC_CMP_SWP. */
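        /* Roughly, in C (a sketch only; the TRY marks are the user-memory
         * accesses that may fault):
         *
         *      if (*ptr == oldval) {
         *              *ptr = newval;
         *              return 1;
         *      }
         *      return 0;
         */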
.Lswp:  /* Atomic compare and swap */

TRY     l32i    a0, a3, 0               # read old value
        bne     a0, a4, 1f              # same as old value? jump
TRY     s32i    a5, a3, 0               # different, modify value
        l32i    a7, a2, PT_AREG7        # restore a7
        l32i    a0, a2, PT_AREG0        # restore a0
        movi    a2, 1                   # and return 1
        addi    a6, a6, 1               # restore a6 (really necessary?)
        rfe

1:      l32i    a7, a2, PT_AREG7        # restore a7
        l32i    a0, a2, PT_AREG0        # restore a0
        movi    a2, 0                   # return 0
        addi    a6, a6, 1               # restore a6 (really necessary?)
        rfe

.Lnswp: /* Atomic set, add, and exg_add. */

TRY     l32i    a7, a3, 0               # orig
        add     a0, a4, a7              # + arg
        moveqz  a0, a4, a6              # set
TRY     s32i    a0, a3, 0               # write new value

        mov     a0, a2
        mov     a2, a7
        l32i    a7, a0, PT_AREG7        # restore a7
        l32i    a0, a0, PT_AREG0        # restore a0
        addi    a6, a6, 1               # restore a6 (really necessary?)
        rfe

CATCH
.Leac:  l32i    a7, a2, PT_AREG7        # restore a7
        l32i    a0, a2, PT_AREG0        # restore a0
        movi    a2, -EFAULT
        rfe

.Lill:  l32i    a7, a2, PT_AREG7        # restore a7
        l32i    a0, a2, PT_AREG0        # restore a0
        movi    a2, -EINVAL
        rfe




/* fast_syscall_spill_registers.
 *
 * Entry condition:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in DEPC
 *   a3:        dispatch table
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: a3
 *
 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
 * Note: We don't need to save a2 in depc (return value)
 */

ENTRY(fast_syscall_spill_registers)

        /* Register a FIXUP handler (pass current wb as a parameter) */

        movi    a0, fast_syscall_spill_registers_fixup
        s32i    a0, a3, EXC_TABLE_FIXUP
        rsr     a0, WINDOWBASE
        s32i    a0, a3, EXC_TABLE_PARAM

        /* Save a3 and SAR on stack. */

        rsr     a0, SAR
        xsr     a3, EXCSAVE_1           # restore a3 and excsave_1
        s32i    a0, a2, PT_AREG4        # store SAR to PT_AREG4
        s32i    a3, a2, PT_AREG3

        /* The spill routine might clobber a7, a11, and a15. */

        s32i    a7, a2, PT_AREG5
        s32i    a11, a2, PT_AREG6
        s32i    a15, a2, PT_AREG7

        call0   _spill_registers        # destroys a3, DEPC, and SAR

        /* Advance PC, restore registers and SAR, and return from exception. */

        l32i    a3, a2, PT_AREG4
        l32i    a0, a2, PT_AREG0
        wsr     a3, SAR
        l32i    a3, a2, PT_AREG3

        /* Restore clobbered registers. */

        l32i    a7, a2, PT_AREG5
        l32i    a11, a2, PT_AREG6
        l32i    a15, a2, PT_AREG7

        movi    a2, 0
        rfe

/* Fixup handler.
 *
 * We get here if the spill routine causes an exception, e.g. a TLB miss.
 * We basically restore WINDOWBASE and WINDOWSTART to the condition when
 * we entered the spill routine and jump to the user exception handler.
 *
 * a0: value of depc, original value in depc
 * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
 * a3: exctable, original value in excsave1
 */
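        /* (While EXC_TABLE_FIXUP is non-zero, the double exception vector
         * branches to the registered routine instead of treating the fault
         * as unrecoverable; fast_syscall_spill_registers installed this
         * routine and stored its WINDOWBASE in EXC_TABLE_PARAM above.)
         */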
fast_syscall_spill_registers_fixup:

        rsr     a2, WINDOWBASE          # get current windowbase (a2 is saved)
        xsr     a0, DEPC                # restore depc and a0
        ssl     a2                      # set shift (32 - WB)

        /* We need to make sure the current registers (a0-a3) are preserved.
         * To do this, we simply set the bit for the current window frame
         * in WS, so that the exception handlers save them to the task stack.
         */

        rsr     a3, EXCSAVE_1           # get spill-mask
        slli    a2, a3, 1               # shift left by one

        slli    a3, a2, 32-WSBITS
        src     a2, a2, a3              # a2 = xxwww1yyxxxwww1yy......
        wsr     a2, WINDOWSTART         # set corrected windowstart

        movi    a3, exc_table
        l32i    a2, a3, EXC_TABLE_DOUBLE_SAVE   # restore a2
        l32i    a3, a3, EXC_TABLE_PARAM         # original WB (in user task)

        /* Return to the original (user task) WINDOWBASE.
         * We leave the following frame behind:
         *  a0, a1, a2 same
         *  a3:        trashed (saved in excsave_1)
         *  depc:      depc (we have to return to that address)
         *  excsave_1: a3
         */

        wsr     a3, WINDOWBASE
        rsync

        /* We are now in the original frame when we entered _spill_registers:
         *  a0: return address
         *  a1: used, stack pointer
         *  a2: kernel stack pointer
         *  a3: available, saved in EXCSAVE_1
         *  depc: exception address
         *  excsave: a3
         * Note: This frame might be the same as above.
         */

#ifdef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
        /* Restore the registers we saved earlier as a precaution.
         * We have the value of the 'right' a3
         */

        l32i    a7, a2, PT_AREG5
        l32i    a11, a2, PT_AREG6
        l32i    a15, a2, PT_AREG7
#endif

        /* Setup stack pointer. */

        addi    a2, a2, -PT_USER_SIZE
        s32i    a0, a2, PT_AREG0

        /* Make sure we return to this fixup handler. */

        movi    a3, fast_syscall_spill_registers_fixup_return
        s32i    a3, a2, PT_DEPC         # setup depc

        /* Jump to the exception handler. */

        movi    a3, exc_table
        rsr     a0, EXCCAUSE
        addx4   a0, a0, a3              # find entry in table
        l32i    a0, a0, EXC_TABLE_FAST_USER     # load handler
        jx      a0

fast_syscall_spill_registers_fixup_return:

        /* When we return here, all registers have been restored (a2: DEPC) */

        wsr     a2, DEPC                # exception address

        /* Restore fixup handler. */

        xsr     a3, EXCSAVE_1
        movi    a2, fast_syscall_spill_registers_fixup
        s32i    a2, a3, EXC_TABLE_FIXUP
        rsr     a2, WINDOWBASE
        s32i    a2, a3, EXC_TABLE_PARAM
        l32i    a2, a3, EXC_TABLE_KSTK

#ifdef SIGNAL_HANDLING_IN_DOUBLE_EXCEPTION
        /* Save registers again that might be clobbered. */

        s32i    a7, a2, PT_AREG5
        s32i    a11, a2, PT_AREG6
        s32i    a15, a2, PT_AREG7
#endif

        /* Load WB at the time the exception occurred. */

        rsr     a3, SAR                 # WB is still in SAR
        neg     a3, a3
        wsr     a3, WINDOWBASE
        rsync

        /* Restore a3 and return. */

        movi    a3, exc_table
        xsr     a3, EXCSAVE_1

        rfde


/*
 * spill all registers.
 *
 * This is not a real function. The following conditions must be met:
 *
 *  - must be called with call0.
 *  - uses DEPC, a3 and SAR.
 *  - the last 'valid' register of each frame is clobbered.
 *  - the caller must have registered a fixup handler
 *    (or be inside a critical section)
 *  - PS_EXCM must be set (PS_WOE cleared?)
 */

ENTRY(_spill_registers)

        /*
         * Rotate ws so that the current windowbase is at bit 0.
         * Assume ws = xxxwww1yy (www1 current window frame).
         * Rotate ws right so that a2 = yyxxxwww1.
         */

        wsr     a2, DEPC                # preserve a2
        rsr     a2, WINDOWBASE
        rsr     a3, WINDOWSTART
        ssr     a2                      # holds WB
        slli    a2, a3, WSBITS
        or      a3, a3, a2              # a3 = xxxwww1yyxxxwww1yy
        srl     a3, a3
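        /* For illustration, assuming WSBITS == 8: with WINDOWBASE == 3 and
         * WINDOWSTART == 0b01001000, the slli/or pair doubles the mask to
         * 0b0100100001001000, and srl by WINDOWBASE leaves 0b...01001001
         * in a3: WINDOWSTART rotated right with the current frame at bit 0.
         */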
        /* We are done if there are no more than the current register frame. */

        extui   a3, a3, 1, WSBITS-2     # a3 = 0yyxxxwww
        movi    a2, (1 << (WSBITS-1))
        _beqz   a3, .Lnospill           # only one active frame? jump

        /* We want 1 at the top, so that we return to the current windowbase */

        or      a3, a3, a2              # 1yyxxxwww

        /* Skip empty frames - get 'oldest' WINDOWSTART-bit. */

        wsr     a3, WINDOWSTART         # save shifted windowstart
        neg     a2, a3
        and     a3, a2, a3              # first bit set from right: 000010000

        ffs_ws  a2, a3                  # a2: shifts to skip empty frames
        movi    a3, WSBITS
        sub     a2, a3, a2              # WSBITS-a2:number of 0-bits from right
        ssr     a2                      # save in SAR for later.

        rsr     a3, WINDOWBASE
        add     a3, a3, a2
        rsr     a2, DEPC                # restore a2
        wsr     a3, WINDOWBASE
        rsync

        rsr     a3, WINDOWSTART
        srl     a3, a3                  # shift windowstart

        /* WB is now just one frame below the oldest frame in the register
           window. WS is shifted so the oldest frame is in bit 0, thus, WB
           and WS differ by one 4-register frame. */

        /* Save frames. Depending on which call was used (call4, call8, or
         * call12), we have to save 4, 8, or 12 registers.
         */

        _bbsi.l a3, 1, .Lc4
        _bbsi.l a3, 2, .Lc8

        /* Special case: we have a call12-frame starting at a4. */

        _bbci.l a3, 3, .Lc12    # bit 3 shouldn't be zero! (Jump to Lc12 first)

        s32e    a4, a1, -16     # a1 is valid with an empty spill area
        l32e    a4, a5, -12
        s32e    a8, a4, -48
        mov     a8, a4
        l32e    a4, a1, -16
        j       .Lc12c

.Lloop: _bbsi.l a3, 1, .Lc4
        _bbci.l a3, 2, .Lc12

.Lc8:   s32e    a4, a13, -16
        l32e    a4, a5, -12
        s32e    a8, a4, -32
        s32e    a5, a13, -12
        s32e    a6, a13, -8
        s32e    a7, a13, -4
        s32e    a9, a4, -28
        s32e    a10, a4, -24
        s32e    a11, a4, -20

        srli    a11, a3, 2      # shift windowstart by 2
        rotw    2
        _bnei   a3, 1, .Lloop

.Lexit: /* Done. Do the final rotation, set WS, and return. */

        rotw    1
        rsr     a3, WINDOWBASE
        ssl     a3
        movi    a3, 1
        sll     a3, a3
        wsr     a3, WINDOWSTART

.Lnospill:
        jx      a0

.Lc4:   s32e    a4, a9, -16
        s32e    a5, a9, -12
        s32e    a6, a9, -8
        s32e    a7, a9, -4

        srli    a7, a3, 1
        rotw    1
        _bnei   a3, 1, .Lloop
        j       .Lexit

.Lc12:  _bbci.l a3, 3, .Linvalid_mask   # bit 3 shouldn't be zero!

        /* 12-register frame (call12) */

        l32e    a2, a5, -12
        s32e    a8, a2, -48
        mov     a8, a2

.Lc12c: s32e    a9, a8, -44
        s32e    a10, a8, -40
        s32e    a11, a8, -36
        s32e    a12, a8, -32
        s32e    a13, a8, -28
        s32e    a14, a8, -24
        s32e    a15, a8, -20
        srli    a15, a3, 3

        /* The stack pointer for a4..a7 is out of reach, so we rotate the
         * window, grab the stack pointer, and rotate back.
         * Alternatively, we could also use the following approach, but that
         * makes the fixup routine much more complicated:
         * rotw 1
         * s32e a0, a13, -16
         * ...
         * rotw 2
         */

        rotw    1
        mov     a5, a13
        rotw    -1

        s32e    a4, a9, -16
        s32e    a5, a9, -12
        s32e    a6, a9, -8
        s32e    a7, a9, -4

        rotw    3

        _beqi   a3, 1, .Lexit
        j       .Lloop

.Linvalid_mask:

        /* We get here because of an unrecoverable error in the window
         * registers. If we are in user space, we kill the application;
         * in kernel space, this condition is unrecoverable.
         */

        rsr     a0, PS
        _bbci.l a0, PS_UM_BIT, 1f

        /* User space: Setup a dummy frame and kill application.
         * Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
         */
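        /* In the windowed ABI the first call4 argument goes in a6, so the
         * sequence below amounts to calling do_exit(SIGSEGV) on a freshly
         * reset window and the kernel stack from EXC_TABLE_KSTK.
         */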
        movi    a0, 1
        movi    a1, 0

        wsr     a0, WINDOWSTART
        wsr     a1, WINDOWBASE
        rsync

        movi    a0, 0

        movi    a3, exc_table
        l32i    a1, a3, EXC_TABLE_KSTK
        wsr     a3, EXCSAVE_1

        movi    a4, (1 << PS_WOE_BIT) | 1
        wsr     a4, PS
        rsync

        movi    a6, SIGSEGV
        movi    a4, do_exit
        callx4  a4

1:      /* Kernel space: PANIC! */

        wsr     a0, EXCSAVE_1
        movi    a0, unrecoverable_exception
        callx0  a0              # should not return
1:      j       1b

/*
 * We should never get here. Bail out!
 */

ENTRY(fast_second_level_miss_double_kernel)

1:      movi    a0, unrecoverable_exception
        callx0  a0              # should not return
1:      j       1b

/* First-level entry handler for user, kernel, and double 2nd-level
 * TLB miss exceptions.  Note that for now, user and kernel miss
 * exceptions share the same entry point and are handled identically.
 *
 * An old, less-efficient C version of this function used to exist.
 * We include it below, interleaved as comments, for reference.
 *
 * Entry condition:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in DEPC
 *   a3:        dispatch table
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_second_level_miss)

        /* Save a1. Note: we don't expect a double exception. */

        s32i    a1, a2, PT_AREG1

        /* We need to map the page of PTEs for the user task.  Find
         * the pointer to that page.  Also, it's possible for tsk->mm
         * to be NULL while tsk->active_mm is nonzero if we faulted on
         * a vmalloc address.  In that rare case, we must use
         * active_mm instead to avoid a fault in this handler.  See
         *
         * http://mail.nl.linux.org/linux-mm/2002-08/msg00258.html
         *   (or search Internet on "mm vs. active_mm")
         *
         *      if (!mm)
         *              mm = tsk->active_mm;
         *      pgd = pgd_offset (mm, regs->excvaddr);
         *      pmd = pmd_offset (pgd, regs->excvaddr);
         *      pmdval = *pmd;
         */

        GET_CURRENT(a1,a2)
        l32i    a0, a1, TASK_MM         # tsk->mm
        beqz    a0, 9f

8:      rsr     a1, EXCVADDR            # fault address
        _PGD_OFFSET(a0, a1, a1)
        l32i    a0, a0, 0               # read pmdval
        //beqi  a0, _PAGE_USER, 2f
        beqz    a0, 2f

        /* Read ptevaddr and convert to top of page-table page.
         *
         *      vpnval = read_ptevaddr_register() & PAGE_MASK;
         *      vpnval += DTLB_WAY_PGTABLE;
         *      pteval = mk_pte (virt_to_page(pmd_val(pmdval)), PAGE_KERNEL);
         *      write_dtlb_entry (pteval, vpnval);
         *
         * The messy computation for 'pteval' above really simplifies
         * into the following:
         *
         * pteval = ((pmdval - PAGE_OFFSET) & PAGE_MASK) | PAGE_DIRECTORY
         */
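        /* (The '& PAGE_MASK' needs no PAGE_MASK constant here: extui below
         * extracts the low PAGE_SHIFT bits of pmdval - PAGE_OFFSET and the
         * xor clears exactly those bits.)
         */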
        movi    a1, -PAGE_OFFSET
        add     a0, a0, a1              # pmdval - PAGE_OFFSET
        extui   a1, a0, 0, PAGE_SHIFT   # ... & PAGE_MASK
        xor     a0, a0, a1


        movi    a1, PAGE_DIRECTORY
        or      a0, a0, a1              # ... | PAGE_DIRECTORY

        rsr     a1, PTEVADDR
        srli    a1, a1, PAGE_SHIFT
        slli    a1, a1, PAGE_SHIFT      # ptevaddr & PAGE_MASK
        addi    a1, a1, DTLB_WAY_PGD    # ... + way_number

        wdtlb   a0, a1
        dsync

        /* Exit critical section. */

        movi    a0, 0
        s32i    a0, a3, EXC_TABLE_FIXUP

        /* Restore the working registers, and return. */

        l32i    a0, a2, PT_AREG0
        l32i    a1, a2, PT_AREG1
        l32i    a2, a2, PT_DEPC
        xsr     a3, EXCSAVE_1

        bgeui   a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

        /* Restore excsave1 and return. */

        rsr     a2, DEPC
        rfe

        /* Return from double exception. */

1:      xsr     a2, DEPC
        esync
        rfde

9:      l32i    a0, a1, TASK_ACTIVE_MM  # unlikely case mm == 0
        j       8b

2:      /* Invalid PGD, default exception handling */

        rsr     a1, DEPC
        xsr     a3, EXCSAVE_1
        s32i    a1, a2, PT_AREG2
        s32i    a3, a2, PT_AREG3
        mov     a1, a2

        rsr     a2, PS
        bbsi.l  a2, PS_UM_BIT, 1f
        j       _kernel_exception
1:      j       _user_exception


/*
 * StoreProhibitedException
 *
 * Update the pte and invalidate the dtlb mapping for this pte.
 *
 * Entry condition:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in DEPC
 *   a3:        dispatch table
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_store_prohibited)

        /* Save a1 and a4. */

        s32i    a1, a2, PT_AREG1
        s32i    a4, a2, PT_AREG4

        GET_CURRENT(a1,a2)
        l32i    a0, a1, TASK_MM         # tsk->mm
        beqz    a0, 9f

8:      rsr     a1, EXCVADDR            # fault address
        _PGD_OFFSET(a0, a1, a4)
        l32i    a0, a0, 0
        beqz    a0, 2f

        _PTE_OFFSET(a0, a1, a4)
        l32i    a4, a0, 0               # read pteval
        movi    a1, _PAGE_VALID | _PAGE_RW
        bnall   a4, a1, 2f

        movi    a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_WRENABLE
        or      a4, a4, a1
        rsr     a1, EXCVADDR
        s32i    a4, a0, 0

        /* We need to flush the cache if we have page coloring. */
#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
        dhwb    a0, 0
#endif
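        /* Probe the DTLB for the faulting address; if a matching entry
         * exists, invalidate it and write it back with the updated PTE so
         * the store can be replayed with write permission.
         */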
        pdtlb   a0, a1
        beqz    a0, 1f
        idtlb   a0
        wdtlb   a4, a0
1:

        /* Exit critical section. */

        movi    a0, 0
        s32i    a0, a3, EXC_TABLE_FIXUP

        /* Restore the working registers, and return. */

        l32i    a4, a2, PT_AREG4
        l32i    a1, a2, PT_AREG1
        l32i    a0, a2, PT_AREG0
        l32i    a2, a2, PT_DEPC

        /* Restore excsave1 and a3. */

        xsr     a3, EXCSAVE_1
        bgeui   a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f

        rsr     a2, DEPC
        rfe

        /* Double exception. Restore FIXUP handler and return. */

1:      xsr     a2, DEPC
        esync
        rfde

9:      l32i    a0, a1, TASK_ACTIVE_MM  # unlikely case mm == 0
        j       8b

2:      /* If there was a problem, handle fault in C */

        rsr     a4, DEPC                # still holds a2
        xsr     a3, EXCSAVE_1
        s32i    a4, a2, PT_AREG2
        s32i    a3, a2, PT_AREG3
        l32i    a4, a2, PT_AREG4
        mov     a1, a2

        rsr     a2, PS
        bbsi.l  a2, PS_UM_BIT, 1f
        j       _kernel_exception
1:      j       _user_exception


#if XCHAL_EXTRA_SA_SIZE

#warning fast_coprocessor untested

/*
 * Entry condition:
 *
 *   a0:        trashed, original value saved on stack (PT_AREG0)
 *   a1:        a1
 *   a2:        new stack pointer, original in DEPC
 *   a3:        dispatch table
 *   depc:      a2, original value saved on stack (PT_DEPC)
 *   excsave_1: a3
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *           <  VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */

ENTRY(fast_coprocessor_double)
        wsr     a0, EXCSAVE_1
        movi    a0, unrecoverable_exception
        callx0  a0

ENTRY(fast_coprocessor)

        /* Fatal if we are in a double exception. */

        l32i    a0, a2, PT_DEPC
        _bgeui  a0, VALID_DOUBLE_EXCEPTION_ADDRESS, fast_coprocessor_double

        /* Save some registers: a1, a3, a4, a5, and SAR */

        xsr     a3, EXCSAVE_1
        s32i    a3, a2, PT_AREG3
        rsr     a3, SAR
        s32i    a4, a2, PT_AREG4
        s32i    a1, a2, PT_AREG1
        s32i    a5, a2, PT_AREG5
        s32i    a3, a2, PT_SAR
        mov     a1, a2

        /* Currently, the HAL macros only guarantee preserving a0 and a1.
         * These can and will be refined in the future, but for now,
         * just save the remaining registers of a2...a15.
         */
        s32i    a6, a1, PT_AREG6
        s32i    a7, a1, PT_AREG7
        s32i    a8, a1, PT_AREG8
        s32i    a9, a1, PT_AREG9
        s32i    a10, a1, PT_AREG10
        s32i    a11, a1, PT_AREG11
        s32i    a12, a1, PT_AREG12
        s32i    a13, a1, PT_AREG13
        s32i    a14, a1, PT_AREG14
        s32i    a15, a1, PT_AREG15

        /* Find coprocessor number. Subtract first CP EXCCAUSE from EXCCAUSE */

        rsr     a0, EXCCAUSE
        addi    a3, a0, -XCHAL_EXCCAUSE_COPROCESSOR0_DISABLED

        /* Set corresponding CPENABLE bit */

        movi    a4, 1
        ssl     a3                      # SAR: 32 - coprocessor_number
        rsr     a5, CPENABLE
        sll     a4, a4
        or      a4, a5, a4
        wsr     a4, CPENABLE
        rsync
        movi    a0, coprocessor_info    # list of owner and offset into cp_save
        addx8   a0, a3, a0              # entry for CP

        bne     a4, a5, .Lload          # bit wasn't set before, cp not in use

        /* Now compare the current task with the owner of the coprocessor.
         * If they are the same, there is no reason to save or restore any
         * coprocessor state.  Having already enabled the coprocessor,
         * branch ahead to return.
         */
        GET_CURRENT(a5,a1)
        l32i    a4, a0, COPROCESSOR_INFO_OWNER  # a4: current owner for this CP
        beq     a4, a5, .Ldone

        /* Find location to dump current coprocessor state:
         *  task_struct->task_cp_save_offset + coprocessor_offset[coprocessor]
         *
         * Note: a0 pointer to the entry in the coprocessor owner table,
         *       a3 coprocessor number,
         *       a4 current owner of coprocessor.
         */
        l32i    a5, a0, COPROCESSOR_INFO_OFFSET
        addi    a2, a4, THREAD_CP_SAVE
        add     a2, a2, a5

        /* Store current coprocessor states. (a2 points to the owner's
         * save area)
         */

        xchal_cpi_store_funcbody

        /* The macro might have destroyed a3 (coprocessor number), but
         * SAR still has 32 - coprocessor_number!
         */
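        /* (32 - SAR recovers the coprocessor number below.) */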
        movi    a3, 32
        rsr     a4, SAR
        sub     a3, a3, a4

.Lload: /* A new task now owns the coprocessor. Save its TCB pointer into
         * the coprocessor owner table.
         *
         * Note: a0 pointer to the entry in the coprocessor owner table,
         *       a3 coprocessor number.
         */
        GET_CURRENT(a4,a1)
        s32i    a4, a0, 0

        /* Find location from where to restore the current coprocessor state.*/

        l32i    a5, a0, COPROCESSOR_INFO_OFFSET
        addi    a2, a4, THREAD_CP_SAVE
        add     a2, a2, a5

        xchal_cpi_load_funcbody

        /* We must assume that the xchal_cpi_load_funcbody macro destroyed
         * registers a2..a15.
         */

.Ldone: l32i    a15, a1, PT_AREG15
        l32i    a14, a1, PT_AREG14
        l32i    a13, a1, PT_AREG13
        l32i    a12, a1, PT_AREG12
        l32i    a11, a1, PT_AREG11
        l32i    a10, a1, PT_AREG10
        l32i    a9, a1, PT_AREG9
        l32i    a8, a1, PT_AREG8
        l32i    a7, a1, PT_AREG7
        l32i    a6, a1, PT_AREG6
        l32i    a5, a1, PT_AREG5
        l32i    a4, a1, PT_AREG4
        l32i    a3, a1, PT_AREG3
        l32i    a2, a1, PT_AREG2
        l32i    a0, a1, PT_AREG0
        l32i    a1, a1, PT_AREG1

        rfe

#endif /* XCHAL_EXTRA_SA_SIZE */

/*
 * System Calls.
 *
 * void system_call (struct pt_regs* regs, int exccause)
 *                                   a2        a3
 */

ENTRY(system_call)
        entry   a1, 32

        /* regs->syscall = regs->areg[2] */

        l32i    a3, a2, PT_AREG2
        mov     a6, a2
        movi    a4, do_syscall_trace_enter
        s32i    a3, a2, PT_SYSCALL
        callx4  a4

        /* syscall = sys_call_table[syscall_nr] */

        movi    a4, sys_call_table
        movi    a5, __NR_syscall_count
        movi    a6, -ENOSYS
        bgeu    a3, a5, 1f

        addx4   a4, a3, a4
        l32i    a4, a4, 0
        movi    a5, sys_ni_syscall
        beq     a4, a5, 1f

        /* Load args: arg0 - arg5 are passed via regs. */

        l32i    a6, a2, PT_AREG6
        l32i    a7, a2, PT_AREG3
        l32i    a8, a2, PT_AREG4
        l32i    a9, a2, PT_AREG5
        l32i    a10, a2, PT_AREG8
        l32i    a11, a2, PT_AREG9

        /* Pass one additional argument to the syscall: pt_regs (on stack) */
        s32i    a2, a1, 0

        callx4  a4

1:      /* regs->areg[2] = return_value */

        s32i    a6, a2, PT_AREG2
        movi    a4, do_syscall_trace_leave
        mov     a6, a2
        callx4  a4
        retw


/*
 * Create a kernel thread
 *
 * int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
 * a2                a2                 a3         a4
 */

ENTRY(kernel_thread)
        entry   a1, 16

        mov     a5, a2                  # preserve fn over syscall
        mov     a7, a3                  # preserve args over syscall

        movi    a3, _CLONE_VM | _CLONE_UNTRACED
        movi    a2, __NR_clone
        or      a6, a4, a3              # arg0: flags
        mov     a3, a1                  # arg1: sp
        syscall

        beq     a3, a1, 1f              # branch if parent
        mov     a6, a7                  # args
        callx4  a5                      # fn(args)

        movi    a2, __NR_exit
        syscall                         # return value of fn(args) still in a6

1:      retw

/*
 * Do a system call from kernel instead of calling sys_execve, so we end up
 * with proper pt_regs.
 *
 * int kernel_execve(const char *fname, char *const argv[], char *const envp[])
 * a2                a2                 a3                  a4
 */

ENTRY(kernel_execve)
        entry   a1, 16
        mov     a6, a2                  # arg0 is in a6
        movi    a2, __NR_execve
        syscall

        retw
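/*
 * Both helpers above enter the kernel through the regular 'syscall'
 * instruction rather than calling sys_clone/sys_execve directly, so the
 * child or the new program starts out with properly populated pt_regs.
 */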
/*
 * Task switch.
 *
 * struct task*  _switch_to (struct task* prev, struct task* next)
 * a2                                     a2                  a3
 */

ENTRY(_switch_to)

        entry   a1, 16

        mov     a4, a3                  # preserve a3

        s32i    a0, a2, THREAD_RA       # save return address
        s32i    a1, a2, THREAD_SP       # save stack pointer

        /* Disable ints while we manipulate the stack pointer; spill regs. */

        movi    a5, (1 << PS_EXCM_BIT) | LOCKLEVEL
        xsr     a5, PS
        rsr     a3, EXCSAVE_1
        rsync
        s32i    a3, a3, EXC_TABLE_FIXUP /* enter critical section */

        call0   _spill_registers

        /* Set kernel stack (and leave critical section)
         * Note: It's safe to set it here. The stack will not be overwritten
         *       because the kernel stack will only be loaded again after
         *       we return from kernel space.
         */

        l32i    a0, a4, TASK_THREAD_INFO
        rsr     a3, EXCSAVE_1           # exc_table
        movi    a1, 0
        addi    a0, a0, PT_REGS_OFFSET
        s32i    a1, a3, EXC_TABLE_FIXUP
        s32i    a0, a3, EXC_TABLE_KSTK

        /* restore context of the task that 'next' addresses */

        l32i    a0, a4, THREAD_RA       /* restore return address */
        l32i    a1, a4, THREAD_SP       /* restore stack pointer */

        wsr     a5, PS
        rsync

        retw


ENTRY(ret_from_fork)

        /* void schedule_tail (struct task_struct *prev)
         * Note: prev is still in a6 (return value from fake call4 frame)
         */
        movi    a4, schedule_tail
        callx4  a4

        movi    a4, do_syscall_trace_leave
        mov     a6, a1
        callx4  a4

        j       common_exception_return
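        /* (ret_from_fork above runs in the context of the new child:
         * schedule_tail() finishes the scheduler bookkeeping for the
         * switch, and the child then leaves the kernel through the common
         * exception-return path.)
         */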