/* Modified by Broadcom Corp. Portions Copyright (c) Broadcom Corp, 2012. */
/*
 *  linux/arch/arm/kernel/entry-armv.S
 *
 *  Copyright (C) 1996,1997,1998 Russell King.
 *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
 *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Low-level vector interface routines
 *
 *  Note:  there is a StrongARM bug in the STMIA rn, {regs}^ instruction
 *  that causes it to save wrong values...  Be aware!
 */

#include <asm/memory.h>
#include <asm/glue.h>
#include <asm/vfpmacros.h>
#include <mach/entry-macro.S>
#include <asm/thread_notify.h>
#include <asm/unwind.h>
#include <asm/unistd.h>
#include <asm/tls.h>

#include "entry-header.S"

/*
 * Interrupt handling.  Preserves r7, r8, r9
 */
        .macro  irq_handler
        get_irqnr_preamble r5, lr
1:      get_irqnr_and_base r0, r6, r5, lr
        movne   r1, sp
        @
        @ routine called with r0 = irq number, r1 = struct pt_regs *
        @
        adrne   lr, BSYM(1b)
        bne     asm_do_IRQ

#ifdef CONFIG_SMP
        test_for_ipi r0, r6, r5, lr
        movne   r0, sp
        adrne   lr, BSYM(1b)
        bne     do_IPI

#ifdef CONFIG_LOCAL_TIMERS
        test_for_ltirq r0, r6, r5, lr
        movne   r0, sp
        adrne   lr, BSYM(1b)
        bne     do_local_timer
#endif
#endif

        .endm

#ifdef CONFIG_KPROBES
        .section        .kprobes.text,"ax",%progbits
#else
        .text
        .section        .text.fastpath, "a"
#endif

/*
 * Invalid mode handlers
 */
        .macro  inv_entry, reason
        sub     sp, sp, #S_FRAME_SIZE
 ARM(   stmib   sp, {r1 - lr}           )
 THUMB( stmia   sp, {r0 - r12}          )
 THUMB( str     sp, [sp, #S_SP]         )
 THUMB( str     lr, [sp, #S_LR]         )
        mov     r1, #\reason
        .endm

__pabt_invalid:
        inv_entry BAD_PREFETCH
        b       common_invalid
ENDPROC(__pabt_invalid)

__dabt_invalid:
        inv_entry BAD_DATA
        b       common_invalid
ENDPROC(__dabt_invalid)

__irq_invalid:
        inv_entry BAD_IRQ
        b       common_invalid
ENDPROC(__irq_invalid)

__und_invalid:
        inv_entry BAD_UNDEFINSTR

        @
        @ XXX fall through to common_invalid
        @

@
@ common_invalid - generic code for failed exception (re-entrant version of handlers)
@
common_invalid:
        zero_fp

        ldmia   r0, {r4 - r6}
        add     r0, sp, #S_PC           @ here for interlock avoidance
        mov     r7, #-1                 @  ""   ""    ""        ""
        str     r4, [sp]                @ save preserved r0
        stmia   r0, {r5 - r7}           @ lr_<exception>,
                                        @ cpsr_<exception>, "old_r0"

        mov     r0, sp
        b       bad_mode
ENDPROC(__und_invalid)

/*
 * SVC mode handlers
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define SPFIX(code...) code
#else
#define SPFIX(code...)
#endif

        .macro  svc_entry, stack_hole=0
 UNWIND(.fnstart                )
 UNWIND(.save {r0 - pc}         )
        sub     sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
#ifdef CONFIG_THUMB2_KERNEL
 SPFIX( str     r0, [sp]        )       @ temporarily saved
 SPFIX( mov     r0, sp          )
 SPFIX( tst     r0, #4          )       @ test original stack alignment
 SPFIX( ldr     r0, [sp]        )       @ restored
#else
 SPFIX( tst     sp, #4          )
#endif
 SPFIX( subeq   sp, sp, #4      )
        stmia   sp, {r1 - r12}

        ldmia   r0, {r1 - r3}
        add     r5, sp, #S_SP - 4       @ here for interlock avoidance
        mov     r4, #-1                 @  ""  ""      ""       ""
        add     r0, sp, #(S_FRAME_SIZE + \stack_hole - 4)
 SPFIX( addeq   r0, r0, #4      )
        str     r1, [sp, #-4]!
                                        @ save the "real" r0 copied
                                        @ from the exception stack

        mov     r1, lr

        @
        @ We are now ready to fill in the remaining blanks on the stack:
        @
        @  r0 - sp_svc
        @  r1 - lr_svc
        @  r2 - lr_<exception>, already fixed up for correct return/restart
        @  r3 - spsr_<exception>
        @  r4 - orig_r0 (see pt_regs definition in ptrace.h)
        @
        stmia   r5, {r0 - r4}
        .endm

        .align  5
__dabt_svc:
        svc_entry

        @
        @ get ready to re-enable interrupts if appropriate
        @
        mrs     r9, cpsr
        tst     r3, #PSR_I_BIT
        biceq   r9, r9, #PSR_I_BIT

        @
        @ Call the processor-specific abort handler:
        @
        @  r2 - aborted context pc
        @  r3 - aborted context cpsr
        @
        @ The abort handler must return the aborted address in r0, and
        @ the fault status register in r1.  r9 must be preserved.
        @
#ifdef MULTI_DABORT
        ldr     r4, .LCprocfns
        mov     lr, pc
        ldr     pc, [r4, #PROCESSOR_DABT_FUNC]
#else
        bl      CPU_DABORT_HANDLER
#endif

        @
        @ set desired IRQ state, then call main handler
        @
        msr     cpsr_c, r9
        mov     r2, sp
        bl      do_DataAbort

        @
        @ IRQs off again before pulling preserved data off the stack
        @
        disable_irq_notrace

        @
        @ restore SPSR and restart the instruction
        @
        ldr     r2, [sp, #S_PSR]
        svc_exit r2                             @ return from exception
 UNWIND(.fnend          )
ENDPROC(__dabt_svc)

        .align  5
__irq_svc:
        svc_entry

#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
#endif
#ifdef CONFIG_PREEMPT
        get_thread_info tsk
        ldr     r8, [tsk, #TI_PREEMPT]          @ get preempt count
        add     r7, r8, #1                      @ increment it
        str     r7, [tsk, #TI_PREEMPT]
#endif

        irq_handler
#ifdef CONFIG_PREEMPT
        str     r8, [tsk, #TI_PREEMPT]          @ restore preempt count
        ldr     r0, [tsk, #TI_FLAGS]            @ get flags
        teq     r8, #0                          @ if preempt count != 0
        movne   r0, #0                          @ force flags to 0
        tst     r0, #_TIF_NEED_RESCHED
        blne    svc_preempt
#endif
        ldr     r4, [sp, #S_PSR]                @ irqs are already disabled
#ifdef CONFIG_TRACE_IRQFLAGS
        tst     r4, #PSR_I_BIT
        bleq    trace_hardirqs_on
#endif
        svc_exit r4                             @ return from exception
 UNWIND(.fnend          )
ENDPROC(__irq_svc)

        .ltorg

#ifdef CONFIG_PREEMPT
svc_preempt:
        mov     r8, lr
1:      bl      preempt_schedule_irq            @ irq en/disable is done inside
        ldr     r0, [tsk, #TI_FLAGS]            @ get new task's TI_FLAGS
        tst     r0, #_TIF_NEED_RESCHED
        moveq   pc, r8                          @ go again
        b       1b
#endif

        .align  5
__und_svc:
#ifdef CONFIG_KPROBES
        @ If a kprobe is about to simulate a "stmdb sp..." instruction,
        @ it obviously needs free stack space which then will belong to
        @ the saved context.
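        @ A 64-byte hole is presumably sized for the worst case here,
        @ i.e. an stmdb pushing all 16 registers (16 * 4 = 64 bytes).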
        svc_entry 64
#else
        svc_entry
#endif

        @
        @ call emulation code, which returns using r9 if it has emulated
        @ the instruction, or the more conventional lr if we are to treat
        @ this as a real undefined instruction
        @
        @  r0 - instruction
        @
#ifndef CONFIG_THUMB2_KERNEL
        ldr     r0, [r2, #-4]
#else
        ldrh    r0, [r2, #-2]                   @ Thumb instruction at LR - 2
        and     r9, r0, #0xf800
        cmp     r9, #0xe800                     @ 32-bit instruction if xx >= 0
        ldrhhs  r9, [r2]                        @ bottom 16 bits
        orrhs   r0, r9, r0, lsl #16
#endif
        adr     r9, BSYM(1f)
        bl      call_fpe

        mov     r0, sp                          @ struct pt_regs *regs
        bl      do_undefinstr

        @
        @ IRQs off again before pulling preserved data off the stack
        @
1:      disable_irq_notrace

        @
        @ restore SPSR and restart the instruction
        @
        ldr     r2, [sp, #S_PSR]                @ Get SVC cpsr
        svc_exit r2                             @ return from exception
 UNWIND(.fnend          )
ENDPROC(__und_svc)

        .align  5
__pabt_svc:
        svc_entry

        @
        @ re-enable interrupts if appropriate
        @
        mrs     r9, cpsr
        tst     r3, #PSR_I_BIT
        biceq   r9, r9, #PSR_I_BIT

        mov     r0, r2                          @ pass address of aborted instruction.
#ifdef MULTI_PABORT
        ldr     r4, .LCprocfns
        mov     lr, pc
        ldr     pc, [r4, #PROCESSOR_PABT_FUNC]
#else
        bl      CPU_PABORT_HANDLER
#endif
        msr     cpsr_c, r9                      @ Maybe enable interrupts
        mov     r2, sp                          @ regs
        bl      do_PrefetchAbort                @ call abort handler

        @
        @ IRQs off again before pulling preserved data off the stack
        @
        disable_irq_notrace

        @
        @ restore SPSR and restart the instruction
        @
        ldr     r2, [sp, #S_PSR]
        svc_exit r2                             @ return from exception
 UNWIND(.fnend          )
ENDPROC(__pabt_svc)

        .align  5
.LCcralign:
        .word   cr_alignment
#ifdef MULTI_DABORT
.LCprocfns:
        .word   processor
#endif
.LCfp:
        .word   fp_enter

/*
 * User mode handlers
 *
 * EABI note: sp_svc is always 64-bit aligned here, so S_FRAME_SIZE must be too
 */

#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5) && (S_FRAME_SIZE & 7)
#error "sizeof(struct pt_regs) must be a multiple of 8"
#endif

        .macro  usr_entry
 UNWIND(.fnstart        )
 UNWIND(.cantunwind     )       @ don't unwind the user space
        sub     sp, sp, #S_FRAME_SIZE
 ARM(   stmib   sp, {r1 - r12}  )
 THUMB( stmia   sp, {r0 - r12}  )

        ldmia   r0, {r1 - r3}
        add     r0, sp, #S_PC           @ here for interlock avoidance
        mov     r4, #-1                 @  ""  ""     ""        ""

        str     r1, [sp]                @ save the "real" r0 copied
                                        @ from the exception stack

        @
        @ We are now ready to fill in the remaining blanks on the stack:
        @
        @  r2 - lr_<exception>, already fixed up for correct return/restart
        @  r3 - spsr_<exception>
        @  r4 - orig_r0 (see pt_regs definition in ptrace.h)
        @
        @ Also, separately save sp_usr and lr_usr
        @
        stmia   r0, {r2 - r4}
 ARM(   stmdb   r0, {sp, lr}^                   )
 THUMB( store_user_sp_lr r0, r1, S_SP - S_PC    )

        @
        @ Enable the alignment trap while in kernel mode
        @
        alignment_trap r0

        @
        @ Clear FP to mark the first stack frame
        @
        zero_fp
        .endm

        .macro  kuser_cmpxchg_check
#if __LINUX_ARM_ARCH__ < 6 && !defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
#ifndef CONFIG_MMU
#warning "NPTL on non MMU needs fixing"
#else
        @ Make sure our user space atomic helper is restarted
        @ if it was interrupted in a critical region.  Here we
        @ perform a quick test inline since it should be false
        @ 99.9999% of the time.  The rest is done out of line.
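        @ The interrupted pc (r2) can only lie inside the kuser helper
        @ page when it is at or above TASK_SIZE, so the cheap compare
        @ below filters out the common case before taking the fixup path.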
        cmp     r2, #TASK_SIZE
        blhs    kuser_cmpxchg_fixup
#endif
#endif
        .endm

        .align  5
__dabt_usr:
        usr_entry
        kuser_cmpxchg_check

        @
        @ Call the processor-specific abort handler:
        @
        @  r2 - aborted context pc
        @  r3 - aborted context cpsr
        @
        @ The abort handler must return the aborted address in r0, and
        @ the fault status register in r1.
        @
#ifdef MULTI_DABORT
        ldr     r4, .LCprocfns
        mov     lr, pc
        ldr     pc, [r4, #PROCESSOR_DABT_FUNC]
#else
        bl      CPU_DABORT_HANDLER
#endif

        @
        @ IRQs on, then call the main handler
        @
        enable_irq
        mov     r2, sp
        adr     lr, BSYM(ret_from_exception)
        b       do_DataAbort
 UNWIND(.fnend          )
ENDPROC(__dabt_usr)

        .align  5
__irq_usr:
        usr_entry
        kuser_cmpxchg_check

        get_thread_info tsk
#ifdef CONFIG_PREEMPT
        ldr     r8, [tsk, #TI_PREEMPT]          @ get preempt count
        add     r7, r8, #1                      @ increment it
        str     r7, [tsk, #TI_PREEMPT]
#endif

        irq_handler
#ifdef CONFIG_PREEMPT
        ldr     r0, [tsk, #TI_PREEMPT]
        str     r8, [tsk, #TI_PREEMPT]
        teq     r0, r7
 ARM(   strne   r0, [r0, -r0]   )
 THUMB( movne   r0, #0          )
 THUMB( strne   r0, [r0]        )
#endif

        mov     why, #0
        b       ret_to_user
 UNWIND(.fnend          )
ENDPROC(__irq_usr)

        .ltorg

        .align  5
__und_usr:
        usr_entry

        @
        @ fall through to the emulation code, which returns using r9 if
        @ it has emulated the instruction, or the more conventional lr
        @ if we are to treat this as a real undefined instruction
        @
        @  r0 - instruction
        @
        adr     r9, BSYM(ret_from_exception)
        adr     lr, BSYM(__und_usr_unknown)
        tst     r3, #PSR_T_BIT                  @ Thumb mode?
        itet    eq                              @ explicit IT needed for the 1f label
        subeq   r4, r2, #4                      @ ARM instr at LR - 4
        subne   r4, r2, #2                      @ Thumb instr at LR - 2
1:      ldreqt  r0, [r4]
#ifdef CONFIG_CPU_ENDIAN_BE8
        reveq   r0, r0                          @ little endian instruction
#endif
        beq     call_fpe
        @ Thumb instruction
#if __LINUX_ARM_ARCH__ >= 7
2:
 ARM(   ldrht   r5, [r4], #2    )
 THUMB( ldrht   r5, [r4]        )
 THUMB( add     r4, r4, #2      )
        and     r0, r5, #0xf800                 @ mask bits 111x x... .... ....
        cmp     r0, #0xe800                     @ 32bit instruction if xx != 0
        blo     __und_usr_unknown
3:      ldrht   r0, [r4]
        add     r2, r2, #2                      @ r2 is PC + 2, make it PC + 4
        orr     r0, r0, r5, lsl #16
#else
        b       __und_usr_unknown
#endif
 UNWIND(.fnend          )
ENDPROC(__und_usr)

        @
        @ fallthrough to call_fpe
        @

/*
 * The out of line fixup for the ldrt above.
 */
        .pushsection .fixup, "ax"
4:      mov     pc, r9
        .popsection
        .pushsection __ex_table,"a"
        .long   1b, 4b
#if __LINUX_ARM_ARCH__ >= 7
        .long   2b, 4b
        .long   3b, 4b
#endif
        .popsection

/*
 * Check whether the instruction is a co-processor instruction.
 * If yes, we need to call the relevant co-processor handler.
 *
 * Note that we don't do a full check here for the co-processor
 * instructions; all instructions with bit 27 set are well
 * defined.  The only instructions that should fault are the
 * co-processor instructions.  However, we have to watch out
 * for the ARM6/ARM7 SWI bug.
 *
 * NEON is a special case that has to be handled here.  Not all
 * NEON instructions are co-processor instructions, so we have
 * to make a special case of checking for them.  Plus, there are
 * five groups of them, so we have a table of mask/opcode pairs
 * to check against, and if any match then we branch off into the
 * NEON handler code.
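 *
 * Roughly, in C terms, the table walk below behaves like this sketch
 * (names are illustrative only; a match hands the instruction to do_vfp):
 *
 *      while ((mask = *table++) != 0) {
 *              opcode = *table++;
 *              if ((insn & mask) == opcode)
 *                      goto neon;
 *      }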
 *
 * Emulators may wish to make use of the following registers:
 *  r0  = instruction opcode.
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = this thread's thread_info structure.
 *  lr  = unrecognised instruction return address
 */
        @
        @ Fall-through from Thumb-2 __und_usr
        @
#ifdef CONFIG_NEON
        adr     r6, .LCneon_thumb_opcodes
        b       2f
#endif
call_fpe:
#ifdef CONFIG_NEON
        adr     r6, .LCneon_arm_opcodes
2:
        ldr     r7, [r6], #4                    @ mask value
        cmp     r7, #0                          @ end mask?
        beq     1f
        and     r8, r0, r7
        ldr     r7, [r6], #4                    @ opcode bits matching in mask
        cmp     r8, r7                          @ NEON instruction?
        bne     2b
        get_thread_info r10
        mov     r7, #1
        strb    r7, [r10, #TI_USED_CP + 10]     @ mark CP#10 as used
        strb    r7, [r10, #TI_USED_CP + 11]     @ mark CP#11 as used
        b       do_vfp                          @ let VFP handler handle this
1:
#endif
        tst     r0, #0x08000000                 @ only CDP/CPRT/LDC/STC have bit 27
        tstne   r0, #0x04000000                 @ bit 26 set on both ARM and Thumb-2
#if defined(CONFIG_CPU_ARM610) || defined(CONFIG_CPU_ARM710)
        and     r8, r0, #0x0f000000             @ mask out op-code bits
        teqne   r8, #0x0f000000                 @ SWI (ARM6/7 bug)?
#endif
        moveq   pc, lr
        get_thread_info r10                     @ get current thread
        and     r8, r0, #0x00000f00             @ mask out CP number
 THUMB( lsr     r8, r8, #8      )
        mov     r7, #1
        add     r6, r10, #TI_USED_CP
 ARM(   strb    r7, [r6, r8, lsr #8]    )       @ set appropriate used_cp[]
 THUMB( strb    r7, [r6, r8]            )       @ set appropriate used_cp[]
#ifdef CONFIG_IWMMXT
        @ Test if we need to give access to iWMMXt coprocessors
        ldr     r5, [r10, #TI_FLAGS]
        rsbs    r7, r8, #(1 << 8)               @ CP 0 or 1 only
        movcss  r7, r5, lsr #(TIF_USING_IWMMXT + 1)
        bcs     iwmmxt_task_enable
#endif
 ARM(   add     pc, pc, r8, lsr #6      )
 THUMB( lsl     r8, r8, #2              )
 THUMB( add     pc, r8                  )
        nop

        movw_pc lr                              @ CP#0
        W(b)    do_fpe                          @ CP#1 (FPE)
        W(b)    do_fpe                          @ CP#2 (FPE)
        movw_pc lr                              @ CP#3
#ifdef CONFIG_CRUNCH
        b       crunch_task_enable              @ CP#4 (MaverickCrunch)
        b       crunch_task_enable              @ CP#5 (MaverickCrunch)
        b       crunch_task_enable              @ CP#6 (MaverickCrunch)
#else
        movw_pc lr                              @ CP#4
        movw_pc lr                              @ CP#5
        movw_pc lr                              @ CP#6
#endif
        movw_pc lr                              @ CP#7
        movw_pc lr                              @ CP#8
        movw_pc lr                              @ CP#9
#ifdef CONFIG_VFP
        W(b)    do_vfp                          @ CP#10 (VFP)
        W(b)    do_vfp                          @ CP#11 (VFP)
#else
        movw_pc lr                              @ CP#10 (VFP)
        movw_pc lr                              @ CP#11 (VFP)
#endif
        movw_pc lr                              @ CP#12
        movw_pc lr                              @ CP#13
        movw_pc lr                              @ CP#14 (Debug)
        movw_pc lr                              @ CP#15 (Control)

#ifdef CONFIG_NEON
        .align  6

.LCneon_arm_opcodes:
        .word   0xfe000000                      @ mask
        .word   0xf2000000                      @ opcode

        .word   0xff100000                      @ mask
        .word   0xf4000000                      @ opcode

        .word   0x00000000                      @ mask
        .word   0x00000000                      @ opcode

.LCneon_thumb_opcodes:
        .word   0xef000000                      @ mask
        .word   0xef000000                      @ opcode

        .word   0xff100000                      @ mask
        .word   0xf9000000                      @ opcode

        .word   0x00000000                      @ mask
        .word   0x00000000                      @ opcode
#endif

do_fpe:
        enable_irq
        ldr     r4, .LCfp
        add     r10, r10, #TI_FPSTATE           @ r10 = workspace
        ldr     pc, [r4]                        @ Call FP module USR entry point

/*
 * The FP module is called with these registers set:
 *  r0  = instruction
 *  r2  = PC+4
 *  r9  = normal "successful" return address
 *  r10 = FP workspace
 *  lr  = unrecognised FP instruction return address
 */

        .pushsection .data
ENTRY(fp_enter)
        .word   no_fp
        .popsection

ENTRY(no_fp)
        mov     pc, lr
ENDPROC(no_fp)

__und_usr_unknown:
        enable_irq
        mov     r0, sp
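        @ r0 = pt_regs; do_undefinstr() will typically raise SIGILL for
        @ user code and then return through ret_from_exception.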
        adr     lr, BSYM(ret_from_exception)
        b       do_undefinstr
ENDPROC(__und_usr_unknown)

        .align  5
__pabt_usr:
        usr_entry

        mov     r0, r2                          @ pass address of aborted instruction.
#ifdef MULTI_PABORT
        ldr     r4, .LCprocfns
        mov     lr, pc
        ldr     pc, [r4, #PROCESSOR_PABT_FUNC]
#else
        bl      CPU_PABORT_HANDLER
#endif
        enable_irq                              @ Enable interrupts
        mov     r2, sp                          @ regs
        bl      do_PrefetchAbort                @ call abort handler
 UNWIND(.fnend          )
        /* fall through */
/*
 * This is the return code to user mode for abort handlers
 */
ENTRY(ret_from_exception)
 UNWIND(.fnstart        )
 UNWIND(.cantunwind     )
        get_thread_info tsk
        mov     why, #0
        b       ret_to_user
 UNWIND(.fnend          )
ENDPROC(__pabt_usr)
ENDPROC(ret_from_exception)

/*
 * Register switch for ARMv3 and ARMv4 processors
 * r0 = previous task_struct, r1 = previous thread_info, r2 = next thread_info
 * previous and next are guaranteed not to be the same.
 */
ENTRY(__switch_to)
 UNWIND(.fnstart        )
 UNWIND(.cantunwind     )
        add     ip, r1, #TI_CPU_SAVE
        ldr     r3, [r2, #TI_TP_VALUE]
 ARM(   stmia   ip!, {r4 - sl, fp, sp, lr} )    @ Store most regs on stack
 THUMB( stmia   ip!, {r4 - sl, fp}         )    @ Store most regs on stack
 THUMB( str     sp, [ip], #4               )
 THUMB( str     lr, [ip], #4               )
#ifdef CONFIG_MMU
        ldr     r6, [r2, #TI_CPU_DOMAIN]
#endif
        set_tls r3, r4, r5
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
        ldr     r7, [r2, #TI_TASK]
        ldr     r8, =__stack_chk_guard
        ldr     r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_MMU
        mcr     p15, 0, r6, c3, c0, 0           @ Set domain register
#endif
        mov     r5, r0
        add     r4, r2, #TI_CPU_SAVE
        ldr     r0, =thread_notify_head
        mov     r1, #THREAD_NOTIFY_SWITCH
        bl      atomic_notifier_call_chain
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
        str     r7, [r8]
#endif
 THUMB( mov     ip, r4                     )
        mov     r0, r5
 ARM(   ldmia   r4, {r4 - sl, fp, sp, pc}  )    @ Load all regs saved previously
 THUMB( ldmia   ip!, {r4 - sl, fp}         )    @ Load all regs saved previously
 THUMB( ldr     sp, [ip], #4               )
 THUMB( ldr     pc, [ip]                   )
 UNWIND(.fnend          )
ENDPROC(__switch_to)

        __INIT

/*
 * User helpers.
 *
 * These are segments of kernel-provided user code reachable from user space
 * at a fixed address in kernel memory.  This is used to provide user space
 * with some operations which require kernel help because of features and/or
 * instructions not implemented natively by many ARM CPUs.  The idea is for
 * this code to be executed directly in user mode for best efficiency, but it
 * is too intimate with the kernel counterpart to be left to user libraries.
 * In fact this code might even differ from one CPU to another depending on
 * the available instruction set, or restrictions such as those on SMP
 * systems.  In other words, the kernel reserves the right to change this
 * code as needed without warning.  Only the entry points and their results
 * are guaranteed to be stable.
 *
 * Each segment is 32-byte aligned and will be moved to the top of the high
 * vector page.  New segments (if ever needed) must be added in front of
 * existing ones.  This mechanism should be used only for things that are
 * really small and justified, and not be abused freely.
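 *
 * Before relying on a given helper, user space can check the helper count
 * published at 0xffff0ffc (__kernel_helper_version, defined at the end of
 * this area).  A minimal sketch, assuming helper #n needs a version of at
 * least n (get_tls = 1, cmpxchg = 2, memory_barrier = 3); the fallback
 * name is purely illustrative:
 *
 *      #define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 *      if (__kernel_helper_version < 2)
 *              use_locked_fallback();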
 *
 * User space is expected to implement those things inline when optimizing
 * for a processor that has the necessary native support, but only if such
 * resulting binaries are already going to be incompatible with earlier ARM
 * processors due to the use of unsupported instructions other than what
 * is provided here.  In other words, don't make binaries unable to run on
 * earlier processors just for the sake of not using these kernel helpers
 * if your compiled code is not going to use the new instructions for other
 * purposes.
 */
 THUMB( .arm    )

        .macro  usr_ret, reg
#ifdef CONFIG_ARM_THUMB
        bx      \reg
#else
        mov     pc, \reg
#endif
        .endm

        .align  5
        .globl  __kuser_helper_start
__kuser_helper_start:

/*
 * Reference prototype:
 *
 *      void __kernel_memory_barrier(void)
 *
 * Input:
 *
 *      lr = return address
 *
 * Output:
 *
 *      none
 *
 * Clobbered:
 *
 *      none
 *
 * Definition and user space usage example:
 *
 *      typedef void (__kernel_dmb_t)(void);
 *      #define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
 *
 * Apply any needed memory barrier to preserve consistency with data modified
 * manually and __kuser_cmpxchg usage.
 *
 * This could be used as follows:
 *
 * #define __kernel_dmb() \
 *         asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
 *              : : : "r0", "lr","cc" )
 */

__kuser_memory_barrier:                         @ 0xffff0fa0
        smp_dmb
        usr_ret lr

        .align  5

/*
 * Reference prototype:
 *
 *      int __kernel_cmpxchg(int oldval, int newval, int *ptr)
 *
 * Input:
 *
 *      r0 = oldval
 *      r1 = newval
 *      r2 = ptr
 *      lr = return address
 *
 * Output:
 *
 *      r0 = returned value (zero or non-zero)
 *      C flag = set if r0 == 0, clear if r0 != 0
 *
 * Clobbered:
 *
 *      r3, ip, flags
 *
 * Definition and user space usage example:
 *
 *      typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
 *      #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 * Notes:
 *
 *    - This routine already includes memory barriers as needed.
 *
 * For example, a user space atomic_add implementation could look like this:
 *
 * #define atomic_add(ptr, val) \
 *      ({ register unsigned int *__ptr asm("r2") = (ptr); \
 *         register unsigned int __result asm("r1"); \
 *         asm volatile ( \
 *             "1: @ atomic_add\n\t" \
 *             "ldr r0, [r2]\n\t" \
 *             "mov r3, #0xffff0fff\n\t" \
 *             "add lr, pc, #4\n\t" \
 *             "add r1, r0, %2\n\t" \
 *             "add pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \
 *             "bcc 1b" \
 *             : "=&r" (__result) \
 *             : "r" (__ptr), "rIL" (val) \
 *             : "r0","r3","ip","lr","cc","memory" ); \
 *         __result; })
 */

__kuser_cmpxchg:                                @ 0xffff0fc0

#if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)

        /*
         * Poor you.  No fast solution possible...
         * The kernel itself must perform the operation.
         * A special ghost syscall is used for that (see traps.c).
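         *
         * Roughly, the swi below traps into the kernel, which performs the
         * compare-and-swap on user space's behalf and produces the usual
         * result in r0 and the C flag; semantically something like this
         * sketch (the exact non-zero failure value is unspecified):
         *
         *      if (*ptr == oldval) { *ptr = newval; return 0; }
         *      return nonzero;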
         */
        stmfd   sp!, {r7, lr}
        ldr     r7, =1f                         @ it's 20 bits
        swi     __ARM_NR_cmpxchg
        ldmfd   sp!, {r7, pc}
1:      .word   __ARM_NR_cmpxchg

#elif __LINUX_ARM_ARCH__ < 6

#ifdef CONFIG_MMU

        /*
         * The only thing that can break atomicity in this cmpxchg
         * implementation is either an IRQ or a data abort exception
         * causing another process/thread to be scheduled in the middle
         * of the critical sequence.  To prevent this, code is added to
         * the IRQ and data abort exception handlers to set the pc back
         * to the beginning of the critical section if it is found to be
         * within that critical section (see kuser_cmpxchg_fixup).
         */
1:      ldr     r3, [r2]                        @ load current val
        subs    r3, r3, r0                      @ compare with oldval
2:      streq   r1, [r2]                        @ store newval if eq
        rsbs    r0, r3, #0                      @ set return val and C flag
        usr_ret lr

        .text
kuser_cmpxchg_fixup:
        @ Called from kuser_cmpxchg_check macro.
        @ r2 = address of interrupted insn (must be preserved).
        @ sp = saved regs. r7 and r8 are clobbered.
        @ 1b = first critical insn, 2b = last critical insn.
        @ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b.
        mov     r7, #0xffff0fff
        sub     r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
        subs    r8, r2, r7
        rsbcss  r8, r8, #(2b - 1b)
        strcs   r7, [sp, #S_PC]
        mov     pc, lr
        .previous

#else
#warning "NPTL on non MMU needs fixing"
        mov     r0, #-1
        adds    r0, r0, #0
        usr_ret lr
#endif

#else

        smp_dmb
1:      ldrex   r3, [r2]
        subs    r3, r3, r0
        strexeq r3, r1, [r2]
        teqeq   r3, #1
        beq     1b
        rsbs    r0, r3, #0
        /* beware -- each __kuser slot must be 8 instructions max */
#ifdef CONFIG_SMP
        b       __kuser_memory_barrier
#else
        usr_ret lr
#endif

#endif

        .align  5

/*
 * Reference prototype:
 *
 *      int __kernel_get_tls(void)
 *
 * Input:
 *
 *      lr = return address
 *
 * Output:
 *
 *      r0 = TLS value
 *
 * Clobbered:
 *
 *      none
 *
 * Definition and user space usage example:
 *
 *      typedef int (__kernel_get_tls_t)(void);
 *      #define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0)
 *
 * Get the TLS value as previously set via the __ARM_NR_set_tls syscall.
 *
 * This could be used as follows:
 *
 * #define __kernel_get_tls() \
 *      ({ register unsigned int __val asm("r0"); \
 *         asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \
 *              : "=r" (__val) : : "lr","cc" ); \
 *         __val; })
 */

__kuser_get_tls:                                @ 0xffff0fe0
        ldr     r0, [pc, #(16 - 8)]             @ read TLS, set in kuser_get_tls_init
        usr_ret lr
        mrc     p15, 0, r0, c13, c0, 3          @ 0xffff0fe8 hardware TLS code
        .rep    4
        .word   0                               @ 0xffff0ff0 software TLS value, then
        .endr                                   @ pad up to __kuser_helper_version

/*
 * Reference declaration:
 *
 *      extern unsigned int __kernel_helper_version;
 *
 * Definition and user space usage example:
 *
 *      #define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
 *
 * User space may read this to determine the current number of helpers
 * available.
 */

__kuser_helper_version:                         @ 0xffff0ffc
        .word   ((__kuser_helper_end - __kuser_helper_start) >> 5)

        .globl  __kuser_helper_end
__kuser_helper_end:

 THUMB( .thumb  )

/*
 * Vector stubs.
 *
 * This code is copied to 0xffff0200 so we can use branches in the
 * vectors, rather than ldr's.
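 * Because the stubs are assembled at __stubs_start but executed from
 * their copy at 0xffff0200, each branch in the vector page adds
 * stubs_offset (defined near the end of this file as
 * __vectors_start + 0x200 - __stubs_start) so that the pc-relative
 * branches still land on the relocated stubs.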
 * Note that this code must not exceed 0x300 bytes.
 *
 * Common stub entry macro:
 *   Enter in IRQ mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 *
 * SP points to a minimal amount of processor-private memory, the address
 * of which is copied into r0 for the mode specific abort handler.
 */
        .macro  vector_stub, name, mode, correction=0
        .align  5

vector_\name:
        .if \correction
        sub     lr, lr, #\correction
        .endif

        @
        @ Save r0, lr_<exception> (parent PC) and spsr_<exception>
        @ (parent CPSR)
        @
        stmia   sp, {r0, lr}            @ save r0, lr
        mrs     lr, spsr
        str     lr, [sp, #8]            @ save spsr

        @
        @ Prepare for SVC32 mode.  IRQs remain disabled.
        @
        mrs     r0, cpsr
        eor     r0, r0, #(\mode ^ SVC_MODE | PSR_ISETSTATE)
        msr     spsr_cxsf, r0

        @
        @ the branch table must immediately follow this code
        @
        and     lr, lr, #0x0f
 THUMB( adr     r0, 1f                  )
 THUMB( ldr     lr, [r0, lr, lsl #2]    )
        mov     r0, sp
 ARM(   ldr     lr, [pc, lr, lsl #2]    )
        movs    pc, lr                  @ branch to handler in SVC mode
ENDPROC(vector_\name)

        .align  2
        @ handler addresses follow this label
1:
        .endm

        .globl  __stubs_start
__stubs_start:
/*
 * Interrupt dispatcher
 */
        vector_stub     irq, IRQ_MODE, 4

        .long   __irq_usr                       @  0  (USR_26 / USR_32)
        .long   __irq_invalid                   @  1  (FIQ_26 / FIQ_32)
        .long   __irq_invalid                   @  2  (IRQ_26 / IRQ_32)
        .long   __irq_svc                       @  3  (SVC_26 / SVC_32)
        .long   __irq_invalid                   @  4
        .long   __irq_invalid                   @  5
        .long   __irq_invalid                   @  6
        .long   __irq_invalid                   @  7
        .long   __irq_invalid                   @  8
        .long   __irq_invalid                   @  9
        .long   __irq_invalid                   @  a
        .long   __irq_invalid                   @  b
        .long   __irq_invalid                   @  c
        .long   __irq_invalid                   @  d
        .long   __irq_invalid                   @  e
        .long   __irq_invalid                   @  f

/*
 * Data abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
        vector_stub     dabt, ABT_MODE, 8

        .long   __dabt_usr                      @  0  (USR_26 / USR_32)
        .long   __dabt_invalid                  @  1  (FIQ_26 / FIQ_32)
        .long   __dabt_invalid                  @  2  (IRQ_26 / IRQ_32)
        .long   __dabt_svc                      @  3  (SVC_26 / SVC_32)
        .long   __dabt_invalid                  @  4
        .long   __dabt_invalid                  @  5
        .long   __dabt_invalid                  @  6
        .long   __dabt_invalid                  @  7
        .long   __dabt_invalid                  @  8
        .long   __dabt_invalid                  @  9
        .long   __dabt_invalid                  @  a
        .long   __dabt_invalid                  @  b
        .long   __dabt_invalid                  @  c
        .long   __dabt_invalid                  @  d
        .long   __dabt_invalid                  @  e
        .long   __dabt_invalid                  @  f

/*
 * Prefetch abort dispatcher
 * Enter in ABT mode, spsr = USR CPSR, lr = USR PC
 */
        vector_stub     pabt, ABT_MODE, 4

        .long   __pabt_usr                      @  0  (USR_26 / USR_32)
        .long   __pabt_invalid                  @  1  (FIQ_26 / FIQ_32)
        .long   __pabt_invalid                  @  2  (IRQ_26 / IRQ_32)
        .long   __pabt_svc                      @  3  (SVC_26 / SVC_32)
        .long   __pabt_invalid                  @  4
        .long   __pabt_invalid                  @  5
        .long   __pabt_invalid                  @  6
        .long   __pabt_invalid                  @  7
        .long   __pabt_invalid                  @  8
        .long   __pabt_invalid                  @  9
        .long   __pabt_invalid                  @  a
        .long   __pabt_invalid                  @  b
        .long   __pabt_invalid                  @  c
        .long   __pabt_invalid                  @  d
        .long   __pabt_invalid                  @  e
        .long   __pabt_invalid                  @  f

/*
 * Undef instr entry dispatcher
 * Enter in UND mode, spsr = SVC/USR CPSR, lr = SVC/USR PC
 */
        vector_stub     und, UND_MODE

        .long   __und_usr                       @  0  (USR_26 / USR_32)
        .long   __und_invalid                   @  1  (FIQ_26 / FIQ_32)
        .long   __und_invalid                   @  2  (IRQ_26 / IRQ_32)
        .long   __und_svc                       @  3  (SVC_26 / SVC_32)
        .long   __und_invalid                   @  4
        .long   __und_invalid                   @  5
        .long   __und_invalid                   @  6
        .long   __und_invalid                   @  7
        .long   __und_invalid                   @  8
        .long   __und_invalid                   @  9
        .long   __und_invalid                   @  a
        .long   __und_invalid                   @  b
        .long   __und_invalid                   @  c
        .long   __und_invalid                   @  d
        .long   __und_invalid                   @  e
        .long   __und_invalid                   @  f

        .align  5

/*=============================================================================
 * Undefined FIQs
 *-----------------------------------------------------------------------------
 * Enter in FIQ mode, spsr = ANY CPSR, lr = ANY PC
 * MUST PRESERVE SVC SPSR, but need to switch to SVC mode to show our msg.
 * Basically to switch modes, we *HAVE* to clobber one register...  brain
 * damage alert!  I don't think that we can execute any code in here in any
 * other mode than FIQ...  Ok you can switch to another mode, but you can't
 * get out of that mode without clobbering one register.
 */
vector_fiq:
        disable_fiq
        subs    pc, lr, #4

/*=============================================================================
 * Address exception handler
 *-----------------------------------------------------------------------------
 * These aren't too critical.
 * (they're not supposed to happen, and won't happen in 32-bit data mode).
 */

vector_addrexcptn:
        b       vector_addrexcptn

/*
 * We group all the following data together to optimise
 * for CPUs with separate I & D caches.
 */
        .align  5

.LCvswi:
        .word   vector_swi

        .globl  __stubs_end
__stubs_end:

        .equ    stubs_offset, __vectors_start + 0x200 - __stubs_start

        .globl  __vectors_start
__vectors_start:
 ARM(   swi     SYS_ERROR0      )
 THUMB( svc     #0              )
 THUMB( nop                     )
        W(b)    vector_und + stubs_offset
        W(ldr)  pc, .LCvswi + stubs_offset
        W(b)    vector_pabt + stubs_offset
        W(b)    vector_dabt + stubs_offset
        W(b)    vector_addrexcptn + stubs_offset
        W(b)    vector_irq + stubs_offset
        W(b)    vector_fiq + stubs_offset

        .globl  __vectors_end
__vectors_end:

        .data

        .globl  cr_alignment
        .globl  cr_no_alignment
cr_alignment:
        .space  4
cr_no_alignment:
        .space  4