/*
 *  linux/arch/arm/boot/compressed/head.S
 *
 *  Copyright (C) 1996-2002 Russell King
 *  Copyright (C) 2004 Hyok S. Choi (MPU support)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>

/*
 * Debugging stuff
 *
 * Note that these macros must not contain any code which is not
 * 100% relocatable.  Any attempt to do so will result in a crash.
 * Please select one of the following when turning on debugging.
 */
#ifdef DEBUG

#if defined(CONFIG_DEBUG_ICEDCC)

#ifdef CONFIG_CPU_V6
		.macro	loadsp, rb
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c0, c5, 0
		.endm
#else
		.macro	loadsp, rb
		.endm
		.macro	writeb, ch, rb
		mcr	p14, 0, \ch, c0, c1, 0
		.endm
#endif

#else

#include <asm/arch/debug-macro.S>

		.macro	writeb,	ch, rb
		senduart \ch, \rb
		.endm

#if defined(CONFIG_ARCH_SA1100)
		.macro	loadsp, rb
		mov	\rb, #0x80000000	@ physical base address
#ifdef CONFIG_DEBUG_LL_SER3
		add	\rb, \rb, #0x00050000	@ Ser3
#else
		add	\rb, \rb, #0x00010000	@ Ser1
#endif
		.endm
#elif defined(CONFIG_ARCH_S3C2410)
		.macro	loadsp, rb
		mov	\rb, #0x50000000
		add	\rb, \rb, #0x4000 * CONFIG_S3C2410_LOWLEVEL_UART_PORT
		.endm
#else
		.macro	loadsp,	rb
		addruart \rb
		.endm
#endif
#endif
#endif

		.macro	kputc,val
		mov	r0, \val
		bl	putc
		.endm

		.macro	kphex,val,len
		mov	r0, \val
		mov	r1, #\len
		bl	phex
		.endm

		.macro	debug_reloc_start
#ifdef DEBUG
		kputc	#'\n'
		kphex	r6, 8		/* processor id */
		kputc	#':'
		kphex	r7, 8		/* architecture id */
#ifdef CONFIG_CPU_CP15
		kputc	#':'
		mrc	p15, 0, r0, c1, c0
		kphex	r0, 8		/* control reg */
#endif
		kputc	#'\n'
		kphex	r5, 8		/* decompressed kernel start */
		kputc	#'-'
		kphex	r9, 8		/* decompressed kernel end  */
		kputc	#'>'
		kphex	r4, 8		/* kernel execution address */
		kputc	#'\n'
#endif
		.endm

		.macro	debug_reloc_end
#ifdef DEBUG
		kphex	r5, 8		/* end of kernel */
		kputc	#'\n'
		mov	r0, r4
		bl	memdump		/* dump 256 bytes at start of kernel */
#endif
		.endm

		.section ".start", #alloc, #execinstr
/*
 * sort out different calling conventions
 */
		.align
start:
		.type	start,#function
		.rept	8
		mov	r0, r0
		.endr

		b	1f
		.word	0x016f2818		@ Magic numbers to help the loader
		.word	start			@ absolute load/run zImage address
		.word	_edata			@ zImage end address
1:		mov	r7, r1			@ save architecture ID
		mov	r8, r2			@ save atags pointer

#ifndef __ARM_ARCH_2__
		/*
		 * Booting from Angel - need to enter SVC mode and disable
		 * FIQs/IRQs (numeric definitions from angel arm.h source).
		 * We only do this if we were in user mode on entry.
		 */
		mrs	r2, cpsr		@ get current mode
		tst	r2, #3			@ not user?
		bne	not_angel
		mov	r0, #0x17		@ angel_SWIreason_EnterSVC
		swi	0x123456		@ angel_SWI_ARM
not_angel:
		mrs	r2, cpsr		@ turn off interrupts to
		orr	r2, r2, #0xc0		@ prevent angel from running
		msr	cpsr_c, r2
#else
		teqp	pc, #0x0c000003		@ turn off interrupts
#endif

		/*
		 * Note that some cache flushing and other stuff may
		 * be needed here - is there an Angel SWI call for this?
		 */

		/*
		 * some architecture specific code can be inserted
		 * by the linker here, but it should preserve r7, r8, and r9.
		 */
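/*
 * How the relocation fixup below works: LC0 (defined further down) holds
 * the link-time addresses of LC0 itself, the BSS, zreladdr, the image
 * start, the GOT and the stack, while "adr r0, LC0" produces LC0's
 * run-time address.  The subtraction therefore leaves the load offset in
 * r0.  Hypothetical example: an image executing 0x8000 bytes above its
 * link address ends up with r0 = 0x8000, and that delta is simply added
 * to the GOT, BSS and stack pointers fixed up below.
 */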
		.text
		adr	r0, LC0
		ldmia	r0, {r1, r2, r3, r4, r5, r6, ip, sp}
		subs	r0, r0, r1		@ calculate the delta offset

						@ if delta is zero, we are
		beq	not_relocated		@ running at the address we
						@ were linked at.

		/*
		 * We're running at a different address.  We need to fix
		 * up various pointers:
		 *   r5 - zImage base address
		 *   r6 - GOT start
		 *   ip - GOT end
		 */
		add	r5, r5, r0
		add	r6, r6, r0
		add	ip, ip, r0

#ifndef CONFIG_ZBOOT_ROM
		/*
		 * If we're running fully PIC === CONFIG_ZBOOT_ROM = n,
		 * we need to fix up pointers into the BSS region.
		 *   r2 - BSS start
		 *   r3 - BSS end
		 *   sp - stack pointer
		 */
		add	r2, r2, r0
		add	r3, r3, r0
		add	sp, sp, r0

		/*
		 * Relocate all entries in the GOT table.
		 */
1:		ldr	r1, [r6, #0]		@ relocate entries in the GOT
		add	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r6], #4		@ C references.
		cmp	r6, ip
		blo	1b
#else

		/*
		 * Relocate entries in the GOT table.  We only relocate
		 * the entries that are outside the (relocated) BSS region.
		 */
1:		ldr	r1, [r6, #0]		@ relocate entries in the GOT
		cmp	r1, r2			@ entry < bss_start ||
		cmphs	r3, r1			@ _end < entry
		addlo	r1, r1, r0		@ table.  This fixes up the
		str	r1, [r6], #4		@ C references.
		cmp	r6, ip
		blo	1b
#endif

not_relocated:	mov	r0, #0
1:		str	r0, [r2], #4		@ clear bss
		str	r0, [r2], #4
		str	r0, [r2], #4
		str	r0, [r2], #4
		cmp	r2, r3
		blo	1b

		/*
		 * The C runtime environment should now be set up
		 * sufficiently.  Turn the cache on, set up some
		 * pointers, and start decompressing.
		 */
		bl	cache_on

		mov	r1, sp			@ malloc space above stack
		add	r2, sp, #0x10000	@ 64k max

/*
 * Check to see if we will overwrite ourselves.
 *   r4 = final kernel address
 *   r5 = start of this image
 *   r2 = end of malloc space (and therefore this image)
 * We basically want:
 *   r4 >= r2 -> OK
 *   r4 + image length <= r5 -> OK
 */
		cmp	r4, r2
		bhs	wont_overwrite
		sub	r3, sp, r5		@ > compressed kernel size
		add	r0, r4, r3, lsl #2	@ allow for 4x expansion
		cmp	r0, r5
		bls	wont_overwrite

		mov	r5, r2			@ decompress after malloc space
		mov	r0, r5
		mov	r3, r7
		bl	decompress_kernel

		add	r0, r0, #127 + 128	@ alignment + stack
		bic	r0, r0, #127		@ align the kernel length
/*
 * r0     = decompressed kernel length
 * r1-r3  = unused
 * r4     = kernel execution address
 * r5     = decompressed kernel start
 * r6     = processor ID
 * r7     = architecture ID
 * r8     = atags pointer
 * r9-r14 = corrupted
 */
		add	r1, r5, r0		@ end of decompressed kernel
		adr	r2, reloc_start
		ldr	r3, LC1
		add	r3, r2, r3
1:		ldmia	r2!, {r9 - r14}		@ copy relocation code
		stmia	r1!, {r9 - r14}
		ldmia	r2!, {r9 - r14}
		stmia	r1!, {r9 - r14}
		cmp	r2, r3
		blo	1b
		add	sp, r1, #128		@ relocate the stack

		bl	cache_clean_flush
		add	pc, r5, r0		@ call relocation code
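/*
 * Note on the jump above: the copy loop placed reloc_start at
 * r5 + r0 (decompressed kernel start + aligned kernel length), so
 * "add pc, r5, r0" transfers control to the relocated copy of the
 * code following reloc_start, which then moves the decompressed
 * kernel down to its execution address.
 */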
/*
 * We're not in danger of overwriting ourselves.  Do this the simple way.
 *
 * r4     = kernel execution address
 * r7     = architecture ID
 */
wont_overwrite:	mov	r0, r4
		mov	r3, r7
		bl	decompress_kernel
		b	call_kernel

		.type	LC0, #object
LC0:		.word	LC0			@ r1
		.word	__bss_start		@ r2
		.word	_end			@ r3
		.word	zreladdr		@ r4
		.word	_start			@ r5
		.word	_got_start		@ r6
		.word	_got_end		@ ip
		.word	user_stack+4096		@ sp
LC1:		.word	reloc_end - reloc_start
		.size	LC0, . - LC0

#ifdef CONFIG_ARCH_RPC
		.globl	params
params:		ldr	r0, =params_phys
		mov	pc, lr
		.ltorg
		.align
#endif

/*
 * Turn on the cache.  We need to set up some page tables so that we
 * can have both the I and D caches on.
 *
 * We place the page tables 16k down from the kernel execution address,
 * and we hope that nothing else is using it.  If we're using it, we
 * will go pop!
 *
 * On entry,
 *  r4 = kernel execution address
 *  r6 = processor ID
 *  r7 = architecture number
 *  r8 = atags pointer
 *  r9 = run-time address of "start"  (???)
 * On exit,
 *  r1, r2, r3, r9, r10, r12 corrupted
 * This routine must preserve:
 *  r4, r5, r6, r7, r8
 */
		.align	5
cache_on:	mov	r3, #8			@ cache_on function
		b	call_cache_fn

__armv4_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting
		mcr	p15, 0, r0, c6, c7, 1

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ D-cache on
		mcr	p15, 0, r0, c2, c0, 1	@ I-cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 1	@ I-access permission
		mcr	p15, 0, r0, c5, c0, 0	@ D-access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ ...I .... ..D. WC.M
		orr	r0, r0, #0x002d		@ .... .... ..1. 11.1
		orr	r0, r0, #0x1000		@ ...1 .... .... ....

		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		mov	r0, #0
		mcr	p15, 0, r0, c7, c5, 0	@ flush(inval) I-Cache
		mcr	p15, 0, r0, c7, c6, 0	@ flush(inval) D-Cache
		mov	pc, lr

__armv3_mpu_cache_on:
		mov	r0, #0x3f		@ 4G, the whole
		mcr	p15, 0, r0, c6, c7, 0	@ PR7 Area Setting

		mov	r0, #0x80		@ PR7
		mcr	p15, 0, r0, c2, c0, 0	@ cache on
		mcr	p15, 0, r0, c3, c0, 0	@ write-buffer on

		mov	r0, #0xc000
		mcr	p15, 0, r0, c5, c0, 0	@ access permission

		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
						@ .... .... .... WC.M
		orr	r0, r0, #0x000d		@ .... .... .... 11.1
		mov	r0, #0
		mcr	p15, 0, r0, c1, c0, 0	@ write control reg

		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr
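/*
 * What __setup_mmu (below) builds, in short: a 16 KB first-level page
 * table placed immediately below the kernel execution address, holding
 * 4096 one-megabyte section entries that map the whole 4 GB space 1:1.
 * Entries use section descriptors of the form 0x12 | (3 << 10) (AP bits),
 * and the cacheable + bufferable bits (0x0c) are switched on only over a
 * 256 MB window assumed to be RAM.  Illustrative example: a cacheable RAM
 * megabyte at physical address P gets the descriptor
 * (P & 0xfff00000) | 0xc1e, while megabytes outside that window keep 0xc12.
 */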
__setup_mmu:	sub	r3, r4, #16384		@ Page directory size
		bic	r3, r3, #0xff		@ Align the pointer
		bic	r3, r3, #0x3f00
/*
 * Initialise the page tables, turning on the cacheable and bufferable
 * bits for the RAM area only.
 */
		mov	r0, r3
		mov	r9, r0, lsr #18
		mov	r9, r9, lsl #18		@ start of RAM
		add	r10, r9, #0x10000000	@ a reasonable RAM size
		mov	r1, #0x12
		orr	r1, r1, #3 << 10
		add	r2, r3, #16384
1:		cmp	r1, r9			@ if virt > start of RAM
		orrhs	r1, r1, #0x0c		@ set cacheable, bufferable
		cmp	r1, r10			@ if virt > end of RAM
		bichs	r1, r1, #0x0c		@ clear cacheable, bufferable
		str	r1, [r0], #4		@ 1:1 mapping
		add	r1, r1, #1048576
		teq	r0, r2
		bne	1b
/*
 * If ever we are running from Flash, then we surely want the cache
 * to be enabled also for our execution instance...  We map 2MB of it
 * so there is no map overlap problem for up to 1 MB compressed kernel.
 * If the execution is in RAM then we would only be duplicating the above.
 */
		mov	r1, #0x1e
		orr	r1, r1, #3 << 10
		mov	r2, pc, lsr #20
		orr	r1, r1, r2, lsl #20
		add	r0, r3, r2, lsl #2
		str	r1, [r0], #4
		add	r1, r1, #1048576
		str	r1, [r0]
		mov	pc, lr

__armv4_mmu_cache_on:
		mov	r12, lr
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mrc	p15, 0, r0, c1, c0, 0	@ read control reg
		orr	r0, r0, #0x5000		@ I-cache enable, RR cache replacement
		orr	r0, r0, #0x0030
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c8, c7, 0	@ flush I,D TLBs
		mov	pc, r12

__arm6_mmu_cache_on:
		mov	r12, lr
		bl	__setup_mmu
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
		mov	r0, #0x30
		bl	__common_mmu_cache_on
		mov	r0, #0
		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
		mov	pc, r12

__common_mmu_cache_on:
#ifndef DEBUG
		orr	r0, r0, #0x000d		@ Write buffer, mmu
#endif
		mov	r1, #-1
		mcr	p15, 0, r3, c2, c0, 0	@ load page table pointer
		mcr	p15, 0, r1, c3, c0, 0	@ load domain access control
		b	1f
		.align	5			@ cache line aligned
1:		mcr	p15, 0, r0, c1, c0, 0	@ load control register
		mrc	p15, 0, r0, c1, c0, 0	@ and read it back to
		sub	pc, lr, r0, lsr #32	@ properly flush pipeline

/*
 * All code following this line is relocatable.  It is relocated by
 * the above code to the end of the decompressed kernel image and
 * executed there.  During this time, we have no stacks.
 *
 * r0     = decompressed kernel length
 * r1-r3  = unused
 * r4     = kernel execution address
 * r5     = decompressed kernel start
 * r6     = processor ID
 * r7     = architecture ID
 * r8     = atags pointer
 * r9-r14 = corrupted
 */
		.align	5
reloc_start:	add	r9, r5, r0
		sub	r9, r9, #128		@ do not copy the stack
		debug_reloc_start
		mov	r1, r4
1:
		.rept	4
		ldmia	r5!, {r0, r2, r3, r10 - r14}	@ relocate kernel
		stmia	r1!, {r0, r2, r3, r10 - r14}
		.endr

		cmp	r5, r9
		blo	1b
		add	sp, r1, #128		@ relocate the stack
		debug_reloc_end

call_kernel:	bl	cache_clean_flush
		bl	cache_off
		mov	r0, #0			@ must be zero
		mov	r1, r7			@ restore architecture number
		mov	r2, r8			@ restore atags pointer
		mov	pc, r4			@ call kernel
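/*
 * The sequence above follows the ARM Linux boot convention: the caches
 * are cleaned and disabled, then the decompressed kernel is entered at
 * r4 with r0 = 0, r1 = architecture (machine) number and r2 = the atags
 * pointer, both preserved since entry.
 */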
/*
 * Here follow the relocatable cache support functions for the
 * various processors.  This is a generic hook for locating an
 * entry and jumping to an instruction at the specified offset
 * from the start of the block.  Please note this is all position
 * independent code.
 *
 *  r1  = corrupted
 *  r2  = corrupted
 *  r3  = block offset
 *  r6  = corrupted
 *  r12 = corrupted
 */

call_cache_fn:	adr	r12, proc_types
#ifdef CONFIG_CPU_CP15
		mrc	p15, 0, r6, c0, c0	@ get processor ID
#else
		ldr	r6, =CONFIG_PROCESSOR_ID
#endif
1:		ldr	r1, [r12, #0]		@ get value
		ldr	r2, [r12, #4]		@ get mask
		eor	r1, r1, r6		@ (real ^ match)
		tst	r1, r2			@       & mask
		addeq	pc, r12, r3		@ call cache function
		add	r12, r12, #4*5
		b	1b

/*
 * Table for cache operations.  This is basically:
 *   - CPU ID match
 *   - CPU ID mask
 *   - 'cache on' method instruction
 *   - 'cache off' method instruction
 *   - 'cache flush' method instruction
 *
 * We match an entry using: ((real_id ^ match) & mask) == 0
 *
 * Writethrough caches generally only need 'on' and 'off'
 * methods.  Writeback caches _must_ have the flush method
 * defined.
 */
		.type	proc_types,#object
proc_types:
		.word	0x41560600		@ ARM6/610
		.word	0xffffffe0
		b	__arm6_mmu_cache_off	@ works, but slow
		b	__arm6_mmu_cache_off
		mov	pc, lr
@		b	__arm6_mmu_cache_on	@ untested
@		b	__arm6_mmu_cache_off
@		b	__armv3_mmu_cache_flush

		.word	0x00000000		@ old ARM ID
		.word	0x0000f000
		mov	pc, lr
		mov	pc, lr
		mov	pc, lr

		.word	0x41007000		@ ARM7/710
		.word	0xfff8fe00
		b	__arm7_mmu_cache_off
		b	__arm7_mmu_cache_off
		mov	pc, lr

		.word	0x41807200		@ ARM720T (writethrough)
		.word	0xffffff00
		b	__armv4_mmu_cache_on
		b	__armv4_mmu_cache_off
		mov	pc, lr

		.word	0x41007400		@ ARM74x
		.word	0xff00ff00
		b	__armv3_mpu_cache_on
		b	__armv3_mpu_cache_off
		b	__armv3_mpu_cache_flush

		.word	0x41009400		@ ARM94x
		.word	0xff00ff00
		b	__armv4_mpu_cache_on
		b	__armv4_mpu_cache_off
		b	__armv4_mpu_cache_flush

		.word	0x00007000		@ ARM7 IDs
		.word	0x0000f000
		mov	pc, lr
		mov	pc, lr
		mov	pc, lr

		@ Everything from here on will be the new ID system.

		.word	0x4401a100		@ sa110 / sa1100
		.word	0xffffffe0
		b	__armv4_mmu_cache_on
		b	__armv4_mmu_cache_off
		b	__armv4_mmu_cache_flush

		.word	0x6901b110		@ sa1110
		.word	0xfffffff0
		b	__armv4_mmu_cache_on
		b	__armv4_mmu_cache_off
		b	__armv4_mmu_cache_flush

		@ These match on the architecture ID

		.word	0x00020000		@ ARMv4T
		.word	0x000f0000
		b	__armv4_mmu_cache_on
		b	__armv4_mmu_cache_off
		b	__armv4_mmu_cache_flush

		.word	0x00050000		@ ARMv5TE
		.word	0x000f0000
		b	__armv4_mmu_cache_on
		b	__armv4_mmu_cache_off
		b	__armv4_mmu_cache_flush

		.word	0x00060000		@ ARMv5TEJ
		.word	0x000f0000
		b	__armv4_mmu_cache_on
		b	__armv4_mmu_cache_off
		b	__armv4_mmu_cache_flush

		.word	0x0007b000		@ ARMv6
		.word	0x0007f000
		b	__armv4_mmu_cache_on
		b	__armv4_mmu_cache_off
		b	__armv6_mmu_cache_flush

		.word	0			@ unrecognised type
		.word	0
		mov	pc, lr
		mov	pc, lr
		mov	pc, lr

		.size	proc_types, . - proc_types
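/*
 * Worked example of the table lookup above: each proc_types entry is
 * five words (20 bytes) - ID, mask, then the 'on', 'off' and 'flush'
 * instructions at offsets 8, 12 and 16.  call_cache_fn receives that
 * offset in r3 (cache_on passes 8, cache_off 12, cache_clean_flush 16)
 * and branches into the first entry whose ((real_id ^ match) & mask)
 * is zero.  A hypothetical SA-1100 reporting ID 0x4401a113 matches the
 * 0x4401a100/0xffffffe0 entry, since 0x13 & 0xe0 == 0, and so uses the
 * __armv4_mmu_* routines.
 */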
/*
 * Turn off the Cache and MMU.  ARMv3 does not support
 * reading the control register, but ARMv4 does.
 *
 * On entry,  r6 = processor ID
 * On exit,   r0, r1, r2, r3, r12 corrupted
 * This routine must preserve: r4, r6, r7
 */
		.align	5
cache_off:	mov	r3, #12			@ cache_off function
		b	call_cache_fn

__armv4_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
		mcr	p15, 0, r0, c7, c6, 0	@ flush D-Cache
		mcr	p15, 0, r0, c7, c5, 0	@ flush I-Cache
		mov	pc, lr

__armv3_mpu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0, 0	@ turn MPU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

__armv4_mmu_cache_off:
		mrc	p15, 0, r0, c1, c0
		bic	r0, r0, #0x000d
		mcr	p15, 0, r0, c1, c0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c7	@ invalidate whole cache v4
		mcr	p15, 0, r0, c8, c7	@ invalidate whole TLB v4
		mov	pc, lr

__arm6_mmu_cache_off:
		mov	r0, #0x00000030		@ ARM6 control reg.
		b	__armv3_mmu_cache_off

__arm7_mmu_cache_off:
		mov	r0, #0x00000070		@ ARM7 control reg.
		b	__armv3_mmu_cache_off

__armv3_mmu_cache_off:
		mcr	p15, 0, r0, c1, c0, 0	@ turn MMU and cache off
		mov	r0, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mcr	p15, 0, r0, c5, c0, 0	@ invalidate whole TLB v3
		mov	pc, lr

/*
 * Clean and flush the cache to maintain consistency.
 *
 * On entry,
 *  r6 = processor ID
 * On exit,
 *  r1, r2, r3, r11, r12 corrupted
 * This routine must preserve:
 *  r0, r4, r5, r6, r7
 */
		.align	5
cache_clean_flush:
		mov	r3, #16
		b	call_cache_fn

__armv4_mpu_cache_flush:
		mov	r2, #1
		mov	r3, #0
		mcr	p15, 0, ip, c7, c6, 0	@ invalidate D cache
		mov	r1, #7 << 5		@ 8 segments
1:		orr	r3, r1, #63 << 26	@ 64 entries
2:		mcr	p15, 0, r3, c7, c14, 2	@ clean & invalidate D index
		subs	r3, r3, #1 << 26
		bcs	2b			@ entries 63 to 0
		subs	r1, r1, #1 << 5
		bcs	1b			@ segments 7 to 0

		teq	r2, #0
		mcrne	p15, 0, ip, c7, c5, 0	@ invalidate I cache
		mcr	p15, 0, ip, c7, c10, 4	@ drain WB
		mov	pc, lr


__armv6_mmu_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r1, c7, c14, 0	@ clean+invalidate D
		mcr	p15, 0, r1, c7, c5, 0	@ invalidate I+BTB
		mcr	p15, 0, r1, c7, c15, 0	@ clean+invalidate unified
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr
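/*
 * __armv4_mmu_cache_flush below sizes the D-cache from the cache type
 * register when one is present: bits [20:18] give the size (decoded as
 * 1024 << dsize, which is already twice the cache size), bit 14 is the
 * 'M' multiplier (adds half the size again when set) and bits [13:12]
 * give the line length (8 << len bytes).  It then walks a region of that
 * size with one load per cache line so the read allocations displace any
 * dirty lines, and finishes by invalidating the I and D caches and
 * draining the write buffer.
 */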
__armv4_mmu_cache_flush:
		mov	r2, #64*1024		@ default: 32K dcache size (*2)
		mov	r11, #32		@ default: 32 byte line size
		mrc	p15, 0, r3, c0, c0, 1	@ read cache type
		teq	r3, r6			@ cache ID register present?
		beq	no_cache_id
		mov	r1, r3, lsr #18
		and	r1, r1, #7
		mov	r2, #1024
		mov	r2, r2, lsl r1		@ base dcache size *2
		tst	r3, #1 << 14		@ test M bit
		addne	r2, r2, r2, lsr #1	@ +1/2 size if M == 1
		mov	r3, r3, lsr #12
		and	r3, r3, #3
		mov	r11, #8
		mov	r11, r11, lsl r3	@ cache line size in bytes
no_cache_id:
		bic	r1, pc, #63		@ align to longest cache line
		add	r2, r1, r2
1:		ldr	r3, [r1], r11		@ s/w flush D cache
		teq	r1, r2
		bne	1b

		mcr	p15, 0, r1, c7, c5, 0	@ flush I cache
		mcr	p15, 0, r1, c7, c6, 0	@ flush D cache
		mcr	p15, 0, r1, c7, c10, 4	@ drain WB
		mov	pc, lr

__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
		mov	r1, #0
		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
		mov	pc, lr

/*
 * Various debugging routines for printing hex characters and
 * memory, which again must be relocatable.
 */
#ifdef DEBUG
		.type	phexbuf,#object
phexbuf:	.space	12
		.size	phexbuf, . - phexbuf

phex:		adr	r3, phexbuf
		mov	r2, #0
		strb	r2, [r3, r1]
1:		subs	r1, r1, #1
		movmi	r0, r3
		bmi	puts
		and	r2, r0, #15
		mov	r0, r0, lsr #4
		cmp	r2, #10
		addge	r2, r2, #7
		add	r2, r2, #'0'
		strb	r2, [r3, r1]
		b	1b

puts:		loadsp	r3
1:		ldrb	r2, [r0], #1
		teq	r2, #0
		moveq	pc, lr
2:		writeb	r2, r3
		mov	r1, #0x00020000
3:		subs	r1, r1, #1
		bne	3b
		teq	r2, #'\n'
		moveq	r2, #'\r'
		beq	2b
		teq	r0, #0
		bne	1b
		mov	pc, lr
putc:
		mov	r2, r0
		mov	r0, #0
		loadsp	r3
		b	2b

memdump:	mov	r12, r0
		mov	r10, lr
		mov	r11, #0
2:		mov	r0, r11, lsl #2
		add	r0, r0, r12
		mov	r1, #8
		bl	phex
		mov	r0, #':'
		bl	putc
1:		mov	r0, #' '
		bl	putc
		ldr	r0, [r12, r11, lsl #2]
		mov	r1, #8
		bl	phex
		and	r0, r11, #7
		teq	r0, #3
		moveq	r0, #' '
		bleq	putc
		and	r0, r11, #7
		add	r11, r11, #1
		teq	r0, #7
		bne	1b
		mov	r0, #'\n'
		bl	putc
		cmp	r11, #64
		blt	2b
		mov	pc, r10
#endif

		.ltorg
reloc_end:

		.align
		.section ".stack", "w"
user_stack:	.space	4096