/* locore-v4.S — FreeBSD SVN revision 261039 */
1/* $NetBSD: locore.S,v 1.14 2003/04/20 16:21:40 thorpej Exp $ */ 2 3/*- 4 * Copyright 2011 Semihalf 5 * Copyright (C) 1994-1997 Mark Brinicombe 6 * Copyright (C) 1994 Brini 7 * All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 3. All advertising materials mentioning features or use of this software 18 * must display the following acknowledgement: 19 * This product includes software developed by Brini. 20 * 4. The name of Brini may not be used to endorse or promote products 21 * derived from this software without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR 24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 26 * IN NO EVENT SHALL BRINI BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 28 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 29 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 30 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 31 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF 32 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
33 * 34 */ 35 36#include "assym.s" 37#include <sys/syscall.h> 38#include <machine/asm.h> 39#include <machine/armreg.h> 40#include <machine/pte.h> 41 42__FBSDID("$FreeBSD: head/sys/arm/arm/locore.S 261039 2014-01-22 21:23:58Z imp $"); 43 44/* What size should this really be ? It is only used by initarm() */ 45#define INIT_ARM_STACK_SIZE (2048 * 4) 46 47#define CPWAIT_BRANCH \ 48 sub pc, pc, #4 49 50#define CPWAIT(tmp) \ 51 mrc p15, 0, tmp, c2, c0, 0 /* arbitrary read of CP15 */ ;\ 52 mov tmp, tmp /* wait for it to complete */ ;\ 53 CPWAIT_BRANCH /* branch to next insn */ 54 55/* 56 * This is for kvm_mkdb, and should be the address of the beginning 57 * of the kernel text segment (not necessarily the same as kernbase). 58 */ 59 .text 60 .align 0 61.globl kernbase 62.set kernbase,KERNBASE 63.globl physaddr 64.set physaddr,PHYSADDR 65 66/* 67 * On entry for FreeBSD boot ABI: 68 * r0 - metadata pointer or 0 (boothowto on AT91's boot2) 69 * r1 - if (r0 == 0) then metadata pointer 70 * On entry for Linux boot ABI: 71 * r0 - 0 72 * r1 - machine type (passed as arg2 to initarm) 73 * r2 - Pointer to a tagged list or dtb image (phys addr) (passed as arg1 initarm) 74 * 75 * For both types of boot we gather up the args, put them in a struct arm_boot_params 76 * structure and pass that to initarm. 77 */ 78ENTRY_NP(btext) 79ASENTRY_NP(_start) 80 STOP_UNWINDING /* Can't unwind into the bootloader! */ 81 82 mov r9, r0 /* 0 or boot mode from boot2 */ 83 mov r8, r1 /* Save Machine type */ 84 mov ip, r2 /* Save meta data */ 85 mov fp, r3 /* Future expantion */ 86 87 /* Make sure interrupts are disabled. */ 88 mrs r7, cpsr 89 orr r7, r7, #(I32_bit|F32_bit) 90 msr cpsr_c, r7 91 92#if defined (FLASHADDR) && defined(LOADERRAMADDR) 93 /* Check if we're running from flash. */ 94 ldr r7, =FLASHADDR 95 /* 96 * If we're running with MMU disabled, test against the 97 * physical address instead. 
98 */ 99 mrc p15, 0, r2, c1, c0, 0 100 ands r2, r2, #CPU_CONTROL_MMU_ENABLE 101 ldreq r6, =PHYSADDR 102 ldrne r6, =LOADERRAMADDR 103 cmp r7, r6 104 bls flash_lower 105 cmp r7, pc 106 bhi from_ram 107 b do_copy 108 109flash_lower: 110 cmp r6, pc 111 bls from_ram 112do_copy: 113 ldr r7, =KERNBASE 114 adr r1, _start 115 ldr r0, Lreal_start 116 ldr r2, Lend 117 sub r2, r2, r0 118 sub r0, r0, r7 119 add r0, r0, r6 120 mov r4, r0 121 bl memcpy 122 ldr r0, Lram_offset 123 add pc, r4, r0 124Lram_offset: .word from_ram-_C_LABEL(_start) 125from_ram: 126 nop 127#endif 128 adr r7, Lunmapped 129 bic r7, r7, #0xf0000000 130 orr r7, r7, #PHYSADDR 131 132 133disable_mmu: 134 /* Disable MMU for a while */ 135 mrc p15, 0, r2, c1, c0, 0 136 bic r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\ 137 CPU_CONTROL_WBUF_ENABLE) 138 bic r2, r2, #(CPU_CONTROL_IC_ENABLE) 139 bic r2, r2, #(CPU_CONTROL_BPRD_ENABLE) 140 mcr p15, 0, r2, c1, c0, 0 141 142 nop 143 nop 144 nop 145 mov pc, r7 146Lunmapped: 147#ifdef STARTUP_PAGETABLE_ADDR 148 /* build page table from scratch */ 149 ldr r0, Lstartup_pagetable 150 adr r4, mmu_init_table 151 b 3f 152 1532: 154 str r3, [r0, r2] 155 add r2, r2, #4 156 add r3, r3, #(L1_S_SIZE) 157 adds r1, r1, #-1 158 bhi 2b 1593: 160 ldmia r4!, {r1,r2,r3} /* # of sections, VA, PA|attr */ 161 cmp r1, #0 162 adrne r5, 2b 163 bicne r5, r5, #0xf0000000 164 orrne r5, r5, #PHYSADDR 165 movne pc, r5 166 167#if defined(SMP) 168 orr r0, r0, #2 /* Set TTB shared memory flag */ 169#endif 170 mcr p15, 0, r0, c2, c0, 0 /* Set TTB */ 171 mcr p15, 0, r0, c8, c7, 0 /* Flush TLB */ 172 173#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_CORTEXA) || defined(CPU_MV_PJ4B) || defined(CPU_KRAIT) 174 mov r0, #0 175 mcr p15, 0, r0, c13, c0, 1 /* Set ASID to 0 */ 176#endif 177 178 /* Set the Domain Access register. Very important! */ 179 mov r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT) 180 mcr p15, 0, r0, c3, c0, 0 181 /* 182 * Enable MMU. 
183 * On armv6 enable extended page tables, and set alignment checking 184 * to modulo-4 (CPU_CONTROL_UNAL_ENABLE) for the ldrd/strd 185 * instructions emitted by clang. 186 */ 187 mrc p15, 0, r0, c1, c0, 0 188#ifdef _ARM_ARCH_6 189 orr r0, r0, #(CPU_CONTROL_V6_EXTPAGE | CPU_CONTROL_UNAL_ENABLE) 190 orr r0, r0, #(CPU_CONTROL_AFLT_ENABLE) 191 orr r0, r0, #(CPU_CONTROL_AF_ENABLE) 192#endif 193 orr r0, r0, #(CPU_CONTROL_MMU_ENABLE) 194 mcr p15, 0, r0, c1, c0, 0 195 nop 196 nop 197 nop 198 CPWAIT(r0) 199 200#endif 201mmu_done: 202 nop 203 adr r1, .Lstart 204 ldmia r1, {r1, r2, sp} /* Set initial stack and */ 205 sub r2, r2, r1 /* get zero init data */ 206 mov r3, #0 207.L1: 208 str r3, [r1], #0x0004 /* get zero init data */ 209 subs r2, r2, #4 210 bgt .L1 211 ldr pc, .Lvirt_done 212 213virt_done: 214 mov r1, #20 /* loader info size is 20 bytes also second arg */ 215 subs sp, sp, r1 /* allocate arm_boot_params struct on stack */ 216 bic sp, sp, #7 /* align stack to 8 bytes */ 217 mov r0, sp /* loader info pointer is first arg */ 218 str r1, [r0] /* Store length of loader info */ 219 str r9, [r0, #4] /* Store r0 from boot loader */ 220 str r8, [r0, #8] /* Store r1 from boot loader */ 221 str ip, [r0, #12] /* store r2 from boot loader */ 222 str fp, [r0, #16] /* store r3 from boot loader */ 223 mov fp, #0 /* trace back starts here */ 224 bl _C_LABEL(initarm) /* Off we go */ 225 226 /* init arm will return the new stack pointer. */ 227 mov sp, r0 228 229 bl _C_LABEL(mi_startup) /* call mi_startup()! 
*/ 230 231 adr r0, .Lmainreturned 232 b _C_LABEL(panic) 233 /* NOTREACHED */ 234#ifdef STARTUP_PAGETABLE_ADDR 235#define MMU_INIT(va,pa,n_sec,attr) \ 236 .word n_sec ; \ 237 .word 4*((va)>>L1_S_SHIFT) ; \ 238 .word (pa)|(attr) ; 239 240Lvirtaddr: 241 .word KERNVIRTADDR 242Lphysaddr: 243 .word KERNPHYSADDR 244Lreal_start: 245 .word _start 246Lend: 247 .word _edata 248Lstartup_pagetable: 249 .word STARTUP_PAGETABLE_ADDR 250#ifdef SMP 251Lstartup_pagetable_secondary: 252 .word temp_pagetable 253#endif 254END(btext) 255END(_start) 256 257mmu_init_table: 258 /* fill all table VA==PA */ 259 /* map SDRAM VA==PA, WT cacheable */ 260#if !defined(SMP) 261 MMU_INIT(PHYSADDR, PHYSADDR, 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW)) 262 /* map VA 0xc0000000..0xc3ffffff to PA */ 263 MMU_INIT(KERNBASE, PHYSADDR, 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW)) 264#if defined(SOCDEV_PA) && defined(SOCKDEV_VA) 265 /* Map in 0x04000000 worth of the SoC's devices for bootstrap debugging */ 266 MMU_INIT(SOCKDEV_VA, SOCDEV_PA, 64, L1_TYPE_S|L1_S_C|L1_S_AP(AP_KRW)) 267#endif 268#else 269 MMU_INIT(PHYSADDR, PHYSADDR , 64, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW)) 270 /* map VA 0xc0000000..0xc3ffffff to PA */ 271 MMU_INIT(KERNBASE, PHYSADDR, 64, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW)) 272 MMU_INIT(0x48000000, 0x48000000, 1, L1_TYPE_S|L1_SHARED|L1_S_C|L1_S_AP(AP_KRW)) 273#endif /* SMP */ 274 .word 0 /* end of table */ 275#endif 276.Lstart: 277 .word _edata 278 .word _end 279 .word svcstk + INIT_ARM_STACK_SIZE 280 281.Lvirt_done: 282 .word virt_done 283#if defined(SMP) 284.Lmpvirt_done: 285 .word mpvirt_done 286#endif 287 288.Lmainreturned: 289 .asciz "main() returned" 290 .align 0 291 292 .bss 293svcstk: 294 .space INIT_ARM_STACK_SIZE 295 296 .text 297 .align 0 298 299.Lcpufuncs: 300 .word _C_LABEL(cpufuncs) 301 302#if defined(SMP) 303Lsramaddr: 304 .word 0xffff0080 305 306#if 0 307#define AP_DEBUG(tmp) \ 308 mrc p15, 0, r1, c0, c0, 5; \ 309 ldr r0, Lsramaddr; \ 310 add r0, r1, lsl #2; \ 311 mov r1, 
tmp; \ 312 str r1, [r0], #0x0000; 313#else 314#define AP_DEBUG(tmp) 315#endif 316 317 318ASENTRY_NP(mptramp) 319 mov r0, #0 320 mcr p15, 0, r0, c7, c7, 0 321 322 AP_DEBUG(#1) 323 324 mrs r3, cpsr_all 325 bic r3, r3, #(PSR_MODE) 326 orr r3, r3, #(PSR_SVC32_MODE) 327 msr cpsr_all, r3 328 329 mrc p15, 0, r0, c0, c0, 5 330 and r0, #0x0f /* Get CPU ID */ 331 332 /* Read boot address for CPU */ 333 mov r1, #0x100 334 mul r2, r0, r1 335 ldr r1, Lpmureg 336 add r0, r2, r1 337 ldr r1, [r0], #0x00 338 339 mov pc, r1 340 341Lpmureg: 342 .word 0xd0022124 343END(mptramp) 344 345ASENTRY_NP(mpentry) 346 347 AP_DEBUG(#2) 348 349 /* Make sure interrupts are disabled. */ 350 mrs r7, cpsr 351 orr r7, r7, #(I32_bit|F32_bit) 352 msr cpsr_c, r7 353 354 355 adr r7, Ltag 356 bic r7, r7, #0xf0000000 357 orr r7, r7, #PHYSADDR 358 359 /* Disable MMU for a while */ 360 mrc p15, 0, r2, c1, c0, 0 361 bic r2, r2, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE |\ 362 CPU_CONTROL_WBUF_ENABLE) 363 bic r2, r2, #(CPU_CONTROL_IC_ENABLE) 364 bic r2, r2, #(CPU_CONTROL_BPRD_ENABLE) 365 mcr p15, 0, r2, c1, c0, 0 366 367 nop 368 nop 369 nop 370 371 AP_DEBUG(#3) 372 373Ltag: 374 ldr r0, Lstartup_pagetable_secondary 375 bic r0, r0, #0xf0000000 376 orr r0, r0, #PHYSADDR 377 ldr r0, [r0] 378#if defined(SMP) 379 orr r0, r0, #0 /* Set TTB shared memory flag */ 380#endif 381 mcr p15, 0, r0, c2, c0, 0 /* Set TTB */ 382 mcr p15, 0, r0, c8, c7, 0 /* Flush TLB */ 383 384#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA) || defined(CPU_KRAIT) 385 mov r0, #0 386 mcr p15, 0, r0, c13, c0, 1 /* Set ASID to 0 */ 387#endif 388 389 AP_DEBUG(#4) 390 391 /* Set the Domain Access register. Very important! 
*/ 392 mov r0, #((DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL*2)) | DOMAIN_CLIENT) 393 mcr p15, 0, r0, c3, c0, 0 394 /* Enable MMU */ 395 mrc p15, 0, r0, c1, c0, 0 396#if defined(CPU_ARM1136) || defined(CPU_ARM1176) || defined(CPU_MV_PJ4B) || defined(CPU_CORTEXA) || defined(CPU_KRAIT) 397 orr r0, r0, #CPU_CONTROL_V6_EXTPAGE 398 orr r0, r0, #CPU_CONTROL_AF_ENABLE 399#endif 400 orr r0, r0, #(CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_DC_ENABLE) 401 mcr p15, 0, r0, c1, c0, 0 402 nop 403 nop 404 nop 405 CPWAIT(r0) 406 407 adr r1, .Lstart 408 ldmia r1, {r1, r2, sp} /* Set initial stack and */ 409 mrc p15, 0, r0, c0, c0, 5 410 and r0, r0, #15 411 mov r1, #2048 412 mul r2, r1, r0 413 sub sp, sp, r2 414 str r1, [sp] 415 ldr pc, .Lmpvirt_done 416 417mpvirt_done: 418 419 mov fp, #0 /* trace back starts here */ 420 bl _C_LABEL(init_secondary) /* Off we go */ 421 422 adr r0, .Lmpreturned 423 b _C_LABEL(panic) 424 /* NOTREACHED */ 425 426.Lmpreturned: 427 .asciz "main() returned" 428 .align 0 429END(mpentry) 430#endif 431 432ENTRY_NP(cpu_halt) 433 mrs r2, cpsr 434 bic r2, r2, #(PSR_MODE) 435 orr r2, r2, #(PSR_SVC32_MODE) 436 orr r2, r2, #(I32_bit | F32_bit) 437 msr cpsr_all, r2 438 439 ldr r4, .Lcpu_reset_address 440 ldr r4, [r4] 441 442 ldr r0, .Lcpufuncs 443 mov lr, pc 444 ldr pc, [r0, #CF_IDCACHE_WBINV_ALL] 445 mov lr, pc 446 ldr pc, [r0, #CF_L2CACHE_WBINV_ALL] 447 448 /* 449 * Load the cpu_reset_needs_v4_MMU_disable flag to determine if it's 450 * necessary. 
451 */ 452 453 ldr r1, .Lcpu_reset_needs_v4_MMU_disable 454 ldr r1, [r1] 455 cmp r1, #0 456 mov r2, #0 457 458 /* 459 * MMU & IDC off, 32 bit program & data space 460 * Hurl ourselves into the ROM 461 */ 462 mov r0, #(CPU_CONTROL_32BP_ENABLE | CPU_CONTROL_32BD_ENABLE) 463 mcr 15, 0, r0, c1, c0, 0 464 mcrne 15, 0, r2, c8, c7, 0 /* nail I+D TLB on ARMv4 and greater */ 465 mov pc, r4 466 467 /* 468 * _cpu_reset_address contains the address to branch to, to complete 469 * the cpu reset after turning the MMU off 470 * This variable is provided by the hardware specific code 471 */ 472.Lcpu_reset_address: 473 .word _C_LABEL(cpu_reset_address) 474 475 /* 476 * cpu_reset_needs_v4_MMU_disable contains a flag that signals if the 477 * v4 MMU disable instruction needs executing... it is an illegal instruction 478 * on f.e. ARM6/7 that locks up the computer in an endless illegal 479 * instruction / data-abort / reset loop. 480 */ 481.Lcpu_reset_needs_v4_MMU_disable: 482 .word _C_LABEL(cpu_reset_needs_v4_MMU_disable) 483END(cpu_halt) 484 485 486/* 487 * setjump + longjmp 488 */ 489ENTRY(setjmp) 490 stmia r0, {r4-r14} 491 mov r0, #0x00000000 492 RET 493END(setjmp) 494 495ENTRY(longjmp) 496 ldmia r0, {r4-r14} 497 mov r0, #0x00000001 498 RET 499END(longjmp) 500 501 .data 502 .global _C_LABEL(esym) 503_C_LABEL(esym): .word _C_LABEL(end) 504 505ENTRY_NP(abort) 506 b _C_LABEL(abort) 507END(abort) 508 509ENTRY_NP(sigcode) 510 mov r0, sp 511 512 /* 513 * Call the sigreturn system call. 514 * 515 * We have to load r7 manually rather than using 516 * "ldr r7, =SYS_sigreturn" to ensure the value of szsigcode is 517 * correct. Using the alternative places esigcode at the address 518 * of the data rather than the address one past the data. 519 */ 520 521 ldr r7, [pc, #12] /* Load SYS_sigreturn */ 522 swi SYS_sigreturn 523 524 /* Well if that failed we better exit quick ! */ 525 526 ldr r7, [pc, #8] /* Load SYS_exit */ 527 swi SYS_exit 528 529 /* Branch back to retry SYS_sigreturn */ 530 b . 
- 16 531 532 .word SYS_sigreturn 533 .word SYS_exit 534 535 .align 0 536 .global _C_LABEL(esigcode) 537 _C_LABEL(esigcode): 538 539 .data 540 .global szsigcode 541szsigcode: 542 .long esigcode-sigcode 543END(sigcode) 544/* End of locore.S */ 545