/* Modified by Broadcom Corp. Portions Copyright (c) Broadcom Corp, 2012. */
/*
 *  linux/arch/arm/kernel/head.S
 *
 *  Copyright (C) 1994-2002 Russell King
 *  Copyright (c) 2003 ARM Limited
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Kernel startup code for all 32-bit CPUs
 */
#include <linux/linkage.h>
#include <linux/init.h>

#include <asm/assembler.h>
#include <asm/domain.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/memory.h>
#include <asm/thread_info.h>
#include <asm/system.h>

/* The section-mapping code below requires a 2MiB-aligned physical base. */
#if (PHYS_OFFSET & 0x001fffff)
#error "PHYS_OFFSET must be at an even 2MiB boundary!"
#endif

/* Virtual and physical addresses of the kernel image in RAM. */
#define KERNEL_RAM_VADDR	(PAGE_OFFSET + TEXT_OFFSET)
#define KERNEL_RAM_PADDR	(PHYS_OFFSET + TEXT_OFFSET)


/*
 * swapper_pg_dir is the virtual address of the initial page table.
 * We place the page tables 16K below KERNEL_RAM_VADDR.  Therefore, we must
 * make sure that KERNEL_RAM_VADDR is correctly set.  Currently, we expect
 * the least significant 16 bits to be 0x8000, but we could probably
 * relax this restriction to KERNEL_RAM_VADDR >= PAGE_OFFSET + 0x4000.
 */
#if (KERNEL_RAM_VADDR & 0xffff) != 0x8000
#error KERNEL_RAM_VADDR must start at 0xXXXX8000
#endif

	.globl	swapper_pg_dir
	.equ	swapper_pg_dir, KERNEL_RAM_VADDR - 0x4000

	/*
	 * pgtbl: load \rd with the *physical* address of the initial
	 * (16K, level-1) page table, which sits 16K below the kernel.
	 */
	.macro	pgtbl, rd
	ldr	\rd, =(KERNEL_RAM_PADDR - 0x4000)
	.endm

#ifdef CONFIG_XIP_KERNEL
/* Execute-in-place: text lives in flash, only .data/.bss are in RAM. */
#define KERNEL_START	XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR)
#define KERNEL_END	_edata_loc
#else
#define KERNEL_START	KERNEL_RAM_VADDR
#define KERNEL_END	_end
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * This is normally called from the decompressor code.  The requirements
 * are: MMU = off, D-cache = off, I-cache = dont care, r0 = 0,
 * r1 = machine nr, r2 = atags pointer.
 *
 * This code is mostly position independent, so if you link the kernel at
 * 0xc0008000, you call this at __pa(0xc0008000).
 *
 * See linux/arch/arm/tools/mach-types for the complete list of machine
 * numbers for r1.
 *
 * We're trying to keep crap to a minimum; DO NOT add any machine specific
 * crap here - that's what the boot loader (or in extreme, well justified
 * circumstances, zImage) is for.
 */
	__HEAD
ENTRY(stext)
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
						@ and irqs disabled
#ifdef CONFIG_ARCH_HAS_HEAD_FIXUP
	bl	__mach_head_fixup		@ Broadcom addition: board-level
						@ fixup before any lookup runs
#endif
	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type		@ r5=procinfo r9=cpuid
	movs	r10, r5				@ invalid processor (r5=0)?
	beq	__error_p			@ yes, error 'p'
	bl	__lookup_machine_type		@ r5=machinfo
	movs	r8, r5				@ invalid machine (r5=0)?
	beq	__error_a			@ yes, error 'a'
	bl	__vet_atags			@ sanity-check r2 (atags ptr)
	bl	__create_page_tables		@ r4 = phys page table on return

	/*
	 * The following calls CPU specific code in a position independent
	 * manner.  See arch/arm/mm/proc-*.S for details.  r10 = base of
	 * xxx_proc_info structure selected by __lookup_machine_type
	 * above.  On return, the CPU will be ready for the MMU to be
	 * turned on, and r0 will hold the CPU control register value.
	 */
	ldr	r13, __switch_data		@ address to jump to after
						@ mmu has been enabled
	adr	lr, BSYM(__enable_mmu)		@ return (PIC) address
 ARM(	add	pc, r10, #PROCINFO_INITFUNC	)
 THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
 THUMB(	mov	pc, r12				)
ENDPROC(stext)

#if defined(CONFIG_SMP)
ENTRY(secondary_startup)
	/*
	 * Common entry point for secondary CPUs.
	 *
	 * Ensure that we're in SVC mode, and IRQs are disabled.  Lookup
	 * the processor type - there is no need to check the machine type
	 * as it has already been validated by the primary processor.
	 */
	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
	mrc	p15, 0, r9, c0, c0		@ get processor id
	bl	__lookup_processor_type
	movs	r10, r5				@ invalid processor?
	moveq	r0, #'p'			@ yes, error 'p'
	beq	__error

	/*
	 * Use the page tables supplied from  __cpu_up.
	 * The offset arithmetic converts the PC-relative (physical)
	 * address of __secondary_data into the physical address of the
	 * C-level secondary_data structure (linked at a virtual address).
	 */
	adr	r4, __secondary_data
	ldmia	r4, {r5, r7, r12}		@ address to jump to after
	sub	r4, r4, r5			@ mmu has been enabled
	ldr	r4, [r7, r4]			@ get secondary_data.pgdir
	adr	lr, BSYM(__enable_mmu)		@ return address
	mov	r13, r12			@ __secondary_switched address
 ARM(	add	pc, r10, #PROCINFO_INITFUNC	) @ initialise processor
						  @ (return control reg)
 THUMB(	add	r12, r10, #PROCINFO_INITFUNC	)
 THUMB(	mov	pc, r12				)
ENDPROC(secondary_startup)

	/*
	 * r6  = &secondary_data
	 * (r7 still holds the virtual address of secondary_data here,
	 * loaded in secondary_startup above.)
	 */
ENTRY(__secondary_switched)
	ldr	sp, [r7, #4]			@ get secondary_data.stack
	mov	fp, #0				@ terminate backtraces here
	b	secondary_start_kernel
ENDPROC(__secondary_switched)

	/*
	 * Literal pool for secondary boot: its own link-time address
	 * (used to compute the virt->phys delta), then pointers to
	 * secondary_data and the post-MMU entry point.
	 */
	.type	__secondary_data, %object
__secondary_data:
	.long	.
	.long	secondary_data
	.long	__secondary_switched
#endif /* defined(CONFIG_SMP) */



/*
 * Setup common bits before finally enabling the MMU.  Essentially
 * this is just loading the page table pointer and domain access
 * registers.
 *
 * Entered with:
 *  r0  = CPU control register value (from the proc_info init function)
 *  r4  = physical page table address
 *  r13 = virtual address to jump to after the MMU is on
 */
__enable_mmu:
#ifdef CONFIG_ALIGNMENT_TRAP
	orr	r0, r0, #CR_A			@ enable alignment faults
#else
	bic	r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CR_C			@ force D-cache off
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
	bic	r0, r0, #CR_Z			@ force branch predictor off
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
	bic	r0, r0, #CR_I			@ force I-cache off
#endif
	mov	r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
		      domain_val(DOMAIN_IO, DOMAIN_CLIENT))
	mcr	p15, 0, r5, c3, c0, 0		@ load domain access register
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer
	b	__turn_mmu_on
ENDPROC(__enable_mmu)

/*
 * Enable the MMU.  This completely changes the structure of the visible
 * memory space.  You will not be able to trace execution through this.
 * If you have an enquiry about this, *please* check the linux-arm-kernel
 * mailing list archives BEFORE sending another post to the list.
 *
 *  r0  = cp#15 control register
 *  r13 = *virtual* address to jump to upon completion
 *
 * other registers depend on the function called upon completion
 *
 * .align 5 keeps this whole sequence inside one 32-byte cache line.
 */
	.align	5
__turn_mmu_on:
	mov	r0, r0				@ nop padding around the MMU
	mcr	p15, 0, r0, c1, c0, 0		@ write control reg (MMU on)
	mrc	p15, 0, r3, c0, c0, 0		@ read id reg (serialising read;
						@ result itself is unused)
	mov	r3, r3				@ nop: let translation take effect
	mov	r3, r13
	mov	pc, r3				@ jump to virtual address
ENDPROC(__turn_mmu_on)


/*
 * Setup the initial page tables.  We only setup the barest
 * amount which are required to get the kernel running, which
 * generally means mapping in the kernel code.
 *
 * r8  = machinfo
 * r9  = cpuid
 * r10 = procinfo
 *
 * Returns:
 *  r0, r3, r6, r7 corrupted
 *  r4 = physical page table address
 */
__create_page_tables:
	pgtbl	r4				@ page table address

	/*
	 * Clear the 16K level 1 swapper page table
	 * (four stores per loop iteration, so 0x4000 bytes total).
	 */
	mov	r0, r4
	mov	r3, #0
	add	r6, r0, #0x4000
1:	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	teq	r0, r6
	bne	1b

	ldr	r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags

	/*
	 * Create identity mapping for first MB of kernel to
	 * cater for the MMU enable.  This identity mapping
	 * will be removed by paging_init().  We use our current program
	 * counter to determine corresponding section base address.
	 */
	mov	r6, pc
	mov	r6, r6, lsr #20			@ start of kernel section
	orr	r3, r7, r6, lsl #20		@ flags + kernel base
	str	r3, [r4, r6, lsl #2]		@ identity mapping

	/*
	 * Now setup the pagetables for our kernel direct
	 * mapped region.  The >> 18 turns a section-aligned virtual
	 * address into a byte offset into the pgdir (addr >> 20 entries
	 * of 4 bytes each); the address is split into 0xff000000 and
	 * 0x00f00000 parts to stay within immediate-encoding limits.
	 */
	add	r0, r4,  #(KERNEL_START & 0xff000000) >> 18
	str	r3, [r0, #(KERNEL_START & 0x00f00000) >> 18]!
	ldr	r6, =(KERNEL_END - 1)
	add	r0, r0, #4
	add	r6, r4, r6, lsr #18		@ r6 = last pgdir slot to fill
1:	cmp	r0, r6
	add	r3, r3, #1 << 20		@ next 1MB section
	strls	r3, [r0], #4
	bls	1b

#ifdef CONFIG_XIP_KERNEL
	/*
	 * Map some ram to cover our .data and .bss areas.
	 * (XIP text stays in flash; only the RAM portion needs mapping.)
	 */
	orr	r3, r7, #(KERNEL_RAM_PADDR & 0xff000000)
	.if	(KERNEL_RAM_PADDR & 0x00f00000)
	orr	r3, r3, #(KERNEL_RAM_PADDR & 0x00f00000)
	.endif
	add	r0, r4,  #(KERNEL_RAM_VADDR & 0xff000000) >> 18
	str	r3, [r0, #(KERNEL_RAM_VADDR & 0x00f00000) >> 18]!
	ldr	r6, =(_end - 1)
	add	r0, r0, #4
	add	r6, r4, r6, lsr #18
1:	cmp	r0, r6
	add	r3, r3, #1 << 20
	strls	r3, [r0], #4
	bls	1b
#endif

	/*
	 * Then map first 1MB of ram in case it contains our boot params.
	 */
	add	r0, r4, #PAGE_OFFSET >> 18
	orr	r6, r7, #(PHYS_OFFSET & 0xff000000)
	.if	(PHYS_OFFSET & 0x00f00000)
	orr	r6, r6, #(PHYS_OFFSET & 0x00f00000)
	.endif
	str	r6, [r0]

#ifdef CONFIG_DEBUG_LL
	ldr	r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags
	/*
	 * Map in IO space for serial debugging.
	 * This allows debug messages to be output
	 * via a serial console before paging_init.
	 */
	ldr	r3, [r8, #MACHINFO_PGOFFIO]	@ pgdir byte offset of IO space
	add	r0, r4, r3
	rsb	r3, r3, #0x4000			@ PTRS_PER_PGD*sizeof(long)
	cmp	r3, #0x0800			@ limit to 512MB
	movhi	r3, #0x0800
	add	r6, r0, r3
	ldr	r3, [r8, #MACHINFO_PHYSIO]	@ physical base of IO space
	orr	r3, r3, r7
1:	str	r3, [r0], #4
	add	r3, r3, #1 << 20
	teq	r0, r6
	bne	1b
#if defined(CONFIG_ARCH_NETWINDER) || defined(CONFIG_ARCH_CATS)
	/*
	 * If we're using the NetWinder or CATS, we also need to map
	 * in the 16550-type serial port for the debug messages
	 */
	add	r0, r4, #0xff000000 >> 18
	orr	r3, r7, #0x7c000000
	str	r3, [r0]
#endif
#ifdef CONFIG_ARCH_RPC
	/*
	 * Map in screen at 0x02000000 & SCREEN2_BASE
	 * Similar reasons here - for debug.  This is
	 * only for Acorn RiscPC architectures.
	 */
	add	r0, r4, #0x02000000 >> 18
	orr	r3, r7, #0x02000000
	str	r3, [r0]
	add	r0, r4, #0xd8000000 >> 18	@ second (shadowed) screen mapping
	str	r3, [r0]
#endif
#endif
	mov	pc, lr
ENDPROC(__create_page_tables)
	.ltorg

#include "head-common.S"