/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * arch/sh64/kernel/head.S
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 *
 *
 * benedict.gaster@superh.com:	 2nd May 2002
 *    Moved definition of empty_zero_page to its own section allowing
 *    it to be placed at an absolute address known at load time.
 *
 * lethal@linux-sh.org:		 9th May 2003
 *    Kill off GLOBAL_NAME() usage.
 *
 * lethal@linux-sh.org:		 8th May 2004
 *    Add early SCIF console DTLB mapping.
 */


#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/cache.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/registers.h>
#include <asm/thread_info.h>

/*
 * MMU defines: TLB boundaries.
 *
 * Each fixed TLB entry is TLB_STEP bytes apart in the configuration
 * register space; *_END is one step past the last variable entry so the
 * clear loops below can use a simple "bne r21, r22" exit test.
 */

#define MMUIR_FIRST	ITLB_FIXED
#define MMUIR_END	ITLB_LAST_VAR_UNRESTRICTED+TLB_STEP
#define MMUIR_STEP	TLB_STEP

#define MMUDR_FIRST	DTLB_FIXED
#define MMUDR_END	DTLB_LAST_VAR_UNRESTRICTED+TLB_STEP
#define MMUDR_STEP	TLB_STEP

/* Safety check : CONFIG_CACHED_MEMORY_OFFSET has to be a multiple of 512Mb */
#if (CONFIG_CACHED_MEMORY_OFFSET & ((1UL<<29)-1))
#error "CONFIG_CACHED_MEMORY_OFFSET must be a multiple of 512Mb"
#endif

/*
 * MMU defines: Fixed TLBs.
 *
 * The boot mappings below are single 512Mb pages, so both the effective
 * and the physical address programmed into the TLB must be 512Mb aligned.
 */
/* Deal safely with the case where the base of RAM is not 512Mb aligned */

#define ALIGN_512M_MASK (0xffffffffe0000000)
#define ALIGNED_EFFECTIVE ((CONFIG_CACHED_MEMORY_OFFSET + CONFIG_MEMORY_START) & ALIGN_512M_MASK)
#define ALIGNED_PHYSICAL (CONFIG_MEMORY_START & ALIGN_512M_MASK)

/* PTEH/PTEL values for the single kernel-text ITLB entry.
 * Low bits are the entry's attribute flags; the aligned address is OR'd in.
 */
#define MMUIR_TEXT_H	(0x0000000000000003 | ALIGNED_EFFECTIVE)
			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */

#define MMUIR_TEXT_L	(0x000000000000009a | ALIGNED_PHYSICAL)
			/* 512 Mb, Cacheable, Write-back, execute, Not User, Ph. Add. */

/* PTEH/PTEL values for the single cached-data DTLB entry. */
#define MMUDR_CACHED_H	0x0000000000000003 | ALIGNED_EFFECTIVE
			/* Enabled, Shared, ASID 0, Eff. Add. 0xA0000000 */
#define MMUDR_CACHED_L	0x000000000000015a | ALIGNED_PHYSICAL
			/* 512 Mb, Cacheable, Write-back, read/write, Not User, Ph. Add. */

/* Initial I-cache control register values, chosen by config. */
#ifdef CONFIG_ICACHE_DISABLED
#define	ICCR0_INIT_VAL	ICCR0_OFF			/* ICACHE off */
#else
#define	ICCR0_INIT_VAL	ICCR0_ON | ICCR0_ICI		/* ICE + ICI */
#endif
#define	ICCR1_INIT_VAL	ICCR1_NOLOCK			/* No locking */

/* Initial operand (D-)cache control register values, chosen by config. */
#if defined (CONFIG_DCACHE_DISABLED)
#define	OCCR0_INIT_VAL	OCCR0_OFF			   /* D-cache: off  */
#elif defined (CONFIG_DCACHE_WRITE_THROUGH)
#define	OCCR0_INIT_VAL	OCCR0_ON | OCCR0_OCI | OCCR0_WT	   /* D-cache: on,   */
							   /* WT, invalidate */
#elif defined (CONFIG_DCACHE_WRITE_BACK)
#define	OCCR0_INIT_VAL	OCCR0_ON | OCCR0_OCI | OCCR0_WB	   /* D-cache: on,   */
							   /* WB, invalidate */
#else
#error preprocessor flag CONFIG_DCACHE_... not recognized!
#endif

#define	OCCR1_INIT_VAL	OCCR1_NOLOCK			   /* No locking     */

	/*
	 * empty_zero_page lives in its own section so the linker script can
	 * place it at an absolute address known at load time.  The first few
	 * longwords double as the legacy boot-parameter block.
	 */
	.section	.empty_zero_page, "aw"
	.global empty_zero_page

empty_zero_page:
	.long	1		/* MOUNT_ROOT_RDONLY */
	.long	0		/* RAMDISK_FLAGS */
	.long	0x0200		/* ORIG_ROOT_DEV */
	.long	1		/* LOADER_TYPE */
	.long	0x00800000	/* INITRD_START */
	.long	0x00800000	/* INITRD_SIZE */
	.long	0

	.text
	.balign 4096,0,4096

	.section	.data, "aw"
	.balign	PAGE_SIZE

	/* NOTE(review): the .section/.balign pair above is repeated here in
	 * the original source; harmless but redundant. */
	.section	.data, "aw"
	.balign	PAGE_SIZE

	/* Page-sized, zero-filled kernel objects referenced by the MM code. */
	.global swapper_pg_dir
swapper_pg_dir:
	.space PAGE_SIZE, 0

	.global empty_bad_page
empty_bad_page:
	.space PAGE_SIZE, 0

	.global empty_bad_pte_table
empty_bad_pte_table:
	.space PAGE_SIZE, 0

	/* Set below to 0/1 depending on whether the FPU could be enabled. */
	.global	fpu_in_use
fpu_in_use:	.quad	0


	.section	.text, "ax"
	.balign L1_CACHE_BYTES
/*
 * Condition at the entry of __stext:
 * . Reset state:
 *   . SR.FD    = 1		(FPU disabled)
 *   . SR.BL    = 1		(Exceptions disabled)
 *   . SR.MD    = 1		(Privileged Mode)
 *   . SR.MMU   = 0		(MMU Disabled)
 *   . SR.CD    = 0		(CTC User Visible)
 *   . SR.IMASK = Undefined	(Interrupt Mask)
 *
 * Operations supposed to be performed by __stext:
 * . prevent speculative fetch onto device memory while MMU is off
 * . reflect as much as possible SH5 ABI (r15, r26, r27, r18)
 * . first, save CPU state and set it to something harmless
 * . any CPU detection and/or endianness settings (?)
 * . initialize EMI/LMI (but not TMU/RTC/INTC/SCIF): TBD
 * . set initial TLB entries for cached and uncached regions
 *   (no fine granularity paging)
 * . set initial cache state
 * . enable MMU and caches
 * . set CPU to a consistent state
 *   . registers (including stack pointer and current/KCR0)
 *   . NOT expecting to set Exception handling nor VBR/RESVEC/DCR
 *     at this stage. This is all to later Linux initialization steps.
 *   . initialize FPU
 * . clear BSS
 * . jump into start_kernel()
 * . be prepared to hopeless start_kernel() returns.
 *
 */
	.global _stext
_stext:
	/*
	 * Prevent speculative fetch on device memory due to
	 * uninitialized target registers.
	 * (All eight branch-target registers are pointed at a harmless
	 * location before anything else runs.)
	 */
	ptabs/u	ZERO, tr0
	ptabs/u	ZERO, tr1
	ptabs/u	ZERO, tr2
	ptabs/u	ZERO, tr3
	ptabs/u	ZERO, tr4
	ptabs/u	ZERO, tr5
	ptabs/u	ZERO, tr6
	ptabs/u	ZERO, tr7
	synci

	/*
	 * Read/Set CPU state. After this block:
	 * r29 = Initial SR
	 */
	getcon	SR, r29
	movi	SR_HARMLESS, r20
	putcon	r20, SR

	/*
	 * Initialize EMI/LMI. To Be Done.
	 */

	/*
	 * CPU detection and/or endianness settings (?). To Be Done.
	 * Pure PIC code here, please ! Just save state into r30.
	 * After this block:
	 * r30 = CPU type/Platform Endianness
	 */

	/*
	 * Set initial TLB entries for cached and uncached regions.
	 * Note: PTA/BLINK is PIC code, PTABS/BLINK isn't !
	 */
	/* Clear ITLBs: walk every entry, zeroing PTEH so V (valid) is off. */
	pta	clear_ITLB, tr1
	movi	MMUIR_FIRST, r21
	movi	MMUIR_END, r22
clear_ITLB:
	putcfg	r21, 0, ZERO		/* Clear MMUIR[n].PTEH.V */
	addi	r21, MMUIR_STEP, r21
	bne	r21, r22, tr1

	/* Clear DTLBs: same walk over the data-TLB configuration space. */
	pta	clear_DTLB, tr1
	movi	MMUDR_FIRST, r21
	movi	MMUDR_END, r22
clear_DTLB:
	putcfg	r21, 0, ZERO		/* Clear MMUDR[n].PTEH.V */
	addi	r21, MMUDR_STEP, r21
	bne	r21, r22, tr1

	/* Map one big (512Mb) page for ITLB.
	 * PTEL is written before PTEH so the entry only becomes valid
	 * (PTEH.V set) once both halves are consistent.
	 */
	movi	MMUIR_FIRST, r21
	movi	MMUIR_TEXT_L, r22	/* PTEL first */
	add.l	r22, r63, r22		/* Sign extend */
	putcfg	r21, 1, r22		/* Set MMUIR[0].PTEL */
	movi	MMUIR_TEXT_H, r22	/* PTEH last */
	add.l	r22, r63, r22		/* Sign extend */
	putcfg	r21, 0, r22		/* Set MMUIR[0].PTEH */

	/* Map one big CACHED (512Mb) page for DTLB */
	movi	MMUDR_FIRST, r21
	movi	MMUDR_CACHED_L, r22	/* PTEL first */
	add.l	r22, r63, r22		/* Sign extend */
	putcfg	r21, 1, r22		/* Set MMUDR[0].PTEL */
	movi	MMUDR_CACHED_H, r22	/* PTEH last */
	add.l	r22, r63, r22		/* Sign extend */
	putcfg	r21, 0, r22		/* Set MMUDR[0].PTEH */

#ifdef CONFIG_EARLY_PRINTK
	/*
	 * Setup a DTLB translation for SCIF phys.
	 * Uses the next fixed DTLB entry after the cached-RAM one.
	 * movi+shori build the 32-bit PTEL/PTEH values 16 bits at a time:
	 * PTEL = 0x0a030148 (SCIF phys base | attribute flags),
	 * PTEH = 0xfa030003 (fixed SCIF virt | enable flags).
	 */
	addi	r21, MMUDR_STEP, r21
	movi	0x0a03, r22	/* SCIF phys */
	shori	0x0148, r22
	putcfg	r21, 1, r22	/* PTEL first */
	movi	0xfa03, r22	/* 0xfa030000, fixed SCIF virt */
	shori	0x0003, r22
	putcfg	r21, 0, r22	/* PTEH last */
#endif

	/*
	 * Set cache behaviours.
	 * Both caches get their config-selected initial control values
	 * via putcfg into the cache configuration register blocks.
	 */
	/* ICache */
	movi	ICCR_BASE, r21
	movi	ICCR0_INIT_VAL, r22
	movi	ICCR1_INIT_VAL, r23
	putcfg	r21, ICCR_REG0, r22
	putcfg	r21, ICCR_REG1, r23

	/* OCache */
	movi	OCCR_BASE, r21
	movi	OCCR0_INIT_VAL, r22
	movi	OCCR1_INIT_VAL, r23
	putcfg	r21, OCCR_REG0, r22
	putcfg	r21, OCCR_REG1, r23


	/*
	 * Enable Caches and MMU. Do the first non-PIC jump.
	 * Now head.S global variables, constants and externs
	 * can be used.
	 *
	 * The switch is done with an rte: SSR holds the new SR (MMU on)
	 * and SPC the first translated target address, so translation
	 * becomes live atomically with the jump.
	 */
	getcon	SR, r21
	movi	SR_ENABLE_MMU, r22
	or	r21, r22, r21
	putcon	r21, SSR
	movi	hyperspace, r22
	ori	r22, 1, r22	    /* Make it SHmedia, not required but..*/
	putcon	r22, SPC
	synco
	rte			    /* And now go into the hyperspace ... */
hyperspace:			    /* ... that's the next instruction ! */

	/*
	 * Set CPU to a consistent state.
	 * r31 = FPU support flag
	 * tr0/tr7 in use. Others give a chance to loop somewhere safe
	 */
	movi	start_kernel, r32
	ori	r32, 1, r32

	ptabs	r32, tr0	    /* r32 = _start_kernel address        */
	pta/u	hopeless, tr1
	pta/u	hopeless, tr2
	pta/u	hopeless, tr3
	pta/u	hopeless, tr4
	pta/u	hopeless, tr5
	pta/u	hopeless, tr6
	pta/u	hopeless, tr7
	gettr	tr1, r28	    /* r28 = hopeless address */

	/* Set initial stack pointer: top of init_task's thread union.
	 * KCR0 carries the current task pointer for the kernel.
	 */
	movi	init_thread_union, SP
	putcon	SP, KCR0	    /* Set current to init_task */
	movi	THREAD_SIZE, r22    /* Point to the end */
	add	SP, r22, SP

	/*
	 * Initialize FPU.
	 * Keep FPU flag in r31. After this block:
	 * r31 = FPU flag
	 */
	movi fpu_in_use, r31	    /* Temporary */

#ifdef CONFIG_SH_FPU
	/* Try clearing SR.FD; if the write sticks, an FPU is present.
	 * xor old/new SR then shift the FD bit (bit 15) down to get 0/1.
	 */
	getcon	SR, r21
	movi	SR_ENABLE_FPU, r22
	and	r21, r22, r22
	putcon	r22, SR		    /* Try to enable */
	getcon	SR, r22
	xor	r21, r22, r21
	shlri	r21, 15, r21	    /* Supposedly 0/1 */
	st.q	r31, 0 , r21	    /* Set fpu_in_use */
#else
	movi	0, r21
	st.q	r31, 0 , r21	    /* Set fpu_in_use */
#endif
	or	r21, ZERO, r31	    /* Set FPU flag at last */

#ifndef CONFIG_SH_NO_BSS_INIT
/* Don't clear BSS if running on slow platforms such as an RTL simulation,
   remote memory via SHdebug link, etc.  For these the memory can be
   guaranteed to be all zero on boot anyway. */
	/*
	 * Clear bss, one quadword at a time.
	 */
	pta	clear_quad, tr1
	movi	__bss_start, r22
	movi	_end, r23
clear_quad:
	st.q	r22, 0, ZERO
	addi	r22, 8, r22
	bne	r22, r23, tr1	    /* Both quad aligned, see vmlinux.lds.S */
#endif
	pta/u	hopeless, tr1

	/* Say bye to head.S but be prepared to wrongly get back ... */
	blink	tr0, LINK

	/* If we ever get back here through LINK/tr1-tr7 */
	pta/u	hopeless, tr7

hopeless:
	/*
	 * Something's badly wrong here. Loop endlessly,
	 * there's nothing more we can do about it.
	 *
	 * Note on hopeless: it can be jumped into invariably
	 * before or after jumping into hyperspace. The only
	 * requirement is to be PIC called (PTA) before and
	 * any way (PTA/PTABS) after. According to Virtual
	 * to Physical mapping a simulator/emulator can easily
	 * tell where we came here from just looking at hopeless
	 * (PC) address.
	 *
	 * For debugging purposes:
	 * (r28) hopeless/loop address
	 * (r29) Original SR
	 * (r30) CPU type/Platform endianness
	 * (r31) FPU Support
	 * (r32) _start_kernel address
	 */
	blink	tr7, ZERO