#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/elf.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	16

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	2

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	256

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 */
#define CACHE_DLIMIT	8192

	.text
/*
 * cpu_arm925_proc_init()
 */
ENTRY(cpu_arm925_proc_init)
	mov	pc, lr

/*
 * cpu_arm925_proc_fin()
 */
ENTRY(cpu_arm925_proc_fin)
	stmfd	sp!, {lr}
	mov	ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
	msr	cpsr_c, ip
	bl	arm925_flush_kern_cache_all
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ldmfd	sp!, {pc}

/*
 * cpu_arm925_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_arm925_reset)
	/* Send software reset to MPU and DSP */
	mov	ip, #0xff000000
	orr	ip, ip, #0x00fe0000
	orr	ip, ip, #0x0000ce00
	mov	r4, #1
	strh	r4, [ip, #0x10]

	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	mov	pc, r0

/*
 * cpu_arm925_do_idle()
 *
 * Called with IRQs disabled
 */
	.align	10
ENTRY(cpu_arm925_do_idle)
	mov	r0, #0
	mrc	p15, 0, r1, c1, c0, 0		@ Read control register
	mcr	p15, 0, r0, c7, c10, 4		@ Drain write buffer
	bic	r2, r1, #1 << 12
	mcr	p15, 0, r2, c1, c0, 0		@ Disable I cache
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	mcr	p15, 0, r1, c1, c0, 0		@ Restore ICache enable
	mov	pc, lr

/*
 * flush_user_cache_all()
 *
 * Clean and invalidate all cache entries in a particular
 * address space.
 */
ENTRY(arm925_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(arm925_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
#else
	/* Flush entries in both segments at once, see NOTE1 above */
	mov	r3, #(CACHE_DENTRIES - 1) << 4	@ 256 entries in segment
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean+invalidate D index
	subs	r3, r3, #1 << 4
	bcs	2b				@ entries 255 to 0
#endif
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr
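/*
 * NOTE (editorial, not in the original source): CACHE_DLIMIT (8192)
 * matches the D-cache geometry defined above: 2 segments x 256
 * entries x 16 bytes per line = 8192 bytes. For ranges larger than
 * the cache itself, per-line maintenance presumably costs more than
 * a full clean, which is why arm925_flush_user_cache_range below
 * falls back to __flush_whole_cache beyond this limit.
 */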
/*
 * flush_user_cache_range(start, end, flags)
 *
 * Clean and invalidate a range of cache entries in the
 * specified address range.
 *
 * - start	- start address (inclusive)
 * - end	- end address (exclusive)
 * - flags	- vm_flags describing address space
 */
ENTRY(arm925_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bgt	__flush_whole_cache
1:	tst	r2, #VM_EXEC
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
#else
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
#endif
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm925_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm925_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * flush_kern_dcache_page(void *page)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr	- page aligned address
 */
ENTRY(arm925_flush_kern_dcache_page)
	add	r1, r0, #PAGE_SZ
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
ENTRY(arm925_dma_inv_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	tst	r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
#endif
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
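/*
 * NOTE (editorial, not in the original source): arm925_dma_inv_range
 * above cleans any line only partially covered by an unaligned
 * 'start' or 'end' before invalidating, as its comment requires;
 * otherwise dirty data outside the range that happens to share a
 * cache line with it would be silently discarded.
 */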
/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
ENTRY(arm925_dma_clean_range)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm925_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
#else
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

ENTRY(arm925_cache_fns)
	.long	arm925_flush_kern_cache_all
	.long	arm925_flush_user_cache_all
	.long	arm925_flush_user_cache_range
	.long	arm925_coherent_kern_range
	.long	arm925_coherent_user_range
	.long	arm925_flush_kern_dcache_page
	.long	arm925_dma_inv_range
	.long	arm925_dma_clean_range
	.long	arm925_dma_flush_range

ENTRY(cpu_arm925_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/* =============================== PageTable ============================== */

/*
 * cpu_arm925_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_arm925_switch_mm)
#ifdef CONFIG_MMU
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
#else
	/* Flush entries in both segments at once, see NOTE1 above */
	mov	r3, #(CACHE_DENTRIES - 1) << 4	@ 256 entries in segment
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean & invalidate D index
	subs	r3, r3, #1 << 4
	bcs	2b				@ entries 255 to 0
#endif
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mov	pc, lr

/*
 * cpu_arm925_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_arm925_set_pte_ext)
#ifdef CONFIG_MMU
	str	r1, [r0], #-2048		@ linux version

	eor	r1, r1, #L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_WRITE | L_PTE_DIRTY

	bic	r2, r1, #PTE_SMALL_AP_MASK
	bic	r2, r2, #PTE_TYPE_MASK
	orr	r2, r2, #PTE_TYPE_SMALL

	tst	r1, #L_PTE_USER			@ User?
	orrne	r2, r2, #PTE_SMALL_AP_URO_SRW

	tst	r1, #L_PTE_WRITE | L_PTE_DIRTY	@ Write and Dirty?
	orreq	r2, r2, #PTE_SMALL_AP_UNO_SRW

	tst	r1, #L_PTE_PRESENT | L_PTE_YOUNG	@ Present and Young?
	movne	r2, #0

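	/*
	 * NOTE (editorial, not in the original source): the test below
	 * matches cacheable small pages, i.e. hardware PTEs with the
	 * small-page type (bits [1:0] = 10) and the C bit (bit 3) set,
	 * and then clears the B bit (bit 4's neighbor, bit 2), so that
	 * with a write-through D-cache such mappings end up C=1, B=0
	 * (write-through) rather than C=1, B=1 (write-back).
	 */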
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	eor	r3, r2, #0x0a			@ C & small page?
	tst	r3, #0x0b
	biceq	r2, r2, #4
#endif
	str	r2, [r0]			@ hardware version
	mov	r0, r0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif /* CONFIG_MMU */
	mov	pc, lr

	__INIT

	.type	__arm925_setup, #function
__arm925_setup:
	mov	r0, #0
#if defined(CONFIG_CPU_ICACHE_STREAMING_DISABLE)
	orr	r0, r0, #1 << 7
#endif

	/* Transparent on, D-cache clean & flush mode. See NOTE2 above */
	orr	r0, r0, #1 << 1			@ transparent mode on
	mcr	p15, 0, r0, c15, c1, 0		@ write TI config register

	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r0, #4				@ disable write-back on caches explicitly
	mcr	p15, 7, r0, c15, c0, 0
#endif

	adr	r5, arm925_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
	orr	r0, r0, #0x4000			@ .1.. .... .... ....
#endif
	mov	pc, lr
	.size	__arm925_setup, . - __arm925_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * .011 0001 ..11 1101
	 *
	 */
	.type	arm925_crval, #object
arm925_crval:
	crval	clear=0x00007f3f, mmuset=0x0000313d, ucset=0x00001130

	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */
	.type	arm925_processor_functions, #object
arm925_processor_functions:
	.word	v4t_early_abort
	.word	cpu_arm925_proc_init
	.word	cpu_arm925_proc_fin
	.word	cpu_arm925_reset
	.word	cpu_arm925_do_idle
	.word	cpu_arm925_dcache_clean_area
	.word	cpu_arm925_switch_mm
	.word	cpu_arm925_set_pte_ext
	.size	arm925_processor_functions, . - arm925_processor_functions

	.section ".rodata"

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv4t"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v4"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_arm925_name, #object
cpu_arm925_name:
	.asciz	"ARM925T"
	.size	cpu_arm925_name, . - cpu_arm925_name

	.align

	.section ".proc.info.init", #alloc, #execinstr

	.type	__arm925_proc_info,#object
__arm925_proc_info:
	.long	0x54029250
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__arm925_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm925_name
	.long	arm925_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	arm925_cache_fns
	.size	__arm925_proc_info, . - __arm925_proc_info
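/*
 * NOTE (editorial, not in the original source): the __arm915_proc_info
 * record below is identical to __arm925_proc_info apart from the CPU
 * ID match value (0x54029150 vs 0x54029250), so the ARM915T variant
 * reuses the ARM925T setup routine, name, and function tables.
 */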
	.type	__arm915_proc_info,#object
__arm915_proc_info:
	.long	0x54029150
	.long	0xfffffff0
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__arm925_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm925_name
	.long	arm925_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	arm925_cache_fns
	.size	__arm915_proc_info, . - __arm915_proc_info
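/*
 * NOTE (editorial, not in the original source): records placed in
 * ".proc.info.init" are scanned at boot and matched against the CP15
 * c0 processor ID using each record's value/mask pair; the matching
 * record supplies the setup routine (__arm925_setup here) and the
 * processor, TLB, user, and cache function tables used above.
 */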