/*
 *  linux/arch/arm/mm/proc-arm1020.S: MMU functions for ARM1020
 *
 *  Copyright (C) 2000 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 * These are the low level assembler for performing cache and TLB
 * functions on the arm1020.
 *
 *  CONFIG_CPU_ARM1020_CPU_IDLE -> nohlt
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>

#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be invalidated
 * using the single invalidate entry instructions.  Anything larger
 * than this, and we go for the whole cache.
 *
 * This value should be chosen such that we choose the cheapest
 * alternative.
 */
#define MAX_AREA_SIZE	32768

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	16

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 */
#define CACHE_DLIMIT	32768

	.text
/*
 * cpu_arm1020_proc_init()
 */
ENTRY(cpu_arm1020_proc_init)
	mov	pc, lr

/*
 * cpu_arm1020_proc_fin()
 */
ENTRY(cpu_arm1020_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	mov	pc, lr

/*
 * cpu_arm1020_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_arm1020_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	mov	pc, r0

/*
 * cpu_arm1020_do_idle()
 */
	.align	5
ENTRY(cpu_arm1020_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	mov	pc, lr

/* ================================= CACHE ================================ */

	.align	5
/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(arm1020_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(arm1020_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	r1, #(CACHE_DSEGMENTS - 1) << 5	@ 16 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean+invalidate D index
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b				@ segments 15 to 0
#endif
	tst	r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
#endif
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start	- start address (inclusive)
 * - end	- end address (exclusive)
 * - flags	- vm_flags for this space
 */
ENTRY(arm1020_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, ip, c7, c10, 4
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	tst	r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
#endif
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm1020_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm1020_coherent_user_range)
	mov	ip, #0
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcr	p15, 0, ip, c7, c10, 4
1:
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
#endif
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr	- kernel address
 * - size	- region size
 */
ENTRY(arm1020_flush_kern_dcache_area)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
arm1020_dma_inv_range:
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, ip, c7, c10, 4
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, ip, c7, c10, 4
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 *
 * (same as v4wb)
 */
arm1020_dma_clean_range:
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start	- virtual start address
 * - end	- virtual end address
 */
ENTRY(arm1020_dma_flush_range)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcr	p15, 0, ip, c7, c10, 4
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_map_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(arm1020_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	arm1020_dma_clean_range
	bcs	arm1020_dma_inv_range
	b	arm1020_dma_flush_range
ENDPROC(arm1020_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start	- kernel virtual start address
 * - size	- size of region
 * - dir	- DMA direction
 */
ENTRY(arm1020_dma_unmap_area)
	mov	pc, lr
ENDPROC(arm1020_dma_unmap_area)

ENTRY(arm1020_cache_fns)
	.long	arm1020_flush_kern_cache_all
	.long	arm1020_flush_user_cache_all
	.long	arm1020_flush_user_cache_range
	.long	arm1020_coherent_kern_range
	.long	arm1020_coherent_user_range
	.long	arm1020_flush_kern_dcache_area
	.long	arm1020_dma_map_area
	.long	arm1020_dma_unmap_area
	.long	arm1020_dma_flush_range

	.align	5
ENTRY(cpu_arm1020_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mov	ip, #0
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#endif
	mov	pc, lr

/* =============================== PageTable ============================== */

/*
 * cpu_arm1020_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_arm1020_switch_mm)
#ifdef CONFIG_MMU
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, r3, c7, c10, 4
	mov	r1, #0xF			@ 16 segments
1:	mov	r3, #0x3F			@ 64 entries
2:	mov	ip, r3, LSL #26			@ shift up entry
	orr	ip, ip, r1, LSL #5		@ shift in/up index
	mcr	p15, 0, ip, c7, c14, 2		@ Clean & Inval DCache entry
	mov	ip, #0
	mcr	p15, 0, ip, c7, c10, 4
	subs	r3, r3, #1
	cmp	r3, #0
	bge	2b				@ entries 3F to 0
	subs	r1, r1, #1
	cmp	r1, #0
	bge	1b				@ segments 15 to 0

#endif
	mov	r1, #0
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcr	p15, 0, r1, c7, c5, 0		@ invalidate I cache
#endif
	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
#endif /* CONFIG_MMU */
	mov	pc, lr

/*
 * cpu_arm1020_set_pte(ptep, pte)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_arm1020_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext
	mov	r0, r0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, r0, c7, c10, 4
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif /* CONFIG_MMU */
	mov	pc, lr

	__INIT

	.type	__arm1020_setup, #function
__arm1020_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif

	adr	r5, arm1020_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
	orr	r0, r0, #0x4000			@ .R.. .... .... ....
#endif
	mov	pc, lr
	.size	__arm1020_setup, . - __arm1020_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * .011 1001 ..11 0101
	 */
	.type	arm1020_crval, #object
arm1020_crval:
	crval	clear=0x0000593f, mmuset=0x00003935, ucset=0x00001930

	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */
	.type	arm1020_processor_functions, #object
arm1020_processor_functions:
	.word	v4t_early_abort
	.word	legacy_pabort
	.word	cpu_arm1020_proc_init
	.word	cpu_arm1020_proc_fin
	.word	cpu_arm1020_reset
	.word	cpu_arm1020_do_idle
	.word	cpu_arm1020_dcache_clean_area
	.word	cpu_arm1020_switch_mm
	.word	cpu_arm1020_set_pte_ext
	.size	arm1020_processor_functions, . - arm1020_processor_functions

	.section ".rodata"

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv5t"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v5"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_arm1020_name, #object
cpu_arm1020_name:
	.ascii	"ARM1020"
#ifndef CONFIG_CPU_ICACHE_DISABLE
	.ascii	"i"
#endif
#ifndef CONFIG_CPU_DCACHE_DISABLE
	.ascii	"d"
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	.ascii	"(wt)"
#else
	.ascii	"(wb)"
#endif
#endif
#ifndef CONFIG_CPU_BPREDICT_DISABLE
	.ascii	"B"
#endif
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
	.ascii	"RR"
#endif
	.ascii	"\0"
	.size	cpu_arm1020_name, . - cpu_arm1020_name

	.align

	.section ".proc.info.init", #alloc, #execinstr

	.type	__arm1020_proc_info,#object
__arm1020_proc_info:
	.long	0x4104a200			@ ARM 1020T (Architecture v5T)
	.long	0xff0ffff0
	.long   PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__arm1020_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm1020_name
	.long	arm1020_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	arm1020_cache_fns
	.size	__arm1020_proc_info, . - __arm1020_proc_info