/*
 *  linux/arch/arm/mm/proc-xscale.S
 *
 *  Author:	Nicolas Pitre
 *  Created:	November 2000
 *  Copyright:	(C) 2000, 2001 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * MMU functions for the Intel XScale CPUs
 *
 * 2001 Aug 21:
 *	some contributions by Brett Gaines <brett.w.gaines@intel.com>
 *	Copyright 2001 by Intel Corp.
 *
 * 2001 Sep 08:
 *	Completely revisited, many important fixes
 *	Nicolas Pitre <nico@fluxnic.net>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the area
 * is larger than this, then we flush the whole cache.
 */
#define MAX_AREA_SIZE	32768

/*
 * The cache line size of the I and D cache.
 */
#define CACHELINESIZE	32

/*
 * The size of the data cache.
 */
#define CACHESIZE	32768

/*
 * Virtual address used to allocate the cache when flushed
 *
 * This must be an address range which is _never_ used.  It should
 * apparently have a mapping in the corresponding page table for
 * compatibility with future CPUs that _could_ require it.  For instance we
 * don't care.
 *
 * This must be aligned on a 2*CACHESIZE boundary.  The code selects one of
 * the 2 areas in alternance each time the clean_d_cache macro is used.
 * Without this the XScale core exhibits cache eviction problems and no one
 * knows why.
 *
 * Reminder: the vector table is located at 0xffff0000-0xffff0fff.
 */
#define	CLEAN_ADDR	0xfffe0000

/*
 * This macro is used to wait for a CP15 write and is needed
 * when we have to ensure that the last operation to the co-pro
 * was completed before continuing with operation.
 */
	.macro	cpwait, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	mov	\rd, \rd			@ wait for completion
	sub	pc, pc, #4			@ flush instruction pipeline
	.endm

	.macro	cpwait_ret, lr, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	sub	pc, \lr, \rd, LSR #32		@ wait for completion and
						@ flush instruction pipeline
	.endm

/*
 * This macro cleans the entire dcache using line allocate.
 * The main loop has been unrolled to reduce loop overhead.
 * rd and rs are two scratch registers.
 *
 * It toggles clean_addr between the two CLEAN_ADDR halves and then
 * line-allocates CACHESIZE bytes from the chosen half, evicting (and
 * thereby writing back) every dirty line in the D cache.
 */
	.macro  clean_d_cache, rd, rs
	ldr	\rs, =clean_addr
	ldr	\rd, [\rs]
	eor	\rd, \rd, #CACHESIZE		@ flip to the other half
	str	\rd, [\rs]
	add	\rs, \rd, #CACHESIZE		@ \rs = end of the area
1:	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	mcr	p15, 0, \rd, c7, c2, 5		@ allocate D cache line
	add	\rd, \rd, #CACHELINESIZE
	teq	\rd, \rs
	bne	1b
	.endm

	.data
@ Ping-pong pointer alternating between the two CLEAN_ADDR cache-sized areas.
clean_addr:	.word	CLEAN_ADDR

	.text

/*
 * cpu_xscale_proc_init()
 *
 * Nothing too exciting at the moment
 */
ENTRY(cpu_xscale_proc_init)
	@ enable write buffer coalescing. Some bootloader disable it
	mrc	p15, 0, r1, c1, c0, 1
	bic	r1, r1, #1
	mcr	p15, 0, r1, c1, c0, 1
	mov	pc, lr

/*
 * cpu_xscale_proc_fin()
 *
 * Prepare for suspend/reboot: disable the I and D caches.
 */
ENTRY(cpu_xscale_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...IZ...........
	bic	r0, r0, #0x0006			@ .............CA.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	mov	pc, lr

/*
 * cpu_xscale_reset(loc)
 *
 * Perform a soft reset of the system.
 * Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 *
 * Beware PXA270 erratum E7.
 */
	.align	5
ENTRY(cpu_xscale_reset)
	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r1			@ reset CPSR
	mcr	p15, 0, r1, c10, c4, 1		@ unlock I-TLB
	mcr	p15, 0, r1, c8, c5, 0		@ invalidate I-TLB
	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0086			@ ........B....CA.
	bic	r1, r1, #0x3900			@ ..VIZ..S........
	sub	pc, pc, #4			@ flush pipeline
	@ *** cache line aligned ***
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x0001			@ ...............M
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches & BTB
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	@ CAUTION: MMU turned off from this point. We count on the pipeline
	@ already containing those two last instructions to survive.
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mov	pc, r0				@ jump to loc with MMU off

/*
 * cpu_xscale_do_idle()
 *
 * Cause the processor to idle
 *
 * For now we do nothing but go to idle mode for every case
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 */
	.align	5

ENTRY(cpu_xscale_do_idle)
	mov	r0, #1
	mcr	p14, 0, r0, c7, c0, 0		@ Go to IDLE
	mov	pc, lr

/* ================================= CACHE ================================ */

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(xscale_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(xscale_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	clean_d_cache r0, r1			@ clean whole D cache (clobbers r0, r1)
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * flush_user_cache_range(start, end, vm_flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (may not be aligned)
 * - end   - end address (exclusive, may not be aligned)
 * - vm_flags - vma->vm_flags of the address space (VM_EXEC tested)
 */
	.align	5
ENTRY(xscale_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #MAX_AREA_SIZE
	bhs	__flush_whole_cache		@ big range: cheaper to flush it all

1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ Invalidate I cache line
	mcr	p15, 0, r0, c7, c10, 1		@ Clean D cache line
	mcr	p15, 0, r0, c7, c6, 1		@ Invalidate D cache line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ Invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start  - virtual start address
 * - end    - virtual end address
 *
 * Note: single I-cache line invalidation isn't used here since
 * it also trashes the mini I-cache used by JTAG debuggers.
 */
ENTRY(xscale_coherent_kern_range)
	bic	r0, r0, #CACHELINESIZE - 1	@ align start down to a cache line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start  - virtual start address
 * - end    - virtual end address
 */
ENTRY(xscale_coherent_user_range)
	bic	r0, r0, #CACHELINESIZE - 1	@ align start down to a cache line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ Invalidate I cache entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 6		@ Invalidate BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr - kernel address
 * - size - region size
 */
ENTRY(xscale_flush_kern_dcache_area)
	add	r1, r0, r1			@ r1 = end address
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start  - virtual start address
 * - end    - virtual end address
 */
xscale_dma_inv_range:
	tst	r0, #CACHELINESIZE - 1
	bic	r0, r0, #CACHELINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry (partial first line)
	tst	r1, #CACHELINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry (partial last line)
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start  - virtual start address
 * - end    - virtual end address
 */
xscale_dma_clean_range:
	bic	r0, r0, #CACHELINESIZE - 1	@ align start down to a cache line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mov	pc, lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
348 * 349 * - start - virtual start address 350 * - end - virtual end address 351 */ 352ENTRY(xscale_dma_flush_range) 353 bic r0, r0, #CACHELINESIZE - 1 3541: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 355 mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry 356 add r0, r0, #CACHELINESIZE 357 cmp r0, r1 358 blo 1b 359 mcr p15, 0, r0, c7, c10, 4 @ Drain Write (& Fill) Buffer 360 mov pc, lr 361 362/* 363 * dma_map_area(start, size, dir) 364 * - start - kernel virtual start address 365 * - size - size of region 366 * - dir - DMA direction 367 */ 368ENTRY(xscale_dma_map_area) 369 add r1, r1, r0 370 cmp r2, #DMA_TO_DEVICE 371 beq xscale_dma_clean_range 372 bcs xscale_dma_inv_range 373 b xscale_dma_flush_range 374ENDPROC(xscale_dma_map_area) 375 376/* 377 * dma_map_area(start, size, dir) 378 * - start - kernel virtual start address 379 * - size - size of region 380 * - dir - DMA direction 381 */ 382ENTRY(xscale_dma_a0_map_area) 383 add r1, r1, r0 384 teq r2, #DMA_TO_DEVICE 385 beq xscale_dma_clean_range 386 b xscale_dma_flush_range 387ENDPROC(xscsale_dma_a0_map_area) 388 389/* 390 * dma_unmap_area(start, size, dir) 391 * - start - kernel virtual start address 392 * - size - size of region 393 * - dir - DMA direction 394 */ 395ENTRY(xscale_dma_unmap_area) 396 mov pc, lr 397ENDPROC(xscale_dma_unmap_area) 398 399ENTRY(xscale_cache_fns) 400 .long xscale_flush_kern_cache_all 401 .long xscale_flush_user_cache_all 402 .long xscale_flush_user_cache_range 403 .long xscale_coherent_kern_range 404 .long xscale_coherent_user_range 405 .long xscale_flush_kern_dcache_area 406 .long xscale_dma_map_area 407 .long xscale_dma_unmap_area 408 .long xscale_dma_flush_range 409 410ENTRY(xscale_80200_A0_A1_cache_fns) 411 .long xscale_flush_kern_cache_all 412 .long xscale_flush_user_cache_all 413 .long xscale_flush_user_cache_range 414 .long xscale_coherent_kern_range 415 .long xscale_coherent_user_range 416 .long xscale_flush_kern_dcache_area 417 .long xscale_dma_a0_map_area 418 .long 
	.long	xscale_dma_flush_range		@ tail of xscale_80200_A0_A1_cache_fns

/*
 * cpu_xscale_dcache_clean_area(addr, size)
 *
 * Clean (write back) the D cache over the given kernel area.
 */
ENTRY(cpu_xscale_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #CACHELINESIZE
	bhi	1b
	mov	pc, lr

/* =============================== PageTable ============================== */

/*
 * cpu_xscale_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_xscale_switch_mm)
	clean_d_cache r1, r2			@ write back all dirty D lines first
	mcr	p15, 0, ip, c7, c5, 0		@ Invalidate I cache & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	cpwait_ret lr, ip			@ return once CP15 write completed

/*
 * cpu_xscale_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 *
 * Errata 40: must set memory to write-through for user read-only pages.
 */
@ Hardware PTE cacheability bits indexed by L_PTE_MT_* memory-type value.
cpu_xscale_mt_table:
	.long	0x00						@ L_PTE_MT_UNCACHED
	.long	PTE_BUFFERABLE					@ L_PTE_MT_BUFFERABLE
	.long	PTE_CACHEABLE					@ L_PTE_MT_WRITETHROUGH
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_WRITEBACK
	.long	PTE_EXT_TEX(1) | PTE_BUFFERABLE			@ L_PTE_MT_DEV_SHARED
	.long	0x00						@ unused
	.long	PTE_EXT_TEX(1) | PTE_CACHEABLE			@ L_PTE_MT_MINICACHE
	.long	PTE_EXT_TEX(1) | PTE_CACHEABLE | PTE_BUFFERABLE	@ L_PTE_MT_WRITEALLOC
	.long	0x00						@ unused
	.long	PTE_BUFFERABLE					@ L_PTE_MT_DEV_WC
	.long	0x00						@ unused
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_DEV_CACHED
	.long	0x00						@ L_PTE_MT_DEV_NONSHARED
	.long	0x00						@ unused
	.long	0x00						@ unused
	.long	0x00						@ unused

	.align	5
ENTRY(cpu_xscale_set_pte_ext)
	xscale_set_pte_ext_prologue

	@
	@ Erratum 40: must set memory to write-through for user read-only pages
	@
	and	ip, r1, #(L_PTE_MT_MASK | L_PTE_USER | L_PTE_WRITE) & ~(4 << 2)
	teq	ip, #L_PTE_MT_WRITEBACK | L_PTE_USER

	moveq	r1, #L_PTE_MT_WRITETHROUGH	@ demote user RO writeback pages
	and	r1, r1, #L_PTE_MT_MASK
	adr	ip, cpu_xscale_mt_table
	ldr	ip, [ip, r1]			@ look up hardware C/B/TEX bits
	bic	r2, r2, #0x0c			@ clear old C/B bits
	orr	r2, r2, ip

	xscale_set_pte_ext_epilogue
	mov	pc, lr

	.ltorg

	.align

	__INIT

	.type	__xscale_setup, #function
__xscale_setup:
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I, D caches & BTB
	mcr	p15, 0, ip, c7, c10, 4		@ Drain Write (& Fill) Buffer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I, D TLBs
	mov	r0, #1 << 6			@ cp6 for IOP3xx and Bulverde
	orr	r0, r0, #1 << 13		@ Its undefined whether this
	mcr	p15, 0, r0, c15, c1, 0		@ affects USR or SVC modes

	adr	r5, xscale_crval
	ldmia	r5, {r5, r6}			@ r5 = clear mask, r6 = set mask
	mrc	p15, 0, r0, c1, c0, 0		@ get control register
	bic	r0, r0, r5
	orr	r0, r0, r6
	mov	pc, lr				@ r0 = value for the ctrl register
	.size	__xscale_setup, . - __xscale_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * ..11 1.01 .... .101
	 *
	 */
	.type	xscale_crval, #object
xscale_crval:
	crval	clear=0x00003b07, mmuset=0x00003905, ucset=0x00001900

	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */

	.type	xscale_processor_functions, #object
ENTRY(xscale_processor_functions)
	.word	v5t_early_abort
	.word	legacy_pabort
	.word	cpu_xscale_proc_init
	.word	cpu_xscale_proc_fin
	.word	cpu_xscale_reset
	.word	cpu_xscale_do_idle
	.word	cpu_xscale_dcache_clean_area
	.word	cpu_xscale_switch_mm
	.word	cpu_xscale_set_pte_ext
	.size	xscale_processor_functions, . - xscale_processor_functions

	.section ".rodata"

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv5te"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v5"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_80200_A0_A1_name, #object
cpu_80200_A0_A1_name:
	.asciz	"XScale-80200 A0/A1"
	.size	cpu_80200_A0_A1_name, . - cpu_80200_A0_A1_name
	.type	cpu_80200_name, #object
cpu_80200_name:
	.asciz	"XScale-80200"
	.size	cpu_80200_name, . - cpu_80200_name

	.type	cpu_80219_name, #object
cpu_80219_name:
	.asciz	"XScale-80219"
	.size	cpu_80219_name, . - cpu_80219_name

	.type	cpu_8032x_name, #object
cpu_8032x_name:
	.asciz	"XScale-IOP8032x Family"
	.size	cpu_8032x_name, . - cpu_8032x_name

	.type	cpu_8033x_name, #object
cpu_8033x_name:
	.asciz	"XScale-IOP8033x Family"
	.size	cpu_8033x_name, . - cpu_8033x_name

	.type	cpu_pxa250_name, #object
cpu_pxa250_name:
	.asciz	"XScale-PXA250"
	.size	cpu_pxa250_name, . - cpu_pxa250_name

	.type	cpu_pxa210_name, #object
cpu_pxa210_name:
	.asciz	"XScale-PXA210"
	.size	cpu_pxa210_name, . - cpu_pxa210_name

	.type	cpu_ixp42x_name, #object
cpu_ixp42x_name:
	.asciz	"XScale-IXP42x Family"
	.size	cpu_ixp42x_name, . - cpu_ixp42x_name

	.type	cpu_ixp43x_name, #object
cpu_ixp43x_name:
	.asciz	"XScale-IXP43x Family"
	.size	cpu_ixp43x_name, . - cpu_ixp43x_name

	.type	cpu_ixp46x_name, #object
cpu_ixp46x_name:
	.asciz	"XScale-IXP46x Family"
	.size	cpu_ixp46x_name, . - cpu_ixp46x_name

	.type	cpu_ixp2400_name, #object
cpu_ixp2400_name:
	.asciz	"XScale-IXP2400"
	.size	cpu_ixp2400_name, . - cpu_ixp2400_name

	.type	cpu_ixp2800_name, #object
cpu_ixp2800_name:
	.asciz	"XScale-IXP2800"
	.size	cpu_ixp2800_name, . - cpu_ixp2800_name

	.type	cpu_pxa255_name, #object
cpu_pxa255_name:
	.asciz	"XScale-PXA255"
	.size	cpu_pxa255_name, . - cpu_pxa255_name

	.type	cpu_pxa270_name, #object
cpu_pxa270_name:
	.asciz	"XScale-PXA270"
	.size	cpu_pxa270_name, . - cpu_pxa270_name
	.align

	.section ".proc.info.init", #alloc, #execinstr

/* proc_info entries: CPU id value/mask, section/IO MMU flags, setup hook */

	.type	__80200_A0_A1_proc_info,#object
__80200_A0_A1_proc_info:
	.long	0x69052000			@ CPU id value
	.long	0xfffffffe			@ CPU id mask
	.long   PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_80200_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_80200_A0_A1_cache_fns	@ erratum cache ops for A0/A1
	.size	__80200_A0_A1_proc_info, . - __80200_A0_A1_proc_info

	.type	__80200_proc_info,#object
__80200_proc_info:
	.long	0x69052000			@ CPU id value
	.long	0xfffffff0			@ CPU id mask
	.long   PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_80200_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__80200_proc_info, . - __80200_proc_info
	.type	__80219_proc_info,#object
__80219_proc_info:
	.long	0x69052e20			@ CPU id value
	.long	0xffffffe0			@ CPU id mask
	.long   PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_80219_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__80219_proc_info, . - __80219_proc_info

	.type	__8032x_proc_info,#object
__8032x_proc_info:
	.long	0x69052420			@ CPU id value
	.long	0xfffff7e0			@ CPU id mask
	.long   PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_8032x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__8032x_proc_info, . - __8032x_proc_info

	.type	__8033x_proc_info,#object
__8033x_proc_info:
	.long	0x69054010			@ CPU id value
	.long	0xfffffd30			@ CPU id mask
	.long   PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_8033x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__8033x_proc_info, . - __8033x_proc_info
	.type	__pxa250_proc_info,#object
__pxa250_proc_info:
	.long	0x69052100			@ CPU id value
	.long	0xfffff7f0			@ CPU id mask
	.long   PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa250_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa250_proc_info, . - __pxa250_proc_info

	.type	__pxa210_proc_info,#object
__pxa210_proc_info:
	.long	0x69052120			@ CPU id value
	.long	0xfffff3f0			@ CPU id mask
	.long   PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa210_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa210_proc_info, . - __pxa210_proc_info

	.type	__ixp2400_proc_info, #object
__ixp2400_proc_info:
	.long	0x69054190			@ CPU id value
	.long	0xfffffff0			@ CPU id mask
	.long   PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp2400_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp2400_proc_info, . - __ixp2400_proc_info
	.type	__ixp2800_proc_info, #object
__ixp2800_proc_info:
	.long	0x690541a0			@ CPU id value
	.long	0xfffffff0			@ CPU id mask
	.long   PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp2800_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp2800_proc_info, . - __ixp2800_proc_info

	.type	__ixp42x_proc_info, #object
__ixp42x_proc_info:
	.long	0x690541c0			@ CPU id value
	.long	0xffffffc0			@ CPU id mask
	.long   PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp42x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp42x_proc_info, . - __ixp42x_proc_info

	.type	__ixp43x_proc_info, #object
__ixp43x_proc_info:
	.long	0x69054040			@ CPU id value
	.long	0xfffffff0			@ CPU id mask
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp43x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp43x_proc_info, . - __ixp43x_proc_info
	.type	__ixp46x_proc_info, #object
__ixp46x_proc_info:
	.long	0x69054200			@ CPU id value
	.long	0xffffff00			@ CPU id mask
	.long   PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_ixp46x_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__ixp46x_proc_info, . - __ixp46x_proc_info

	.type	__pxa255_proc_info,#object
__pxa255_proc_info:
	.long	0x69052d00			@ CPU id value
	.long	0xfffffff0			@ CPU id mask
	.long   PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa255_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa255_proc_info, . - __pxa255_proc_info

	.type	__pxa270_proc_info,#object
__pxa270_proc_info:
	.long	0x69054110			@ CPU id value
	.long	0xfffffff0			@ CPU id mask
	.long   PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xscale_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_pxa270_name
	.long	xscale_processor_functions
	.long	v4wbi_tlb_fns
	.long	xscale_mc_user_fns
	.long	xscale_cache_fns
	.size	__pxa270_proc_info, . - __pxa270_proc_info