/*
 * srmmu.c:  SRMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kdebug.h>
#include <linux/log2.h>
#include <linux/gfp.h>

#include <asm/bitext.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/vaddrs.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/mbus.h>
#include <asm/cache.h>
#include <asm/oplib.h>
#include <asm/asi.h>
#include <asm/msi.h>
#include <asm/mmu_context.h>
#include <asm/io-unit.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/* Now the cpu specific definitions. */
#include <asm/viking.h>
#include <asm/mxcc.h>
#include <asm/ross.h>
#include <asm/tsunami.h>
#include <asm/swift.h>
#include <asm/turbosparc.h>
#include <asm/leon.h>

#include <asm/btfixup.h>

enum mbus_module srmmu_modtype;
static unsigned int hwbug_bitmask;
int vac_cache_size;
int vac_line_size;

extern struct resource sparc_iomap;

extern unsigned long last_valid_pfn;

extern unsigned long page_kernel;

static pgd_t *srmmu_swapper_pg_dir;

#ifdef CONFIG_SMP
#define FLUSH_BEGIN(mm)
#define FLUSH_END
#else
#define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) {
#define FLUSH_END	}
#endif

BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)

int flush_page_for_dma_global = 1;

#ifdef CONFIG_SMP
BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)
#define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page)
#endif

char *srmmu_name;

ctxd_t *srmmu_ctx_table_phys;
static ctxd_t *srmmu_context_table;

int viking_mxcc_present;
static DEFINE_SPINLOCK(srmmu_context_spinlock);

static int is_hypersparc;

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
	return value;
}

static inline void srmmu_set_pte(pte_t *ptep, pte_t pteval)
{
	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

/* The very generic SRMMU page table operations. */
static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}

static int srmmu_cache_pagetables;

/* these will be initialized in srmmu_nocache_calcsize() */
static unsigned long srmmu_nocache_size;
static unsigned long srmmu_nocache_end;

/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
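
/*
 * Worked example of the granularity above (illustrative): with 4K
 * pages SRMMU_NOCACHE_BITMAP_SHIFT is 12 - 4 = 8, so one bitmap bit
 * covers 1 << 8 = 256 bytes, i.e. 64 four-byte PTEs, matching the
 * comment above.
 */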

/* The context table is a nocache user with the biggest alignment needs. */
#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)

void *srmmu_nocache_pool;
void *srmmu_nocache_bitmap;
static struct bit_map srmmu_nocache_map;

static unsigned long srmmu_pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* Just return something that will cause
		 * pfn_valid() to return false.  This makes
		 * copy_one_pte() just copy the PTE over
		 * directly.
		 */
		return ~0UL;
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}

static struct page *srmmu_pmd_page(pmd_t pmd)
{
	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}

static inline unsigned long srmmu_pgd_page(pgd_t pgd)
{ return srmmu_device_memory(pgd_val(pgd))?~0:(unsigned long)__nocache_va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }

static inline int srmmu_pte_none(pte_t pte)
{ return !(pte_val(pte) & 0xFFFFFFF); }

static inline int srmmu_pte_present(pte_t pte)
{ return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }

static inline void srmmu_pte_clear(pte_t *ptep)
{ srmmu_set_pte(ptep, __pte(0)); }

static inline int srmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }

static inline int srmmu_pmd_bad(pmd_t pmd)
{ return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }

static inline int srmmu_pmd_present(pmd_t pmd)
{ return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }

static inline void srmmu_pmd_clear(pmd_t *pmdp) {
	int i;
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
		srmmu_set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
}

static inline int srmmu_pgd_none(pgd_t pgd)
{ return !(pgd_val(pgd) & 0xFFFFFFF); }

static inline int srmmu_pgd_bad(pgd_t pgd)
{ return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }

static inline int srmmu_pgd_present(pgd_t pgd)
{ return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }

static inline void srmmu_pgd_clear(pgd_t * pgdp)
{ srmmu_set_pte((pte_t *)pgdp, __pte(0)); }

static inline pte_t srmmu_pte_wrprotect(pte_t pte)
{ return __pte(pte_val(pte) & ~SRMMU_WRITE);}

static inline pte_t srmmu_pte_mkclean(pte_t pte)
{ return __pte(pte_val(pte) & ~SRMMU_DIRTY);}

static inline pte_t srmmu_pte_mkold(pte_t pte)
{ return __pte(pte_val(pte) & ~SRMMU_REF);}

static inline pte_t srmmu_pte_mkwrite(pte_t pte)
{ return __pte(pte_val(pte) | SRMMU_WRITE);}

static inline pte_t srmmu_pte_mkdirty(pte_t pte)
{ return __pte(pte_val(pte) | SRMMU_DIRTY);}

static inline pte_t srmmu_pte_mkyoung(pte_t pte)
{ return __pte(pte_val(pte) | SRMMU_REF);}
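
/*
 * Note for the conversion functions below (illustrative): an SRMMU pte
 * holds the 36-bit physical address shifted right by 4, which is why a
 * pfn is shifted left by PAGE_SHIFT - 4 instead of PAGE_SHIFT.  E.g.
 * with 4K pages, a page at physical 0x12345000 (pfn 0x12345) encodes
 * as 0x12345 << 8 == 0x12345000 >> 4 == 0x1234500.
 */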

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static pte_t srmmu_mk_pte(struct page *page, pgprot_t pgprot)
{ return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot)); }

static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot)
{ return __pte(((page) >> 4) | pgprot_val(pgprot)); }

static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{ return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot)); }

static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{ srmmu_set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }

static inline void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ srmmu_set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); }

static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = __nocache_pa((unsigned long) ptep) >> 4;
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}

static void srmmu_pmd_populate(pmd_t *pmdp, struct page *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);	/* watch for overflow */
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}

static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
{ return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); }

/* to find an entry in a top-level page table... */
static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
{ return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); }

/* Find an entry in the second-level page table.. */
static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) srmmu_pgd_page(*dir) +
	    ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
{
	void *pte;

	pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
	return (pte_t *) pte +
	    ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

static unsigned long srmmu_swp_type(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static unsigned long srmmu_swp_offset(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static swp_entry_t srmmu_swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}
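
/*
 * Typical use of the nocache allocator defined below (an illustrative
 * sketch, mirroring the pte table handling later in this file):
 *
 *	unsigned long va = srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
 *	if (va)
 *		srmmu_free_nocache(va, PTE_SIZE);
 *
 * Both size and alignment are in bytes; the returned address lies in
 * the uncached region starting at SRMMU_NOCACHE_VADDR.
 */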

/*
 * size: bytes to allocate in the nocache area.
 * align: alignment in bytes.
 * Returns the virtual address of the allocated area.
 */
static unsigned long __srmmu_get_nocache(int size, int align)
{
	int offset;

	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x too small for nocache request\n", size);
		size = SRMMU_NOCACHE_BITMAP_SHIFT;
	}
	if (size & (SRMMU_NOCACHE_BITMAP_SHIFT-1)) {
		printk("Size 0x%x unaligned in nocache request\n", size);
		size += SRMMU_NOCACHE_BITMAP_SHIFT-1;
	}
	BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);

	offset = bit_map_string_get(&srmmu_nocache_map,
				    size >> SRMMU_NOCACHE_BITMAP_SHIFT,
				    align >> SRMMU_NOCACHE_BITMAP_SHIFT);
	if (offset == -1) {
		printk("srmmu: out of nocache %d: %d/%d\n",
		       size, (int) srmmu_nocache_size,
		       srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
		return 0;
	}

	return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
}

static unsigned long srmmu_get_nocache(int size, int align)
{
	unsigned long tmp;

	tmp = __srmmu_get_nocache(size, align);

	if (tmp)
		memset((void *)tmp, 0, size);

	return tmp;
}

static void srmmu_free_nocache(unsigned long vaddr, int size)
{
	int offset;

	if (vaddr < SRMMU_NOCACHE_VADDR) {
		printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
		       vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
		BUG();
	}
	if (vaddr+size > srmmu_nocache_end) {
		printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
		       vaddr, srmmu_nocache_end);
		BUG();
	}
	if (!is_power_of_2(size)) {
		printk("Size 0x%x is not a power of 2\n", size);
		BUG();
	}
	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x is too small\n", size);
		BUG();
	}
	if (vaddr & (size-1)) {
		printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
		BUG();
	}

	offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
	size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	bit_map_clear(&srmmu_nocache_map, offset, size);
}

static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
						 unsigned long end);

extern unsigned long probe_memory(void);	/* in fault.c */
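
/*
 * Worked example for srmmu_nocache_calcsize() below (illustrative,
 * assuming SRMMU_NOCACHE_ALCRATIO is 64, i.e. 256 nocache pages per
 * 64MB of RAM): a 64MB machine has sysmemavail == 65536, giving
 * 65536 / 64 / 1024 * 256 == 256 pages (1MB with 4K pages), before
 * the SRMMU_MIN/MAX_NOCACHE_PAGES clamping is applied.
 */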

/*
 * Reserve nocache dynamically proportionally to the amount of
 * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
 */
static void srmmu_nocache_calcsize(void)
{
	unsigned long sysmemavail = probe_memory() / 1024;
	int srmmu_nocache_npages;

	srmmu_nocache_npages =
		sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;

	// if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
	if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;

	/* anything above 1280 blows up */
	if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;

	srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
	srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
}

static void __init srmmu_nocache_init(void)
{
	unsigned int bitmap_bits;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long paddr, vaddr;
	unsigned long pteval;

	bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
					     SRMMU_NOCACHE_ALIGN_MAX, 0UL);
	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);

	srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);

	srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
	init_mm.pgd = srmmu_swapper_pg_dir;

	srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);

	paddr = __pa((unsigned long)srmmu_nocache_pool);
	vaddr = SRMMU_NOCACHE_VADDR;

	while (vaddr < srmmu_nocache_end) {
		pgd = pgd_offset_k(vaddr);
		pmd = srmmu_pmd_offset(__nocache_fix(pgd), vaddr);
		pte = srmmu_pte_offset(__nocache_fix(pmd), vaddr);

		pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);

		if (srmmu_cache_pagetables)
			pteval |= SRMMU_CACHE;

		srmmu_set_pte(__nocache_fix(pte), __pte(pteval));

		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	flush_cache_all();
	flush_tlb_all();
}

static inline pgd_t *srmmu_get_pgd_fast(void)
{
	pgd_t *pgd = NULL;

	pgd = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	if (pgd) {
		pgd_t *init = pgd_offset_k(0);
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return pgd;
}

static void srmmu_free_pgd_fast(pgd_t *pgd)
{
	srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE);
}

static pmd_t *srmmu_pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
}

static void srmmu_pmd_free(pmd_t * pmd)
{
	srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE);
}

static pte_t *
srmmu_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
}

static pgtable_t
srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long pte;
	struct page *page;

	if ((pte = (unsigned long)srmmu_pte_alloc_one_kernel(mm, address)) == 0)
		return NULL;
	page = pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT );
	pgtable_page_ctor(page);
	return page;
}
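
/*
 * Note: pte tables live in the nocache pool, so the struct page that
 * srmmu_pte_alloc_one() above hands out must be translated back into
 * its nocache virtual address before srmmu_free_nocache() can take it;
 * srmmu_pte_free() below performs exactly that pfn -> physical ->
 * nocache-virtual dance.
 */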

static void srmmu_free_pte_fast(pte_t *pte)
{
	srmmu_free_nocache((unsigned long)pte, PTE_SIZE);
}

static void srmmu_pte_free(pgtable_t pte)
{
	unsigned long p;

	pgtable_page_dtor(pte);
	p = (unsigned long)page_address(pte);	/* Cached address (for test) */
	if (p == 0)
		BUG();
	p = page_to_pfn(pte) << PAGE_SHIFT;	/* Physical address */
	p = (unsigned long) __nocache_va(p);	/* Nocached virtual */
	srmmu_free_nocache(p, PTE_SIZE);
}

/*
 * Hand out a hardware context to @mm, stealing the oldest used one
 * if none are free.
 */
static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ctx_list *ctxp;

	ctxp = ctx_free.next;
	if(ctxp != &ctx_free) {
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}
	ctxp = ctx_used.next;
	if(ctxp->ctx_mm == old_mm)
		ctxp = ctxp->next;
	if(ctxp == &ctx_used)
		panic("out of mmu contexts");
	flush_cache_mm(ctxp->ctx_mm);
	flush_tlb_mm(ctxp->ctx_mm);
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
}

static inline void free_context(int context)
{
	struct ctx_list *ctx_old;

	ctx_old = ctx_list_pool + context;
	remove_from_ctx_list(ctx_old);
	add_to_free_ctxlist(ctx_old);
}


static void srmmu_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
			    struct task_struct *tsk, int cpu)
{
	if(mm->context == NO_CONTEXT) {
		spin_lock(&srmmu_context_spinlock);
		alloc_context(old_mm, mm);
		spin_unlock(&srmmu_context_spinlock);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
	}

	if (sparc_cpu_model == sparc_leon)
		leon_switch_mm();

	if (is_hypersparc)
		hyper_flush_whole_icache();

	srmmu_set_context(mm->context);
}

/* Low level IO area allocation on the SRMMU. */
static inline void srmmu_mapioaddr(unsigned long physaddr,
				   unsigned long virt_addr, int bus_type)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long tmp;

	physaddr &= PAGE_MASK;
	pgdp = pgd_offset_k(virt_addr);
	pmdp = srmmu_pmd_offset(pgdp, virt_addr);
	ptep = srmmu_pte_offset(pmdp, virt_addr);
	tmp = (physaddr >> 4) | SRMMU_ET_PTE;

	/*
	 * I need to test whether this is consistent over all
	 * sun4m's.  The bus_type represents the upper 4 bits of
	 * 36-bit physical address on the I/O space lines...
	 */
	tmp |= (bus_type << 28);
	tmp |= SRMMU_PRIV;
	__flush_page_to_ram(virt_addr);
	srmmu_set_pte(ptep, __pte(tmp));
}

static void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
			     unsigned long xva, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_mapioaddr(xpa, xva, bus);
		xva += PAGE_SIZE;
		xpa += PAGE_SIZE;
	}
	flush_tlb_all();
}

static inline void srmmu_unmapioaddr(unsigned long virt_addr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(virt_addr);
	pmdp = srmmu_pmd_offset(pgdp, virt_addr);
	ptep = srmmu_pte_offset(pmdp, virt_addr);

	/* No need to flush uncacheable page. */
	srmmu_pte_clear(ptep);
}
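
/*
 * Example use of the I/O mapping helpers (an illustrative sketch; the
 * physical address and bus type are made up):
 *
 *	srmmu_mapiorange(0xf, 0x10000000, vaddr, PAGE_SIZE);
 *	...
 *	srmmu_unmapiorange(vaddr, PAGE_SIZE);
 *
 * The bus type lands in pte bits 31:28 and supplies the upper 4 bits
 * of the 36-bit physical I/O address, as noted in srmmu_mapioaddr().
 */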

static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_unmapioaddr(virt_addr);
		virt_addr += PAGE_SIZE;
	}
	flush_tlb_all();
}

/*
 * On the SRMMU we do not have the problems with limited tlb entries
 * for mapping kernel pages, so we just take things from the free page
 * pool.  As a side effect we are putting a little too much pressure
 * on the gfp() subsystem.  This setup also makes the logic of the
 * iommu mapping code a lot easier as we can transparently handle
 * mappings on the kernel stack without any special code, as we
 * needed on the sun4c.
 */
static struct thread_info *srmmu_alloc_thread_info(void)
{
	struct thread_info *ret;

	ret = (struct thread_info *)__get_free_pages(GFP_KERNEL,
						     THREAD_INFO_ORDER);
#ifdef CONFIG_DEBUG_STACK_USAGE
	if (ret)
		memset(ret, 0, PAGE_SIZE << THREAD_INFO_ORDER);
#endif /* DEBUG_STACK_USAGE */

	return ret;
}

static void srmmu_free_thread_info(struct thread_info *ti)
{
	free_pages((unsigned long)ti, THREAD_INFO_ORDER);
}

/* tsunami.S */
extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);
extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_flush_page_to_ram(unsigned long page);
extern void tsunami_flush_page_for_dma(unsigned long page);
extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void tsunami_flush_tlb_all(void);
extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_setup_blockops(void);

static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t *ptep)
{
}

/* swift.S */
extern void swift_flush_cache_all(void);
extern void swift_flush_cache_mm(struct mm_struct *mm);
extern void swift_flush_cache_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end);
extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void swift_flush_page_to_ram(unsigned long page);
extern void swift_flush_page_for_dma(unsigned long page);
extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void swift_flush_tlb_all(void);
extern void swift_flush_tlb_mm(struct mm_struct *mm);
extern void swift_flush_tlb_range(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end);
extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);


/*
 * The following are all MBUS based SRMMU modules, and therefore could
 * be found in a multiprocessor configuration.  On the whole, these
 * chips seem to be much more touchy about DVMA and page tables
 * with respect to cache coherency.
 */
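
/*
 * A note on the Cypress flushes below (inferred from the constants
 * used, not from chip documentation): D-cache tags are read via
 * ASI_M_DATAC_TAG, tag bits 0x60 are taken to mean "valid and
 * modified", and a dummy load through the 0xf0020000 + faddr window
 * is what forces such a dirty line back to memory.
 */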

/* Cypress flushes. */
static void cypress_flush_cache_all(void)
{
	volatile unsigned long cypress_sucks;
	unsigned long faddr, tagval;

	flush_user_windows();
	for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
		__asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
				     "=r" (tagval) :
				     "r" (faddr), "r" (0x40000),
				     "i" (ASI_M_DATAC_TAG));

		/* If modified and valid, kick it. */
		if((tagval & 0x60) == 0x60)
			cypress_sucks = *(unsigned long *)(0xf0020000 + faddr);
	}
}

static void cypress_flush_cache_mm(struct mm_struct *mm)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long flags, faddr;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	local_irq_save(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	faddr = (0x10000 - 0x100);
	goto inside;
	do {
		faddr -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (faddr), "i" (ASI_M_FLUSH_CTX),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(faddr);
	srmmu_set_context(octx);
	local_irq_restore(flags);
	FLUSH_END
}

static void cypress_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	register unsigned long a, b, c, d, e, f, g;
	unsigned long flags, faddr;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	local_irq_save(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	start &= SRMMU_REAL_PMD_MASK;
	while(start < end) {
		faddr = (start + (0x10000 - 0x100));
		goto inside;
		do {
			faddr -= 0x100;
		inside:
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
					     "sta %%g0, [%0 + %2] %1\n\t"
					     "sta %%g0, [%0 + %3] %1\n\t"
					     "sta %%g0, [%0 + %4] %1\n\t"
					     "sta %%g0, [%0 + %5] %1\n\t"
					     "sta %%g0, [%0 + %6] %1\n\t"
					     "sta %%g0, [%0 + %7] %1\n\t"
					     "sta %%g0, [%0 + %8] %1\n\t" : :
					     "r" (faddr),
					     "i" (ASI_M_FLUSH_SEG),
					     "r" (a), "r" (b), "r" (c), "r" (d),
					     "r" (e), "r" (f), "r" (g));
		} while (faddr != start);
		start += SRMMU_REAL_PMD_SIZE;
	}
	srmmu_set_context(octx);
	local_irq_restore(flags);
	FLUSH_END
}

static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	register unsigned long a, b, c, d, e, f, g;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags, line;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	local_irq_save(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
	srmmu_set_context(octx);
	local_irq_restore(flags);
	FLUSH_END
}
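
/*
 * Reminder on the FLUSH_BEGIN/FLUSH_END pairs used above and below:
 * on UP they expand to "if((mm)->context != NO_CONTEXT) { ... }", so
 * flushes are skipped for address spaces that never received a
 * hardware context; on SMP the guard compiles away (see the
 * definitions at the top of this file).
 */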

/* Cypress is copy-back, at least that is how we configure it. */
static void cypress_flush_page_to_ram(unsigned long page)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long line;

	a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
}

/* Cypress is also IO cache coherent. */
static void cypress_flush_page_for_dma(unsigned long page)
{
}

/* Cypress has a unified L2 VIPT cache, in which both instructions and
 * data are stored.  It does not have an onboard icache of any sort,
 * therefore no flush is necessary.
 */
static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void cypress_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

static void cypress_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	__asm__ __volatile__(
	"lda [%0] %3, %%g5\n\t"
	"sta %2, [%0] %3\n\t"
	"sta %%g0, [%1] %4\n\t"
	"sta %%g5, [%0] %3\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context),
	  "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
	: "g5");
	FLUSH_END
}

static void cypress_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long size;

	FLUSH_BEGIN(mm)
	start &= SRMMU_PGDIR_MASK;
	size = SRMMU_PGDIR_ALIGN(end) - start;
	__asm__ __volatile__(
	"lda [%0] %5, %%g5\n\t"
	"sta %1, [%0] %5\n"
	"1:\n\t"
	"subcc %3, %4, %3\n\t"
	"bne 1b\n\t"
	" sta %%g0, [%2 + %3] %6\n\t"
	"sta %%g5, [%0] %5\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200),
	  "r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS),
	  "i" (ASI_M_FLUSH_PROBE)
	: "g5", "cc");
	FLUSH_END
}

static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	FLUSH_BEGIN(mm)
	__asm__ __volatile__(
	"lda [%0] %3, %%g5\n\t"
	"sta %1, [%0] %3\n\t"
	"sta %%g0, [%2] %4\n\t"
	"sta %%g5, [%0] %3\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK),
	  "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
	: "g5");
	FLUSH_END
}

/* viking.S */
extern void viking_flush_cache_all(void);
extern void viking_flush_cache_mm(struct mm_struct *mm);
extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void viking_flush_page_to_ram(unsigned long page);
extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				   unsigned long end);
extern void viking_flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long page);
extern void sun4dsmp_flush_tlb_all(void);
extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
				    unsigned long page);

/* hypersparc.S */
extern void hypersparc_flush_cache_all(void);
extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_flush_page_to_ram(unsigned long page);
extern void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);

/*
 * NOTE: All of this startup code assumes the low 16mb (approx.) of
 *       kernel mappings are done with one single contiguous chunk of
 *       ram.  On small ram machines (classics mainly) we only get
 *       around 8mb mapped for us.
 */
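
/*
 * The "early" variants below run before the nocache pool is mapped at
 * its final virtual address, which is why every access to a freshly
 * allocated table goes through __nocache_fix(); the plain
 * srmmu_allocate_ptable_skeleton() used after srmmu_nocache_init()
 * does not need that detour.
 */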

static void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}

static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
							unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while(start < end) {
		pgdp = pgd_offset_k(start);
		if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *) __srmmu_get_nocache(
			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			srmmu_pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
						  unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while(start < end) {
		pgdp = pgd_offset_k(start);
		if(srmmu_pgd_none(*pgdp)) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
			srmmu_pgd_set(pgdp, pmdp);
		}
		pmdp = srmmu_pmd_offset(pgdp, start);
		if(srmmu_pmd_none(*pmdp)) {
			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
							     PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(ptep, 0, PTE_SIZE);
			srmmu_pmd_set(pmdp, ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}
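
/*
 * How srmmu_inherit_prom_mappings() below detects mapping levels
 * (illustrative): if an address is aligned to a 16MB pgd region and a
 * hardware probe at the last page of that region returns the same pte
 * value, the PROM mapped it with a single pgd-level entry, so it is
 * inherited as one large pte instead of building lower-level tables
 * (what == 2); the pmd-level check (what == 1) works the same way on
 * SRMMU_REAL_PMD_SIZE regions.
 */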

/*
 * This is much cleaner than poking around physical address space
 * looking at the prom's page table directly which is what most
 * other OS's do.  Yuck... this is much better.
 */
static void __init srmmu_inherit_prom_mappings(unsigned long start,
					       unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
	unsigned long prompte;

	while(start <= end) {
		if (start == 0)
			break; /* probably wrap around */
		if(start == 0xfef00000)
			start = KADB_DEBUGGER_BEGVM;
		if(!(prompte = srmmu_hwprobe(start))) {
			start += PAGE_SIZE;
			continue;
		}

		/* A red snapper, see what it really is. */
		what = 0;

		if(!(start & ~(SRMMU_REAL_PMD_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
				what = 1;
		}

		if(!(start & ~(SRMMU_PGDIR_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
			   prompte)
				what = 2;
		}

		pgdp = pgd_offset_k(start);
		if(what == 2) {
			*(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte);
			start += SRMMU_PGDIR_SIZE;
			continue;
		}
		if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
							     PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			srmmu_pmd_set(__nocache_fix(pmdp), ptep);
		}
		if(what == 1) {
			/*
			 * We bend the rule where all 16 PTPs in a pmd_t point
			 * inside the same PTE page, and we leak a perfectly
			 * good hardware PTE piece. Alternatives seem worse.
			 */
			unsigned int x;	/* Index of HW PMD in soft cluster */
			x = (start >> PMD_SHIFT) & 15;
			*(unsigned long *)__nocache_fix(&pmdp->pmdv[x]) = prompte;
			start += SRMMU_REAL_PMD_SIZE;
			continue;
		}
		ptep = srmmu_pte_offset(__nocache_fix(pmdp), start);
		*(pte_t *)__nocache_fix(ptep) = __pte(prompte);
		start += PAGE_SIZE;
	}
}

#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)

/* Create a third-level SRMMU 16MB page mapping. */
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long big_pte;

	big_pte = KERNEL_PTE(phys_base >> 4);
	*(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
}

/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
	unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
	unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
	unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
	/* Map "low" memory only */
	const unsigned long min_vaddr = PAGE_OFFSET;
	const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;

	if (vstart < min_vaddr || vstart >= max_vaddr)
		return vstart;

	if (vend > max_vaddr || vend < min_vaddr)
		vend = max_vaddr;

	while(vstart < vend) {
		do_large_mapping(vstart, pstart);
		vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
	}
	return vstart;
}

static inline void memprobe_error(char *msg)
{
	prom_printf(msg);
	prom_printf("Halting now...\n");
	prom_halt();
}

static inline void map_kernel(void)
{
	int i;

	if (phys_base > 0) {
		do_large_mapping(PAGE_OFFSET, phys_base);
	}

	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
	}

	BTFIXUPSET_SIMM13(user_ptrs_per_pgd, PAGE_OFFSET / SRMMU_PGDIR_SIZE);
}
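
/*
 * Worked example for map_spbank() above (illustrative): a 20MB bank
 * at physical address 0 has vend rounded up by SRMMU_PGDIR_ALIGN()
 * to 32MB above vbase, so the loop emits two 16MB pgd-level mappings
 * via do_large_mapping().
 */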

/* Paging initialization on the Sparc Reference MMU. */
extern void sparc_context_init(int);

void (*poke_srmmu)(void) __cpuinitdata = NULL;

extern unsigned long bootmem_init(unsigned long *pages_avail);

void __init srmmu_paging_init(void)
{
	int i, cpunode;
	char node_str[128];
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pages_avail;

	sparc_iomap.start = SUN4M_IOBASE_VADDR;	/* 16MB of IOSPACE on all sun4m's. */

	if (sparc_cpu_model == sun4d)
		num_contexts = 65536; /* We know it is Viking */
	else {
		/* Find the number of contexts on the srmmu. */
		cpunode = prom_getchild(prom_root_node);
		num_contexts = 0;
		while(cpunode != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if(!strcmp(node_str, "cpu")) {
				num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
				break;
			}
			cpunode = prom_getsibling(cpunode);
		}
	}

	if(!num_contexts) {
		prom_printf("Something wrong, can't find cpu node in paging_init.\n");
		prom_halt();
	}

	pages_avail = 0;
	last_valid_pfn = bootmem_init(&pages_avail);

	srmmu_nocache_calcsize();
	srmmu_nocache_init();
	srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
	map_kernel();
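
	/*
	 * Sizing example for the context table allocated below
	 * (illustrative, assuming 4-byte context descriptors): on
	 * sun4d num_contexts is 65536, so this requests a 256KB table
	 * aligned to 256KB, satisfying the physical alignment rule
	 * stated in the comment.
	 */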

	/* ctx table has to be physically aligned to its size */
	srmmu_context_table = (ctxd_t *)__srmmu_get_nocache(num_contexts*sizeof(ctxd_t), num_contexts*sizeof(ctxd_t));
	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);

	for(i = 0; i < num_contexts; i++)
		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);

	flush_cache_all();
	srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
#ifdef CONFIG_SMP
	/* Stop from hanging here... */
	local_flush_tlb_all();
#else
	flush_tlb_all();
#endif
	poke_srmmu();

	srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
	srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);

	srmmu_allocate_ptable_skeleton(
		__fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
	srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);

	pgd = pgd_offset_k(PKMAP_BASE);
	pmd = srmmu_pmd_offset(pgd, PKMAP_BASE);
	pte = srmmu_pte_offset(pmd, PKMAP_BASE);
	pkmap_page_table = pte;

	flush_cache_all();
	flush_tlb_all();

	sparc_context_init(num_contexts);

	kmap_init();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = max_low_pfn - pfn_base;

		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		npages = highend_pfn - max_low_pfn;
		zones_size[ZONE_HIGHMEM] = npages;
		zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();

		free_area_init_node(0, zones_size, pfn_base, zholes_size);
	}
}

static void srmmu_mmu_info(struct seq_file *m)
{
	seq_printf(m,
		   "MMU type\t: %s\n"
		   "contexts\t: %d\n"
		   "nocache total\t: %ld\n"
		   "nocache used\t: %d\n",
		   srmmu_name,
		   num_contexts,
		   srmmu_nocache_size,
		   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
}

static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
}

static void srmmu_destroy_context(struct mm_struct *mm)
{
	if(mm->context != NO_CONTEXT) {
		flush_cache_mm(mm);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
		flush_tlb_mm(mm);
		spin_lock(&srmmu_context_spinlock);
		free_context(mm->context);
		spin_unlock(&srmmu_context_spinlock);
		mm->context = NO_CONTEXT;
	}
}
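
/*
 * Context lifecycle: srmmu_switch_mm() allocates a hardware context
 * lazily on the first switch to an mm (possibly stealing the oldest
 * used one in alloc_context()), and srmmu_destroy_context() above
 * points the table slot back at srmmu_swapper_pg_dir before returning
 * the context number to the free list.
 */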

/* Init various srmmu chip types. */
static void __init srmmu_is_bad(void)
{
	prom_printf("Could not determine SRMMU chip type.\n");
	prom_halt();
}

static void __init init_vac_layout(void)
{
	int nd, cache_lines;
	char node_str[128];
#ifdef CONFIG_SMP
	int cpu = 0;
	unsigned long max_size = 0;
	unsigned long min_line_size = 0x10000000;
#endif

	nd = prom_getchild(prom_root_node);
	while((nd = prom_getsibling(nd)) != 0) {
		prom_getstring(nd, "device_type", node_str, sizeof(node_str));
		if(!strcmp(node_str, "cpu")) {
			vac_line_size = prom_getint(nd, "cache-line-size");
			if (vac_line_size == -1) {
				prom_printf("can't determine cache-line-size, "
					    "halting.\n");
				prom_halt();
			}
			cache_lines = prom_getint(nd, "cache-nlines");
			if (cache_lines == -1) {
				prom_printf("can't determine cache-nlines, halting.\n");
				prom_halt();
			}

			vac_cache_size = cache_lines * vac_line_size;
#ifdef CONFIG_SMP
			if(vac_cache_size > max_size)
				max_size = vac_cache_size;
			if(vac_line_size < min_line_size)
				min_line_size = vac_line_size;
			cpu++;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				break;
#else
			break;
#endif
		}
	}
	if(nd == 0) {
		prom_printf("No CPU nodes found, halting.\n");
		prom_halt();
	}
#ifdef CONFIG_SMP
	vac_cache_size = max_size;
	vac_line_size = min_line_size;
#endif
	printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
	       (int)vac_cache_size, (int)vac_line_size);
}

static void __cpuinit poke_hypersparc(void)
{
	volatile unsigned long clear;
	unsigned long mreg = srmmu_get_mmureg();

	hyper_flush_unconditional_combined();

	mreg &= ~(HYPERSPARC_CWENABLE);
	mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
	mreg |= (HYPERSPARC_CMODE);

	srmmu_set_mmureg(mreg);

	put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
	hyper_flush_whole_icache();
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();
}

static void __init init_hypersparc(void)
{
	srmmu_name = "ROSS HyperSparc";
	srmmu_modtype = HyperSparc;

	init_vac_layout();

	is_hypersparc = 1;

	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP);

	poke_srmmu = poke_hypersparc;

	hypersparc_setup_blockops();
}
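
/*
 * The remaining init_*() routines follow the same pattern as
 * init_hypersparc() above: btfixup the per-chip cache/TLB flush entry
 * points once at boot, then set poke_srmmu to the routine that
 * actually switches the chip's caches on.
 */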

static void __cpuinit poke_cypress(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long faddr, tagval;
	volatile unsigned long cypress_sucks;
	volatile unsigned long clear;

	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();

	if (!(mreg & CYPRESS_CENABLE)) {
		for(faddr = 0x0; faddr < 0x10000; faddr += 0x20) {
			__asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t"
					     "sta %%g0, [%0] %2\n\t" : :
					     "r" (faddr), "r" (0x40000),
					     "i" (ASI_M_DATAC_TAG));
		}
	} else {
		for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
			__asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
					     "=r" (tagval) :
					     "r" (faddr), "r" (0x40000),
					     "i" (ASI_M_DATAC_TAG));

			/* If modified and valid, kick it. */
			if((tagval & 0x60) == 0x60)
				cypress_sucks = *(unsigned long *)
						    (0xf0020000 + faddr);
		}
	}

	/* And one more, for our good neighbor, Mr. Broken Cypress. */
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();

	mreg |= (CYPRESS_CENABLE | CYPRESS_CMODE);
	srmmu_set_mmureg(mreg);
}

static void __init init_cypress_common(void)
{
	init_vac_layout();

	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP);

	poke_srmmu = poke_cypress;
}

static void __init init_cypress_604(void)
{
	srmmu_name = "ROSS Cypress-604(UP)";
	srmmu_modtype = Cypress;
	init_cypress_common();
}

static void __init init_cypress_605(unsigned long mrev)
{
	srmmu_name = "ROSS Cypress-605(MP)";
	if(mrev == 0xe) {
		srmmu_modtype = Cypress_vE;
		hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
	} else {
		if(mrev == 0xd) {
			srmmu_modtype = Cypress_vD;
			hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
		} else {
			srmmu_modtype = Cypress;
		}
	}
	init_cypress_common();
}

static void __cpuinit poke_swift(void)
{
	unsigned long mreg;

	/* Clear any crap from the cache or else... */
	swift_flush_cache_all();

	/* Enable I & D caches */
	mreg = srmmu_get_mmureg();
	mreg |= (SWIFT_IE | SWIFT_DE);
	/*
	 * The Swift branch folding logic is completely broken.  At
	 * trap time, if things are just right, it can mistakenly
	 * think that a trap is coming from kernel mode when in fact
	 * it is coming from user mode (it mis-executes the branch in
	 * the trap code).  So you see things like crashme completely
	 * hosing your machine which is completely unacceptable.  Turn
	 * this shit off... nice job Fujitsu.
	 */
	mreg &= ~(SWIFT_BF);
	srmmu_set_mmureg(mreg);
}
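
/*
 * The Swift mask revision is read below with a bypass load from
 * SWIFT_MASKID_ADDR; the "srl %0, 0x18, %0" keeps bits 31:24 of the
 * raw word, so e.g. a raw value of 0x25000000 decodes to swift_rev
 * 0x25 (illustrative).
 */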

#define SWIFT_MASKID_ADDR 0x10003018
static void __init init_swift(void)
{
	unsigned long swift_rev;

	__asm__ __volatile__("lda [%1] %2, %0\n\t"
			     "srl %0, 0x18, %0\n\t" :
			     "=r" (swift_rev) :
			     "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
	srmmu_name = "Fujitsu Swift";
	switch(swift_rev) {
	case 0x11:
	case 0x20:
	case 0x23:
	case 0x30:
		srmmu_modtype = Swift_lots_o_bugs;
		hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
		break;
	case 0x25:
	case 0x31:
		srmmu_modtype = Swift_bad_c;
		hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
		/*
		 * You see Sun allude to this hardware bug but never
		 * admit things directly, they'll say things like,
		 * "the Swift chip cache problems" or similar.
		 */
		break;
	default:
		srmmu_modtype = Swift_ok;
		break;
	}

	BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM);

	flush_page_for_dma_global = 0;

	poke_srmmu = poke_swift;
}

static void turbosparc_flush_cache_all(void)
{
	flush_user_windows();
	turbosparc_idflash_clear();
}

static void turbosparc_flush_cache_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	if (vma->vm_flags & VM_EXEC)
		turbosparc_flush_icache();
	turbosparc_flush_dcache();
	FLUSH_END
}
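
/*
 * TurboSparc offers no selective user-space flushes: the cache
 * routines above all fall back to a full I+D flash clear, and the TLB
 * routines below likewise flush the whole TLB regardless of the range
 * asked for.
 */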

/* TurboSparc is copy-back, if we turn it on, but this does not work. */
static void turbosparc_flush_page_to_ram(unsigned long page)
{
#ifdef TURBOSPARC_WRITEBACK
	volatile unsigned long clear;

	if (srmmu_hwprobe(page))
		turbosparc_flush_page_cache(page);
	clear = srmmu_get_fstatus();
#endif
}

static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void turbosparc_flush_page_for_dma(unsigned long page)
{
	turbosparc_flush_dcache();
}

static void turbosparc_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}


static void __cpuinit poke_turbosparc(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long ccreg;

	/* Clear any crap from the cache or else... */
	turbosparc_flush_cache_all();
	mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* Temporarily disable I & D caches */
	mreg &= ~(TURBOSPARC_PCENABLE);		/* Don't check parity */
	srmmu_set_mmureg(mreg);

	ccreg = turbosparc_get_ccreg();

#ifdef TURBOSPARC_WRITEBACK
	ccreg |= (TURBOSPARC_SNENABLE);		/* Do DVMA snooping in Dcache */
	ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
			/* Write-back D-cache, emulate VLSI
			 * abortion number three, not number one */
#else
	/* For now let's play safe, optimize later */
	ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
			/* Do DVMA snooping in Dcache, Write-thru D-cache */
	ccreg &= ~(TURBOSPARC_uS2);
			/* Emulate VLSI abortion number three, not number one */
#endif

	switch (ccreg & 7) {
	case 0: /* No SE cache */
	case 7: /* Test mode */
		break;
	default:
		ccreg |= (TURBOSPARC_SCENABLE);
	}
	turbosparc_set_ccreg (ccreg);

	mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
	mreg |= (TURBOSPARC_ICSNOOP);		/* Icache snooping on */
	srmmu_set_mmureg(mreg);
}

static void __init init_turbosparc(void)
{
	srmmu_name = "Fujitsu TurboSparc";
	srmmu_modtype = TurboSparc;

	BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, turbosparc_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, turbosparc_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, turbosparc_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, turbosparc_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, turbosparc_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, turbosparc_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NORM);

	poke_srmmu = poke_turbosparc;
}

static void __cpuinit poke_tsunami(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	tsunami_flush_icache();
	tsunami_flush_dcache();
	mreg &= ~TSUNAMI_ITD;
	mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
	srmmu_set_mmureg(mreg);
}

static void __init init_tsunami(void)
{
	/*
	 * Tsunami's pretty sane, Sun and TI actually got it
	 * somewhat right this time.  Fujitsu should have
	 * taken some lessons from them.
	 */

	srmmu_name = "TI Tsunami";
	srmmu_modtype = Tsunami;

	BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM);

	poke_srmmu = poke_tsunami;

	tsunami_setup_blockops();
}

static void __cpuinit poke_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	static int smp_catch;

	if(viking_mxcc_present) {
		unsigned long mxcc_control = mxcc_get_creg();

		mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
		mxcc_control &= ~(MXCC_CTL_RRC);
		mxcc_set_creg(mxcc_control);

		/* We do cache ptables on MXCC. */
		mreg |= VIKING_TCENABLE;
	} else {
		unsigned long bpreg;

		mreg &= ~(VIKING_TCENABLE);
		if(smp_catch++) {
			/* Must disable mixed-cmd mode here for other cpu's. */
			bpreg = viking_get_bpreg();
			bpreg &= ~(VIKING_ACTION_MIX);
			viking_set_bpreg(bpreg);

			/* Just in case PROM does something funny. */
			msi_set_sync();
		}
	}

	mreg |= VIKING_SPENABLE;
	mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
	mreg |= VIKING_SBENABLE;
	mreg &= ~(VIKING_ACENABLE);
	srmmu_set_mmureg(mreg);
}
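
/*
 * Design note: the MXCC/non-MXCC split in init_viking() below also
 * decides srmmu_cache_pagetables; only with an MXCC present are page
 * tables allowed into the cache, which is what poke_viking() above
 * acts on when it sets VIKING_TCENABLE.
 */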
static void __init init_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	/* Ahhh, the viking.  SRMMU VLSI abortion number two... */
	if (mreg & VIKING_MMODE) {
		srmmu_name = "TI Viking";
		viking_mxcc_present = 0;
		msi_set_sync();

		BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM);

		BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM);

		flush_page_for_dma_global = 0;
	} else {
		srmmu_name = "TI Viking/MXCC";
		viking_mxcc_present = 1;

		srmmu_cache_pagetables = 1;

		/* MXCC vikings lack the DMA snooping bug. */
		BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP);
	}

	BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM);

#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d) {
		BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM);
	} else
#endif
	{
		BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);
	}

	BTFIXUPSET_CALL(__flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP);

	poke_srmmu = poke_viking;
}
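/* LEON keeps it simple: apart from the per-page flush_cache_page,
 * every cache entry point below falls back to a whole-cache flush and
 * every TLB entry point to a whole-TLB flush, and page tables are
 * never cached (srmmu_cache_pagetables = 0).
 */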
#ifdef CONFIG_SPARC_LEON

void __init poke_leonsparc(void)
{
}

void __init init_leon(void)
{
	srmmu_name = "LEON";

	BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all,
			BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, leon_flush_cache_all,
			BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, leon_flush_pcache_all,
			BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, leon_flush_cache_all,
			BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, leon_flush_dcache_all,
			BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, leon_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, leon_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, leon_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, leon_flush_tlb_all, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, leon_flush_cache_all,
			BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, leon_flush_cache_all, BTFIXUPCALL_NOP);

	poke_srmmu = poke_leonsparc;

	srmmu_cache_pagetables = 0;

	leon_flush_during_switch = leon_flush_needed();
}
#endif

/* Probe for the srmmu chip version. */
static void __init get_srmmu_type(void)
{
	unsigned long mreg, psr;
	unsigned long mod_typ, mod_rev, psr_typ, psr_vers;

	srmmu_modtype = SRMMU_INVAL_MOD;
	hwbug_bitmask = 0;

	mreg = srmmu_get_mmureg();
	psr = get_psr();
	mod_typ = (mreg & 0xf0000000) >> 28;
	mod_rev = (mreg & 0x0f000000) >> 24;
	psr_typ = (psr >> 28) & 0xf;
	psr_vers = (psr >> 24) & 0xf;

	/* First, check for sparc-leon. */
	if (sparc_cpu_model == sparc_leon) {
		init_leon();
		return;
	}

	/* Second, check for HyperSparc or Cypress. */
	if (mod_typ == 1) {
		switch (mod_rev) {
		case 7:
			/* UP or MP Hypersparc */
			init_hypersparc();
			break;
		case 0:
		case 2:
			/* Uniprocessor Cypress */
			init_cypress_604();
			break;
		case 10:
		case 11:
		case 12:
			/* _REALLY OLD_ Cypress MP chips... */
		case 13:
		case 14:
		case 15:
			/* MP Cypress mmu/cache-controller */
			init_cypress_605(mod_rev);
			break;
		default:
			/* Some other Cypress revision, assume a 605. */
			init_cypress_605(mod_rev);
			break;
		}
		return;
	}

	/*
	 * Now Fujitsu TurboSparc. It might happen that it is
	 * in Swift emulation mode, so we will check later...
	 */
	if (psr_typ == 0 && psr_vers == 5) {
		init_turbosparc();
		return;
	}

	/* Next check for Fujitsu Swift. */
	if (psr_typ == 0 && psr_vers == 4) {
		int cpunode;
		char node_str[128];

		/* Check whether it is really a TurboSparc emulating a Swift... */
		cpunode = prom_getchild(prom_root_node);
		while ((cpunode = prom_getsibling(cpunode)) != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if (!strcmp(node_str, "cpu")) {
				if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
				    prom_getintdefault(cpunode, "psr-version", 1) == 5) {
					init_turbosparc();
					return;
				}
				break;
			}
		}

		init_swift();
		return;
	}

	/* Now the Viking family of srmmu. */
	if (psr_typ == 4 &&
	    ((psr_vers == 0) ||
	     ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
		init_viking();
		return;
	}

	/* Finally the Tsunami. */
	if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
		init_tsunami();
		return;
	}

	/* Oh well */
	srmmu_is_bad();
}

/* don't laugh, static pagetables */
static void srmmu_check_pgt_cache(int low, int high)
{
}
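/* The window trap handlers and the fault entries of the trap table are
 * patched at boot: PATCH_BRANCH() overwrites one instruction word at
 * *iaddr with a branch to daddr (SPARC_BRANCH presumably emitting the
 * PC-relative encoding), redirecting the stack checks to the
 * srmmu_*stackchk routines and the text/data fault traps to srmmu_fault.
 */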
extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
	tsetup_mmu_patchme, rtrap_mmu_patchme;

extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
	tsetup_srmmu_stackchk, srmmu_rett_stackchk;

extern unsigned long srmmu_fault;

#define PATCH_BRANCH(insn, dest) do { \
		iaddr = &(insn); \
		daddr = &(dest); \
		*iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \
	} while(0)

static void __init patch_window_trap_handlers(void)
{
	unsigned long *iaddr, *daddr;

	PATCH_BRANCH(spwin_mmu_patchme, spwin_srmmu_stackchk);
	PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk);
	PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk);
	PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk);
	PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault);
	PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault);
	PATCH_BRANCH(sparc_ttable[SP_TRAP_DACC].inst_three, srmmu_fault);
}

#ifdef CONFIG_SMP
/* Local cross-calls: broadcast the flush to the other CPUs, then do it here. */
static void smp_flush_page_for_dma(unsigned long page)
{
	xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page);
	local_flush_page_for_dma(page);
}
#endif
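/* Nonlinear file PTEs: the page offset lives above SRMMU_PTE_FILE_SHIFT
 * with SRMMU_FILE tagging the entry, so the two helpers below are exact
 * inverses (assuming SRMMU_FILE sits below the shift, as the encoding
 * implies), e.g. for a hypothetical offset off:
 *	srmmu_pte_to_pgoff(srmmu_pgoff_to_pte(off)) == off
 */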
static pte_t srmmu_pgoff_to_pte(unsigned long pgoff)
{
	return __pte((pgoff << SRMMU_PTE_FILE_SHIFT) | SRMMU_FILE);
}

static unsigned long srmmu_pte_to_pgoff(pte_t pte)
{
	return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT;
}

static pgprot_t srmmu_pgprot_noncached(pgprot_t prot)
{
	prot &= ~__pgprot(SRMMU_CACHE);

	return prot;
}

/* Load up routines and constants for sun4m and sun4d mmu */
void __init ld_mmu_srmmu(void)
{
	extern void ld_mmu_iommu(void);
	extern void ld_mmu_iounit(void);
	extern void ___xchg32_sun4md(void);

	BTFIXUPSET_SIMM13(pgdir_shift, SRMMU_PGDIR_SHIFT);
	BTFIXUPSET_SETHI(pgdir_size, SRMMU_PGDIR_SIZE);
	BTFIXUPSET_SETHI(pgdir_mask, SRMMU_PGDIR_MASK);

	BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD);
	BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD);

	BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE));
	PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
	BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
	BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
	BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
	page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);

	/* Functions */
	BTFIXUPSET_CALL(pgprot_noncached, srmmu_pgprot_noncached, BTFIXUPCALL_NORM);
#ifndef CONFIG_SMP
	BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
#endif
	BTFIXUPSET_CALL(do_check_pgt_cache, srmmu_check_pgt_cache, BTFIXUPCALL_NOP);

	BTFIXUPSET_CALL(set_pte, srmmu_set_pte, BTFIXUPCALL_SWAPO0O1);
	BTFIXUPSET_CALL(switch_mm, srmmu_switch_mm, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(pte_pfn, srmmu_pte_pfn, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_page_vaddr, srmmu_pgd_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0);

	BTFIXUPSET_CALL(pmd_bad, srmmu_pmd_bad, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_present, srmmu_pmd_present, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_SWAPO0G0);

	BTFIXUPSET_CALL(pgd_none, srmmu_pgd_none, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_bad, srmmu_pgd_bad, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_present, srmmu_pgd_present, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_SWAPO0G0);

	BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_populate, srmmu_pmd_populate, BTFIXUPCALL_NORM);

	BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK);
	BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_offset_kernel, srmmu_pte_offset, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(free_pte_fast, srmmu_free_pte_fast, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_alloc_one_kernel, srmmu_pte_alloc_one_kernel, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_alloc_one, srmmu_pte_alloc_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(free_pmd_fast, srmmu_pmd_free, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_alloc_one, srmmu_pmd_alloc_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(free_pgd_fast, srmmu_free_pgd_fast, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_NORM);

	BTFIXUPSET_HALF(pte_writei, SRMMU_WRITE);
	BTFIXUPSET_HALF(pte_dirtyi, SRMMU_DIRTY);
	BTFIXUPSET_HALF(pte_youngi, SRMMU_REF);
	BTFIXUPSET_HALF(pte_filei, SRMMU_FILE);
	BTFIXUPSET_HALF(pte_wrprotecti, SRMMU_WRITE);
	BTFIXUPSET_HALF(pte_mkcleani, SRMMU_DIRTY);
	BTFIXUPSET_HALF(pte_mkoldi, SRMMU_REF);
	BTFIXUPSET_CALL(pte_mkwrite, srmmu_pte_mkwrite, BTFIXUPCALL_ORINT(SRMMU_WRITE));
	BTFIXUPSET_CALL(pte_mkdirty, srmmu_pte_mkdirty, BTFIXUPCALL_ORINT(SRMMU_DIRTY));
	BTFIXUPSET_CALL(pte_mkyoung, srmmu_pte_mkyoung, BTFIXUPCALL_ORINT(SRMMU_REF));
	BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(sparc_mapiorange, srmmu_mapiorange, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(sparc_unmapiorange, srmmu_unmapiorange, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__swp_type, srmmu_swp_type, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__swp_offset, srmmu_swp_offset, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__swp_entry, srmmu_swp_entry, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(alloc_thread_info, srmmu_alloc_thread_info, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(free_thread_info, srmmu_free_thread_info, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(pte_to_pgoff, srmmu_pte_to_pgoff, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgoff_to_pte, srmmu_pgoff_to_pte, BTFIXUPCALL_NORM);

	get_srmmu_type();
	patch_window_trap_handlers();
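	/*
	 * On SMP, the uniprocessor implementations chosen above are first
	 * saved in the local_* slots, then the public entry points are
	 * repointed at the smp_* wrappers, which cross-call the local
	 * versions on the other CPUs (see smp_flush_page_for_dma above).
	 */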
#ifdef CONFIG_SMP
	/* El switcheroo... */
	BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all);
	BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm);
	BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range);
	BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page);
	BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all);
	BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm);
	BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range);
	BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page);
	BTFIXUPCOPY_CALL(local_flush_page_to_ram, __flush_page_to_ram);
	BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns);
	BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma);

	BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM);
	if (sparc_cpu_model != sun4d &&
	    sparc_cpu_model != sparc_leon) {
		BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM);
	}
	BTFIXUPSET_CALL(__flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM);

	if (poke_srmmu == poke_viking) {
		/* Avoid unnecessary cross calls. */
		BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all);
		BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm);
		BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range);
		BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page);
		BTFIXUPCOPY_CALL(__flush_page_to_ram, local_flush_page_to_ram);
		BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns);
		BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma);
	}
#endif

	if (sparc_cpu_model == sun4d)
		ld_mmu_iounit();
	else
		ld_mmu_iommu();
#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d)
		sun4d_init_smp();
	else if (sparc_cpu_model == sparc_leon)
		leon_init_smp();
	else
		sun4m_init_smp();
#endif
}