/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/fs.h>

#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
        const char policy[16];
        unsigned int cr_mask;
        unsigned int pmd;
        unsigned int pte;
};

static struct cachepolicy cache_policies[] __initdata = {
        {
                .policy  = "uncached",
                .cr_mask = CR_W|CR_C,
                .pmd     = PMD_SECT_UNCACHED,
                .pte     = L_PTE_MT_UNCACHED,
        }, {
                .policy  = "buffered",
                .cr_mask = CR_C,
                .pmd     = PMD_SECT_BUFFERED,
                .pte     = L_PTE_MT_BUFFERABLE,
        }, {
                .policy  = "writethrough",
                .cr_mask = 0,
                .pmd     = PMD_SECT_WT,
                .pte     = L_PTE_MT_WRITETHROUGH,
        }, {
                .policy  = "writeback",
                .cr_mask = 0,
                .pmd     = PMD_SECT_WB,
                .pte     = L_PTE_MT_WRITEBACK,
        }, {
                .policy  = "writealloc",
                .cr_mask = 0,
                .pmd     = PMD_SECT_WBWA,
                .pte     = L_PTE_MT_WRITEALLOC,
        }
};

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static int __init early_cachepolicy(char *p)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
                int len = strlen(cache_policies[i].policy);

                if (memcmp(p, cache_policies[i].policy, len) == 0) {
                        cachepolicy = i;
                        cr_alignment &= ~cache_policies[i].cr_mask;
                        cr_no_alignment &= ~cache_policies[i].cr_mask;
                        break;
                }
        }
        if (i == ARRAY_SIZE(cache_policies))
                printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
        /*
         * This restriction is partly to do with the way we boot; it is
         * unpredictable to have memory mapped using two different sets of
         * memory attributes (shared, type, and cache attribs).  We cannot
         * change these attributes once the initial assembly has set up the
         * page tables.
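         *
         * For example, booting with "cachepolicy=writethrough" selects
         * CPOLICY_WRITETHROUGH on pre-ARMv6 CPUs; on ARMv6 and later the
         * request is overridden to writeback just below.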
         */
        if (cpu_architecture() >= CPU_ARCH_ARMv6) {
                printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
                cachepolicy = CPOLICY_WRITEBACK;
        }
        flush_cache_all();
        set_cr(cr_alignment);
        return 0;
}
early_param("cachepolicy", early_cachepolicy);

static int __init early_nocache(char *__unused)
{
        char *p = "buffered";
        printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
        early_cachepolicy(p);
        return 0;
}
early_param("nocache", early_nocache);

static int __init early_nowrite(char *__unused)
{
        char *p = "uncached";
        printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
        early_cachepolicy(p);
        return 0;
}
early_param("nowb", early_nowrite);

static int __init early_ecc(char *p)
{
        if (memcmp(p, "on", 2) == 0)
                ecc_mask = PMD_PROTECTION;
        else if (memcmp(p, "off", 3) == 0)
                ecc_mask = 0;
        return 0;
}
early_param("ecc", early_ecc);

static int __init noalign_setup(char *__unused)
{
        cr_alignment &= ~CR_A;
        cr_no_alignment &= ~CR_A;
        set_cr(cr_alignment);
        return 1;
}
__setup("noalign", noalign_setup);

#ifndef CONFIG_SMP
void adjust_cr(unsigned long mask, unsigned long set)
{
        unsigned long flags;

        mask &= ~CR_A;

        set &= mask;

        local_irq_save(flags);

        cr_no_alignment = (cr_no_alignment & ~mask) | set;
        cr_alignment = (cr_alignment & ~mask) | set;

        set_cr((get_cr() & ~mask) | set);

        local_irq_restore(flags);
}
#endif

#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
        [MT_DEVICE] = {           /* Strongly ordered / ARMv6 shared device */
                .prot_pte  = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
                             L_PTE_SHARED,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
                .domain    = DOMAIN_IO,
        },
        [MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
                .prot_pte  = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PROT_SECT_DEVICE,
                .domain    = DOMAIN_IO,
        },
        [MT_DEVICE_CACHED] = {    /* ioremap_cached */
                .prot_pte  = PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PROT_SECT_DEVICE | PMD_SECT_WB,
                .domain    = DOMAIN_IO,
        },
        [MT_DEVICE_WC] = {        /* ioremap_wc */
                .prot_pte  = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PROT_SECT_DEVICE,
                .domain    = DOMAIN_IO,
        },
        [MT_UNCACHED] = {
                .prot_pte  = PROT_PTE_DEVICE,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
                .domain    = DOMAIN_IO,
        },
        [MT_CACHECLEAN] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_MINICLEAN] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_LOW_VECTORS] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                             L_PTE_EXEC,
                .prot_l1   = PMD_TYPE_TABLE,
                .domain    = DOMAIN_USER,
        },
        [MT_HIGH_VECTORS] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                             L_PTE_USER | L_PTE_EXEC,
                .prot_l1   = PMD_TYPE_TABLE,
                .domain    = DOMAIN_USER,
        },
        [MT_MEMORY] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                             L_PTE_WRITE | L_PTE_EXEC,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_ROM] = {
                .prot_sect = PMD_TYPE_SECT,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_MEMORY_NONCACHED] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                             L_PTE_WRITE | L_PTE_EXEC | L_PTE_MT_BUFFERABLE,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_MEMORY_DTCM] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG |
                             L_PTE_DIRTY | L_PTE_WRITE,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_MEMORY_ITCM] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                             L_PTE_USER | L_PTE_EXEC,
                .prot_l1   = PMD_TYPE_TABLE,
                .domain    = DOMAIN_IO,
        },
};

const struct mem_type *get_mem_type(unsigned int type)
{
        return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
        struct cachepolicy *cp;
        unsigned int cr = get_cr();
        unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
        int cpu_arch = cpu_architecture();
        int i;

        if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
                if (cachepolicy > CPOLICY_BUFFERED)
                        cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
                if (cachepolicy > CPOLICY_WRITETHROUGH)
                        cachepolicy = CPOLICY_WRITETHROUGH;
#endif
        }
        if (cpu_arch < CPU_ARCH_ARMv5) {
                if (cachepolicy >= CPOLICY_WRITEALLOC)
                        cachepolicy = CPOLICY_WRITEBACK;
                ecc_mask = 0;
        }
#ifdef CONFIG_SMP
        cachepolicy = CPOLICY_WRITEALLOC;
#else
        /* for NS-B0 ACP on UP mode */
        if (ACP_WAR_ENAB() || arch_is_coherent())
                cachepolicy = CPOLICY_WRITEALLOC;
#endif /* CONFIG_SMP */

        /*
         * Strip out features not present on earlier architectures.
         * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
         * without extended page tables don't have the 'Shared' bit.
         */
        if (cpu_arch < CPU_ARCH_ARMv5)
                for (i = 0; i < ARRAY_SIZE(mem_types); i++)
                        mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
        if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
                for (i = 0; i < ARRAY_SIZE(mem_types); i++)
                        mem_types[i].prot_sect &= ~PMD_SECT_S;

        /*
         * ARMv5 and lower, bit 4 must be set for page tables (was: cache
         * "update-able on write" bit on ARM610).  However, Xscale and
         * Xscale3 require this bit to be cleared.
         */
        if (cpu_is_xscale() || cpu_is_xsc3()) {
                for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                        mem_types[i].prot_sect &= ~PMD_BIT4;
                        mem_types[i].prot_l1 &= ~PMD_BIT4;
                }
        } else if (cpu_arch < CPU_ARCH_ARMv6) {
                for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                        if (mem_types[i].prot_l1)
                                mem_types[i].prot_l1 |= PMD_BIT4;
                        if (mem_types[i].prot_sect)
                                mem_types[i].prot_sect |= PMD_BIT4;
                }
        }

        /*
         * Mark the device areas according to the CPU/architecture.
         */
        if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
                if (!cpu_is_xsc3()) {
                        /*
                         * Mark device regions on ARMv6+ as execute-never
                         * to prevent speculative instruction fetches.
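                         *
                         * (XScale3 is excluded from this XN marking by the
                         * cpu_is_xsc3() check above; its device TEX/CB
                         * settings are applied in the xsc3 branch below.)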
360 */ 361 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN; 362 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN; 363 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN; 364 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN; 365 } 366 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) { 367 /* 368 * For ARMv7 with TEX remapping, 369 * - shared device is SXCB=1100 370 * - nonshared device is SXCB=0100 371 * - write combine device mem is SXCB=0001 372 * (Uncached Normal memory) 373 */ 374 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1); 375 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1); 376 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE; 377 } else if (cpu_is_xsc3()) { 378 /* 379 * For Xscale3, 380 * - shared device is TEXCB=00101 381 * - nonshared device is TEXCB=01000 382 * - write combine device mem is TEXCB=00100 383 * (Inner/Outer Uncacheable in xsc3 parlance) 384 */ 385 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED; 386 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2); 387 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1); 388 } else { 389 /* 390 * For ARMv6 and ARMv7 without TEX remapping, 391 * - shared device is TEXCB=00001 392 * - nonshared device is TEXCB=01000 393 * - write combine device mem is TEXCB=00100 394 * (Uncached Normal in ARMv6 parlance). 395 */ 396 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED; 397 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2); 398 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1); 399 } 400 } else { 401 /* 402 * On others, write combining is "Uncached/Buffered" 403 */ 404 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE; 405 } 406 407 /* 408 * Now deal with the memory-type mappings 409 */ 410 cp = &cache_policies[cachepolicy]; 411 vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; 412 413#ifndef CONFIG_SMP 414 /* 415 * Only use write-through for non-SMP systems 416 */ 417 if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH) 418 vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte; 419#endif 420 421 /* 422 * Enable CPU-specific coherency if supported. 423 * (Only available on XSC3 at the moment.) 424 */ 425 if (arch_is_coherent() && cpu_is_xsc3()) { 426 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; 427 mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; 428 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; 429 mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; 430 } 431 /* 432 * ARMv6 and above have extended page tables. 433 */ 434 if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) { 435 int mem_shared = 0; 436 437 /* 438 * Mark cache clean areas and XIP ROM read only 439 * from SVC mode and no access from userspace. 
440 */ 441 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; 442 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; 443 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; 444 445#ifdef CONFIG_SMP 446 mem_shared = 1; 447#else 448 /* for NS-B0 ACP on UP mode */ 449 if (ACP_WAR_ENAB() || arch_is_coherent()) 450 mem_shared = 1; 451#endif /* CONFIG_SMP */ 452 453 /* 454 * Mark memory with the "shared" attribute for SMP or UP systems 455 */ 456 if (mem_shared) { 457 user_pgprot |= L_PTE_SHARED; 458 kern_pgprot |= L_PTE_SHARED; 459 vecs_pgprot |= L_PTE_SHARED; 460 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S; 461 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED; 462 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; 463 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; 464 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; 465 mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; 466 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; 467 mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; 468 } 469 } 470 471 /* 472 * Non-cacheable Normal - intended for memory areas that must 473 * not cause dirty cache line writebacks when used 474 */ 475 if (cpu_arch >= CPU_ARCH_ARMv6) { 476 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) { 477 /* Non-cacheable Normal is XCB = 001 */ 478 mem_types[MT_MEMORY_NONCACHED].prot_sect |= 479 PMD_SECT_BUFFERED; 480 } else { 481 /* For both ARMv6 and non-TEX-remapping ARMv7 */ 482 mem_types[MT_MEMORY_NONCACHED].prot_sect |= 483 PMD_SECT_TEX(1); 484 } 485 } else { 486 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE; 487 } 488 489 for (i = 0; i < 16; i++) { 490 unsigned long v = pgprot_val(protection_map[i]); 491 protection_map[i] = __pgprot(v | user_pgprot); 492 } 493 494 mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot; 495 mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot; 496 497 pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot); 498 pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | 499 L_PTE_DIRTY | L_PTE_WRITE | kern_pgprot); 500 501 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; 502 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; 503 mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd; 504 mem_types[MT_MEMORY].prot_pte |= kern_pgprot; 505 mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask; 506 mem_types[MT_ROM].prot_sect |= cp->pmd; 507 508 switch (cp->pmd) { 509 case PMD_SECT_WT: 510 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT; 511 break; 512 case PMD_SECT_WB: 513 case PMD_SECT_WBWA: 514 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB; 515 break; 516 } 517 printk("Memory policy: ECC %sabled, Data cache %s\n", 518 ecc_mask ? "en" : "dis", cp->policy); 519 520 for (i = 0; i < ARRAY_SIZE(mem_types); i++) { 521 struct mem_type *t = &mem_types[i]; 522 if (t->prot_l1) 523 t->prot_l1 |= PMD_DOMAIN(t->domain); 524 if (t->prot_sect) 525 t->prot_sect |= PMD_DOMAIN(t->domain); 526 } 527} 528 529#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE 530pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, 531 unsigned long size, pgprot_t vma_prot) 532{ 533 if (!pfn_valid(pfn)) 534 return pgprot_noncached(vma_prot); 535 else if (file->f_flags & O_SYNC) 536 return pgprot_writecombine(vma_prot); 537 return vma_prot; 538} 539EXPORT_SYMBOL(phys_mem_access_prot); 540#endif 541 542#define vectors_base() (vectors_high() ? 
#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

static void __init *early_alloc(unsigned long sz)
{
        void *ptr = __va(memblock_alloc(sz, sz));
        memset(ptr, 0, sz);
        return ptr;
}

static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
        if (pmd_none(*pmd)) {
                pte_t *pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t));
                __pmd_populate(pmd, __pa(pte) | prot);
        }
        BUG_ON(pmd_bad(*pmd));
        return pte_offset_kernel(pmd, addr);
}

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, unsigned long pfn,
                                  const struct mem_type *type)
{
        pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
        do {
                set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
                                      unsigned long end, unsigned long phys,
                                      const struct mem_type *type)
{
        pmd_t *pmd = pmd_offset(pgd, addr);

        /*
         * Try a section mapping - end, addr and phys must all be aligned
         * to a section boundary.  Note that PMDs refer to the individual
         * L1 entries, whereas PGDs refer to a group of L1 entries making
         * up one logical pointer to an L2 table.
         */
        if (((addr | end | phys) & ~SECTION_MASK) == 0) {
                pmd_t *p = pmd;

                if (addr & SECTION_SIZE)
                        pmd++;

                do {
                        *pmd = __pmd(phys | type->prot_sect);
                        phys += SECTION_SIZE;
                } while (pmd++, addr += SECTION_SIZE, addr != end);

                flush_pmd_entry(p);
        } else {
                /*
                 * No need to loop; PTEs aren't interested in the
                 * individual L1 entries.
                 */
                alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
        }
}

static void __init create_36bit_mapping(struct map_desc *md,
                                        const struct mem_type *type)
{
        unsigned long phys, addr, length, end;
        pgd_t *pgd;

        addr = md->virtual;
        phys = (unsigned long)__pfn_to_phys(md->pfn);
        length = PAGE_ALIGN(md->length);

        if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
                printk(KERN_ERR "MM: CPU does not support supersection "
                        "mapping for 0x%08llx at 0x%08lx\n",
                        __pfn_to_phys((u64)md->pfn), addr);
                return;
        }

        /* N.B. ARMv6 supersections are only defined to work with domain 0.
         *      Since domain assignments can in fact be arbitrary, the
         *      'domain == 0' check below is required to ensure that ARMv6
         *      supersections are only allocated for domain 0 regardless
         *      of the actual domain assignments in use.
         */
        if (type->domain) {
                printk(KERN_ERR "MM: invalid domain in supersection "
                        "mapping for 0x%08llx at 0x%08lx\n",
                        __pfn_to_phys((u64)md->pfn), addr);
                return;
        }

        if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
                printk(KERN_ERR "MM: cannot create mapping for "
                        "0x%08llx at 0x%08lx invalid alignment\n",
                        __pfn_to_phys((u64)md->pfn), addr);
                return;
        }

        /*
         * Shift bits [35:32] of address into bits [23:20] of PMD
         * (See ARMv6 spec).
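         *
         * For example, with PAGE_SHIFT == 12 a 36-bit physical address of
         * 0x2_4000_0000 gives md->pfn == 0x240000, so
         * ((md->pfn >> 20) & 0xF) << 20 == 0x200000, placing the value 0x2
         * in descriptor bits [23:20].  (Worked example for illustration.)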
645 */ 646 phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20); 647 648 pgd = pgd_offset_k(addr); 649 end = addr + length; 650 do { 651 pmd_t *pmd = pmd_offset(pgd, addr); 652 int i; 653 654 for (i = 0; i < 16; i++) 655 *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER); 656 657 addr += SUPERSECTION_SIZE; 658 phys += SUPERSECTION_SIZE; 659 pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT; 660 } while (addr != end); 661} 662 663/* 664 * Create the page directory entries and any necessary 665 * page tables for the mapping specified by `md'. We 666 * are able to cope here with varying sizes and address 667 * offsets, and we take full advantage of sections and 668 * supersections. 669 */ 670static void __init create_mapping(struct map_desc *md) 671{ 672 unsigned long phys, addr, length, end; 673 const struct mem_type *type; 674 pgd_t *pgd; 675 676 if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) { 677 printk(KERN_WARNING "BUG: not creating mapping for " 678 "0x%08llx at 0x%08lx in user region\n", 679 __pfn_to_phys((u64)md->pfn), md->virtual); 680 return; 681 } 682 683 if ((md->type == MT_DEVICE || md->type == MT_ROM) && 684 md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) { 685 printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx " 686 "overlaps vmalloc space\n", 687 __pfn_to_phys((u64)md->pfn), md->virtual); 688 } 689 690 type = &mem_types[md->type]; 691 692 /* 693 * Catch 36-bit addresses 694 */ 695 if (md->pfn >= 0x100000) { 696 create_36bit_mapping(md, type); 697 return; 698 } 699 700 addr = md->virtual & PAGE_MASK; 701 phys = (unsigned long)__pfn_to_phys(md->pfn); 702 length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); 703 704 if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) { 705 printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not " 706 "be mapped using pages, ignoring.\n", 707 __pfn_to_phys(md->pfn), addr); 708 return; 709 } 710 711 pgd = pgd_offset_k(addr); 712 end = addr + length; 713 do { 714 unsigned long next = pgd_addr_end(addr, end); 715 716 alloc_init_section(pgd, addr, next, phys, type); 717 718 phys += next - addr; 719 addr = next; 720 } while (pgd++, addr != end); 721} 722 723/* 724 * Create the architecture specific mappings 725 */ 726void __init iotable_init(struct map_desc *io_desc, int nr) 727{ 728 int i; 729 730 for (i = 0; i < nr; i++) 731 create_mapping(io_desc + i); 732} 733 734static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M); 735 736/* 737 * vmalloc=size forces the vmalloc area to be exactly 'size' 738 * bytes. This can be used to increase (or decrease) the vmalloc 739 * area - the default is 128m. 
740 */ 741static int __init early_vmalloc(char *arg) 742{ 743 unsigned long vmalloc_reserve = memparse(arg, NULL); 744 745 if (vmalloc_reserve < SZ_16M) { 746 vmalloc_reserve = SZ_16M; 747 printk(KERN_WARNING 748 "vmalloc area too small, limiting to %luMB\n", 749 vmalloc_reserve >> 20); 750 } 751 752 if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) { 753 vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M); 754 printk(KERN_WARNING 755 "vmalloc area is too big, limiting to %luMB\n", 756 vmalloc_reserve >> 20); 757 } 758 759 vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve); 760 return 0; 761} 762early_param("vmalloc", early_vmalloc); 763 764phys_addr_t lowmem_end_addr; 765 766static void __init sanity_check_meminfo(void) 767{ 768 int i, j, highmem = 0; 769 770 lowmem_end_addr = __pa(vmalloc_min - 1) + 1; 771 772 for (i = 0, j = 0; i < meminfo.nr_banks; i++) { 773 struct membank *bank = &meminfo.bank[j]; 774 *bank = meminfo.bank[i]; 775 776#ifdef CONFIG_HIGHMEM 777 if (__va(bank->start) > vmalloc_min || 778 __va(bank->start) < (void *)PAGE_OFFSET) 779 highmem = 1; 780 781 bank->highmem = highmem; 782 783 /* 784 * Split those memory banks which are partially overlapping 785 * the vmalloc area greatly simplifying things later. 786 */ 787 if (__va(bank->start) < vmalloc_min && 788 bank->size > vmalloc_min - __va(bank->start)) { 789 if (meminfo.nr_banks >= NR_BANKS) { 790 printk(KERN_CRIT "NR_BANKS too low, " 791 "ignoring high memory\n"); 792 } else { 793 memmove(bank + 1, bank, 794 (meminfo.nr_banks - i) * sizeof(*bank)); 795 meminfo.nr_banks++; 796 i++; 797 bank[1].size -= vmalloc_min - __va(bank->start); 798 bank[1].start = __pa(vmalloc_min - 1) + 1; 799 bank[1].highmem = highmem = 1; 800 j++; 801 } 802 bank->size = vmalloc_min - __va(bank->start); 803 } 804#else 805 bank->highmem = highmem; 806 807 /* 808 * Check whether this memory bank would entirely overlap 809 * the vmalloc area. 810 */ 811 if (__va(bank->start) >= vmalloc_min || 812 __va(bank->start) < (void *)PAGE_OFFSET) { 813 printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx " 814 "(vmalloc region overlap).\n", 815 bank->start, bank->start + bank->size - 1); 816 continue; 817 } 818 819 /* 820 * Check whether this memory bank would partially overlap 821 * the vmalloc area. 822 */ 823 if (__va(bank->start + bank->size) > vmalloc_min || 824 __va(bank->start + bank->size) < __va(bank->start)) { 825 unsigned long newsize = vmalloc_min - __va(bank->start); 826 printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx " 827 "to -%.8lx (vmalloc region overlap).\n", 828 bank->start, bank->start + bank->size - 1, 829 bank->start + newsize - 1); 830 bank->size = newsize; 831 } 832#endif 833 j++; 834 } 835#ifdef CONFIG_HIGHMEM 836 if (highmem) { 837 const char *reason = NULL; 838 839 if (cache_is_vipt_aliasing()) { 840 /* 841 * Interactions between kmap and other mappings 842 * make highmem support with aliasing VIPT caches 843 * rather difficult. 
844 */ 845 reason = "with VIPT aliasing cache"; 846#ifdef CONFIG_SMP 847 } else if (tlb_ops_need_broadcast()) { 848 /* 849 * kmap_high needs to occasionally flush TLB entries, 850 * however, if the TLB entries need to be broadcast 851 * we may deadlock: 852 * kmap_high(irqs off)->flush_all_zero_pkmaps-> 853 * flush_tlb_kernel_range->smp_call_function_many 854 * (must not be called with irqs off) 855 */ 856 reason = "without hardware TLB ops broadcasting"; 857#endif 858 } 859 if (reason) { 860 printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n", 861 reason); 862 while (j > 0 && meminfo.bank[j - 1].highmem) 863 j--; 864 } 865 } 866#endif 867 meminfo.nr_banks = j; 868} 869 870static inline void prepare_page_table(void) 871{ 872 unsigned long addr; 873 874 /* 875 * Clear out all the mappings below the kernel image. 876 */ 877 for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE) 878 pmd_clear(pmd_off_k(addr)); 879 880#ifdef CONFIG_XIP_KERNEL 881 /* The XIP kernel is mapped in the module area -- skip over it */ 882 addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK; 883#endif 884 for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE) 885 pmd_clear(pmd_off_k(addr)); 886 887 /* 888 * Clear out all the kernel space mappings, except for the first 889 * memory bank, up to the end of the vmalloc region. 890 */ 891 for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0])); 892 addr < VMALLOC_END; addr += PGDIR_SIZE) 893 pmd_clear(pmd_off_k(addr)); 894} 895 896/* 897 * Reserve the special regions of memory 898 */ 899void __init arm_mm_memblock_reserve(void) 900{ 901 /* 902 * Reserve the page tables. These are already in use, 903 * and can only be in node 0. 904 */ 905 memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t)); 906 907#ifdef CONFIG_SA1111 908 /* 909 * Because of the SA1111 DMA bug, we want to preserve our 910 * precious DMA-able memory... 911 */ 912 memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET); 913#endif 914} 915 916/* 917 * Set up device the mappings. Since we clear out the page tables for all 918 * mappings above VMALLOC_END, we will remove any debug device mappings. 919 * This means you have to be careful how you debug this function, or any 920 * called function. This means you can't use any function or debugging 921 * method which may touch any device, otherwise the kernel _will_ crash. 922 */ 923static void __init devicemaps_init(struct machine_desc *mdesc) 924{ 925 struct map_desc map; 926 unsigned long addr; 927 void *vectors; 928 929 /* 930 * Allocate the vector page early. 931 */ 932 vectors = early_alloc(PAGE_SIZE); 933 934 for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE) 935 pmd_clear(pmd_off_k(addr)); 936 937 /* 938 * Map the kernel if it is XIP. 939 * It is always first in the modulearea. 940 */ 941#ifdef CONFIG_XIP_KERNEL 942 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK); 943 map.virtual = MODULES_VADDR; 944 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK; 945 map.type = MT_ROM; 946 create_mapping(&map); 947#endif 948 949 /* 950 * Map the cache flushing regions. 
951 */ 952#ifdef FLUSH_BASE 953 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS); 954 map.virtual = FLUSH_BASE; 955 map.length = SZ_1M; 956 map.type = MT_CACHECLEAN; 957 create_mapping(&map); 958#endif 959#ifdef FLUSH_BASE_MINICACHE 960 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M); 961 map.virtual = FLUSH_BASE_MINICACHE; 962 map.length = SZ_1M; 963 map.type = MT_MINICLEAN; 964 create_mapping(&map); 965#endif 966 967 /* 968 * Create a mapping for the machine vectors at the high-vectors 969 * location (0xffff0000). If we aren't using high-vectors, also 970 * create a mapping at the low-vectors virtual address. 971 */ 972 map.pfn = __phys_to_pfn(virt_to_phys(vectors)); 973 map.virtual = 0xffff0000; 974 map.length = PAGE_SIZE; 975 map.type = MT_HIGH_VECTORS; 976 create_mapping(&map); 977 978 if (!vectors_high()) { 979 map.virtual = 0; 980 map.type = MT_LOW_VECTORS; 981 create_mapping(&map); 982 } 983 984 /* 985 * Ask the machine support to map in the statically mapped devices. 986 */ 987 if (mdesc->map_io) 988 mdesc->map_io(); 989 990 /* 991 * Finally flush the caches and tlb to ensure that we're in a 992 * consistent state wrt the writebuffer. This also ensures that 993 * any write-allocated cache lines in the vector page are written 994 * back. After this point, we can start to touch devices again. 995 */ 996 local_flush_tlb_all(); 997 flush_cache_all(); 998} 999 1000static void __init kmap_init(void) 1001{ 1002#ifdef CONFIG_HIGHMEM 1003 pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE), 1004 PKMAP_BASE, _PAGE_KERNEL_TABLE); 1005#endif 1006} 1007 1008static inline void map_memory_bank(struct membank *bank) 1009{ 1010 struct map_desc map; 1011 1012 map.pfn = bank_pfn_start(bank); 1013 map.virtual = __phys_to_virt(bank_phys_start(bank)); 1014 map.length = bank_phys_size(bank); 1015 map.type = MT_MEMORY; 1016 1017 create_mapping(&map); 1018} 1019 1020static void __init map_lowmem(void) 1021{ 1022 struct meminfo *mi = &meminfo; 1023 int i; 1024 1025 /* Map all the lowmem memory banks. */ 1026 for (i = 0; i < mi->nr_banks; i++) { 1027 struct membank *bank = &mi->bank[i]; 1028 1029 if (!bank->highmem) 1030 map_memory_bank(bank); 1031 } 1032} 1033 1034static int __init meminfo_cmp(const void *_a, const void *_b) 1035{ 1036 const struct membank *a = _a, *b = _b; 1037 long cmp = bank_pfn_start(a) - bank_pfn_start(b); 1038 return cmp < 0 ? -1 : cmp > 0 ? 1 : 0; 1039} 1040 1041/* 1042 * paging_init() sets up the page tables, initialises the zone memory 1043 * maps, and sets up the zero page, bad page and bad page tables. 1044 */ 1045void __init paging_init(struct machine_desc *mdesc) 1046{ 1047 void *zero_page; 1048 1049 sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); 1050 1051 build_mem_type_table(); 1052 sanity_check_meminfo(); 1053 prepare_page_table(); 1054 map_lowmem(); 1055 devicemaps_init(mdesc); 1056 kmap_init(); 1057 1058 top_pmd = pmd_off_k(0xffff0000); 1059 1060 /* allocate the zero page. */ 1061 zero_page = early_alloc(PAGE_SIZE); 1062 1063 bootmem_init(); 1064 1065 empty_zero_page = virt_to_page(zero_page); 1066 __flush_dcache_page(NULL, empty_zero_page); 1067} 1068 1069/* 1070 * In order to soft-boot, we need to insert a 1:1 mapping in place of 1071 * the user-mode pages. This will then ensure that we have predictable 1072 * results when turning the mmu off 1073 */ 1074void setup_mm_for_reboot(char mode) 1075{ 1076 unsigned long base_pmdval; 1077 pgd_t *pgd; 1078 int i; 1079 1080 /* 1081 * We need to access to user-mode page tables here. 
         * For kernel threads we don't have any user-mode mappings so we use
         * the context that we "borrowed".
         */
        pgd = current->active_mm->pgd;

        base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
        if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
                base_pmdval |= PMD_BIT4;

        for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
                unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
                pmd_t *pmd;

                pmd = pmd_off(pgd, i << PGDIR_SHIFT);
                pmd[0] = __pmd(pmdval);
                pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
                flush_pmd_entry(pmd);
        }

        local_flush_tlb_all();
}