/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tsb.h>
#include <asm/oplib.h>

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long hash_shift, unsigned long nentries)
{
	vaddr >>= hash_shift;
	return vaddr & (nentries - 1);
}

static inline int tag_compare(unsigned long tag, unsigned long vaddr)
{
	return (tag == (vaddr >> 22));
}
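
/* A worked example of the hash and tag above, assuming 8K base pages
 * (PAGE_SHIFT == 13) and, purely for illustration, a 512-entry TSB:
 *
 *	vaddr = 0x402000
 *	tsb_hash(vaddr, 13, 512) == (0x402000 >> 13) & 0x1ff == 0x1
 *	vaddr >> 22 == 0x1
 *
 * Different mappings can hash to the same TSB slot; the tag
 * (vaddr >> 22) is what tells us whether the entry sitting in that
 * slot actually belongs to the address being looked up.
 */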

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */

void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, PAGE_SHIFT,
					      KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent->tag, v)) {
			ent->tag = (1UL << TSB_TAG_INVALID_BIT);
			membar_storeload_storestore();
		}
	}
}

static void __flush_tsb_one(struct mmu_gather *mp, unsigned long hash_shift, unsigned long tsb, unsigned long nentries)
{
	unsigned long i;

	for (i = 0; i < mp->tlb_nr; i++) {
		unsigned long v = mp->vaddrs[i];
		unsigned long tag, ent, hash;

		v &= ~0x1UL;

		hash = tsb_hash(v, hash_shift, nentries);
		ent = tsb + (hash * sizeof(struct tsb));
		tag = (v >> 22UL);

		tsb_flush(ent, tag);
	}
}

void flush_tsb_user(struct mmu_gather *mp)
{
	struct mm_struct *mm = mp->mm;
	unsigned long nentries, base, flags;

	spin_lock_irqsave(&mm->context.lock, flags);

	base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
	nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
	if (tlb_type == cheetah_plus || tlb_type == hypervisor)
		base = __pa(base);
	__flush_tsb_one(mp, PAGE_SHIFT, base, nentries);

#ifdef CONFIG_HUGETLB_PAGE
	if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
		base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
		nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
		if (tlb_type == cheetah_plus || tlb_type == hypervisor)
			base = __pa(base);
		__flush_tsb_one(mp, HPAGE_SHIFT, base, nentries);
	}
#endif
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

#if defined(CONFIG_SPARC64_PAGE_SIZE_8KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_8K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_8K
#elif defined(CONFIG_SPARC64_PAGE_SIZE_64KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_64K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_64K
#elif defined(CONFIG_SPARC64_PAGE_SIZE_512KB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_512K
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_512K
#elif defined(CONFIG_SPARC64_PAGE_SIZE_4MB)
#define HV_PGSZ_IDX_BASE	HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_BASE	HV_PGSZ_MASK_4MB
#else
#error Broken base page size setting...
#endif

#ifdef CONFIG_HUGETLB_PAGE
#if defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_64K
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_64K
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_512K
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_512K
#elif defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
#define HV_PGSZ_IDX_HUGE	HV_PGSZ_IDX_4MB
#define HV_PGSZ_MASK_HUGE	HV_PGSZ_MASK_4MB
#else
#error Broken huge page size setting...
#endif
#endif
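
/* For reference, the mapping implemented by the switch statement in
 * setup_tsb_params() below, from TSB size to the size field encoded
 * in the low bits of tsb_reg and the smallest page size that can map
 * the whole TSB with one TLB entry:
 *
 *	TSB bytes	size field	mapping page size
 *	8K		0x0		8K
 *	16K - 64K	0x1 - 0x3	64K
 *	128K - 512K	0x4 - 0x6	512K
 *	1MB		0x7		4MB
 */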

static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_idx, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_block[tsb_idx].tsb_nentries =
		tsb_bytes / sizeof(struct tsb);

	base = TSBMAP_BASE;
	tte = pgprot_val(PAGE_KERNEL_LOCKED);
	tsb_paddr = __pa(mm->context.tsb_block[tsb_idx].tsb);
	BUG_ON(tsb_paddr & (tsb_bytes - 1UL));

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		page_sz = 4 * 1024 * 1024;
		break;

	default:
		BUG();
	}
	tte |= pte_sz_bits(page_sz);

	if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
		/* Physical mapping, no locked TLB entry for TSB. */
		tsb_reg |= tsb_paddr;

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = 0;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = 0;
	} else {
		tsb_reg |= base;
		tsb_reg |= (tsb_paddr & (page_sz - 1UL));
		tte |= (tsb_paddr & ~(page_sz - 1UL));

		mm->context.tsb_block[tsb_idx].tsb_reg_val = tsb_reg;
		mm->context.tsb_block[tsb_idx].tsb_map_vaddr = base;
		mm->context.tsb_block[tsb_idx].tsb_map_pte = tte;
	}

	/* Setup the Hypervisor TSB descriptor. */
	if (tlb_type == hypervisor) {
		struct hv_tsb_descr *hp = &mm->context.tsb_descr[tsb_idx];

		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_idx = HV_PGSZ_IDX_BASE;
			break;
#ifdef CONFIG_HUGETLB_PAGE
		case MM_TSB_HUGE:
			hp->pgsz_idx = HV_PGSZ_IDX_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->assoc = 1;
		hp->num_ttes = tsb_bytes / 16;
		hp->ctx_idx = 0;
		switch (tsb_idx) {
		case MM_TSB_BASE:
			hp->pgsz_mask = HV_PGSZ_MASK_BASE;
			break;
#ifdef CONFIG_HUGETLB_PAGE
		case MM_TSB_HUGE:
			hp->pgsz_mask = HV_PGSZ_MASK_HUGE;
			break;
#endif
		default:
			BUG();
		}
		hp->tsb_base = tsb_paddr;
		hp->resv = 0;
	}
}

static struct kmem_cache *tsb_caches[8] __read_mostly;

static const char *tsb_cache_names[8] = {
	"tsb_8KB",
	"tsb_16KB",
	"tsb_32KB",
	"tsb_64KB",
	"tsb_128KB",
	"tsb_256KB",
	"tsb_512KB",
	"tsb_1MB",
};

void __init pgtable_cache_init(void)
{
	unsigned long i;

	for (i = 0; i < 8; i++) {
		unsigned long size = 8192 << i;
		const char *name = tsb_cache_names[i];

		tsb_caches[i] = kmem_cache_create(name,
						  size, size,
						  0,
						  NULL, NULL);
		if (!tsb_caches[i]) {
			prom_printf("Could not create %s cache\n", name);
			prom_halt();
		}
	}
}
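
/* A worked example of the 3/4 grow threshold used by tsb_grow()
 * below, given the 16-byte TSB entries this file assumes elsewhere
 * (see the "tsb_bytes / 16" computation in setup_tsb_params()):
 * an 8K TSB holds 8192 / 16 == 512 translations, so its rss limit
 * becomes (512 * 3) / 4 == 384 pages; the 16K TSB that replaces it
 * raises the limit to 768, and so on up to the 1MB maximum.
 */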

/* When the RSS of an address space exceeds tsb_rss_limit for a TSB,
 * do_sparc64_fault() invokes this routine to try to grow it.
 *
 * When we reach the maximum TSB size supported, we stick ~0UL into
 * tsb_rss_limit for that TSB so the grow checks in do_sparc64_fault()
 * will not trigger any longer.
 *
 * The TSB can be anywhere from 8K to 1MB in size, in increasing powers
 * of two.  The TSB must be aligned to its size, so e.g. a 512K TSB
 * must be 512K aligned.  It also must be physically contiguous, so we
 * cannot use vmalloc().
 *
 * The idea here is to grow the TSB when the RSS of the process approaches
 * the number of entries that the current TSB can hold at once.  Currently,
 * we trigger when the RSS hits 3/4 of the TSB capacity.
 */
void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long rss)
{
	unsigned long max_tsb_size = 1 * 1024 * 1024;
	unsigned long new_size, old_size, flags;
	struct tsb *old_tsb, *new_tsb;
	unsigned long new_cache_index, old_cache_index;
	unsigned long new_rss_limit;
	gfp_t gfp_flags;

	if (max_tsb_size > (PAGE_SIZE << MAX_ORDER))
		max_tsb_size = (PAGE_SIZE << MAX_ORDER);

	new_cache_index = 0;
	for (new_size = 8192; new_size < max_tsb_size; new_size <<= 1UL) {
		unsigned long n_entries = new_size / sizeof(struct tsb);

		n_entries = (n_entries * 3) / 4;
		if (n_entries > rss)
			break;

		new_cache_index++;
	}

	if (new_size == max_tsb_size)
		new_rss_limit = ~0UL;
	else
		new_rss_limit = ((new_size / sizeof(struct tsb)) * 3) / 4;

retry_tsb_alloc:
	gfp_flags = GFP_KERNEL;
	if (new_size > (PAGE_SIZE * 2))
		gfp_flags = __GFP_NOWARN | __GFP_NORETRY;

	new_tsb = kmem_cache_alloc(tsb_caches[new_cache_index], gfp_flags);
	if (unlikely(!new_tsb)) {
		/* Not being able to fork due to a high-order TSB
		 * allocation failure is very bad behavior.  Just back
		 * down to a 0-order allocation and force no TSB
		 * growing for this address space.
		 */
		if (mm->context.tsb_block[tsb_index].tsb == NULL &&
		    new_cache_index > 0) {
			new_cache_index = 0;
			new_size = 8192;
			new_rss_limit = ~0UL;
			goto retry_tsb_alloc;
		}

		/* If we failed on a TSB grow, we are under serious
		 * memory pressure so don't try to grow any more.
		 */
		if (mm->context.tsb_block[tsb_index].tsb != NULL)
			mm->context.tsb_block[tsb_index].tsb_rss_limit = ~0UL;
		return;
	}

	/* Mark all tags as invalid. */
	tsb_init(new_tsb, new_size);

	/* Ok, we are about to commit the changes.  If we are
	 * growing an existing TSB the locking is very tricky,
	 * so WATCH OUT!
	 *
	 * We have to hold mm->context.lock while committing to the
	 * new TSB; this synchronizes us with processors in
	 * flush_tsb_user() and switch_mm() for this address space.
	 *
	 * But even with that lock held, processors run asynchronously
	 * accessing the old TSB via TLB miss handling.  This is OK
	 * because those actions are just propagating state from the
	 * Linux page tables into the TSB; page table mappings are not
	 * being changed.  If a real fault occurs, the processor will
	 * synchronize with us when it hits flush_tsb_user(); this is
	 * also true for the case where vmscan is modifying the page
	 * tables.  The only thing we need to be careful with is to
	 * skip any locked TSB entries during copy_tsb().
	 *
	 * When we finish committing to the new TSB, we have to drop
	 * the lock and ask all other cpus running this address space
	 * to run tsb_context_switch() to see the new TSB table.
	 */
	spin_lock_irqsave(&mm->context.lock, flags);

	old_tsb = mm->context.tsb_block[tsb_index].tsb;
	old_cache_index =
		(mm->context.tsb_block[tsb_index].tsb_reg_val & 0x7UL);
	old_size = (mm->context.tsb_block[tsb_index].tsb_nentries *
		    sizeof(struct tsb));

	/* Handle multiple threads trying to grow the TSB at the same time.
	 * One will get in here first, and bump the size and the RSS limit.
	 * The others will get in here next and hit this check.
	 */
	if (unlikely(old_tsb &&
		     (rss < mm->context.tsb_block[tsb_index].tsb_rss_limit))) {
		spin_unlock_irqrestore(&mm->context.lock, flags);

		kmem_cache_free(tsb_caches[new_cache_index], new_tsb);
		return;
	}

	mm->context.tsb_block[tsb_index].tsb_rss_limit = new_rss_limit;

	if (old_tsb) {
		extern void copy_tsb(unsigned long old_tsb_base,
				     unsigned long old_tsb_size,
				     unsigned long new_tsb_base,
				     unsigned long new_tsb_size);
		unsigned long old_tsb_base = (unsigned long) old_tsb;
		unsigned long new_tsb_base = (unsigned long) new_tsb;

		if (tlb_type == cheetah_plus || tlb_type == hypervisor) {
			old_tsb_base = __pa(old_tsb_base);
			new_tsb_base = __pa(new_tsb_base);
		}
		copy_tsb(old_tsb_base, old_size, new_tsb_base, new_size);
	}

	mm->context.tsb_block[tsb_index].tsb = new_tsb;
	setup_tsb_params(mm, tsb_index, new_size);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	/* If old_tsb is NULL, we're being invoked for the first time
	 * from init_new_context().
	 */
	if (old_tsb) {
		/* Reload it on the local cpu. */
		tsb_context_switch(mm);

		/* Now force other processors to do the same. */
		smp_tsb_sync(mm);

		/* Now it is safe to free the old tsb. */
		kmem_cache_free(tsb_caches[old_cache_index], old_tsb);
	}
}
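
/* A note on the allocation policy in tsb_grow() above, assuming an
 * 8K base page size: only the 8K and 16K TSBs (order 0 and order 1)
 * are requested with plain GFP_KERNEL.  Larger TSBs are high-order
 * allocations and are attempted with just __GFP_NOWARN | __GFP_NORETRY,
 * so under memory pressure we fail fast and quietly, falling back to
 * the 0-order retry path rather than stalling in page reclaim.
 */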

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
#ifdef CONFIG_HUGETLB_PAGE
	unsigned long huge_pte_count;
#endif
	unsigned int i;

	spin_lock_init(&mm->context.lock);

	mm->context.sparc64_ctx_val = 0UL;

#ifdef CONFIG_HUGETLB_PAGE
	/* We reset it to zero because the fork() page copying
	 * will re-increment the counters as the parent PTEs are
	 * copied into the child address space.
	 */
	huge_pte_count = mm->context.huge_pte_count;
	mm->context.huge_pte_count = 0;
#endif

	/* copy_mm() copies over the parent's mm_struct before calling
	 * us, so we need to zero out the TSB pointer or else tsb_grow()
	 * will be confused and think there is an older TSB to free up.
	 */
	for (i = 0; i < MM_NUM_TSBS; i++)
		mm->context.tsb_block[i].tsb = NULL;

	/* If this is a fork, inherit the parent's TSB size.  We would
	 * grow it to that size on the first page fault anyway.
	 */
	tsb_grow(mm, MM_TSB_BASE, get_mm_rss(mm));

#ifdef CONFIG_HUGETLB_PAGE
	if (unlikely(huge_pte_count))
		tsb_grow(mm, MM_TSB_HUGE, huge_pte_count);
#endif

	if (unlikely(!mm->context.tsb_block[MM_TSB_BASE].tsb))
		return -ENOMEM;

	return 0;
}

static void tsb_destroy_one(struct tsb_config *tp)
{
	unsigned long cache_index;

	if (!tp->tsb)
		return;
	cache_index = tp->tsb_reg_val & 0x7UL;
	kmem_cache_free(tsb_caches[cache_index], tp->tsb);
	tp->tsb = NULL;
	tp->tsb_reg_val = 0UL;
}

void destroy_context(struct mm_struct *mm)
{
	unsigned long flags, i;

	for (i = 0; i < MM_NUM_TSBS; i++)
		tsb_destroy_one(&mm->context.tsb_block[i]);

	spin_lock_irqsave(&ctx_alloc_lock, flags);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);
		mmu_context_bmap[nr>>6] &= ~(1UL << (nr & 63));
	}

	spin_unlock_irqrestore(&ctx_alloc_lock, flags);
}
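
/* A rough lifecycle sketch, summarizing the functions above:
 * init_new_context() sizes the initial TSB from the parent's RSS at
 * fork time, do_sparc64_fault() calls tsb_grow() whenever the RSS
 * crosses the current tsb_rss_limit, and destroy_context() returns
 * each TSB to its kmem cache and releases the context number back
 * to mmu_context_bmap.
 */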