/* $Id: mmu_context.h,v 1.1.1.1 2008/10/15 03:29:18 james26_jang Exp $ */
#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H

/* Derived heavily from Linus's Alpha/AXP ASN code... */

#include <asm/page.h>

/*
 * For the 8k page size kernel, use only 10 hw context bits to optimize some
 * shifts in the fast tlbmiss handlers, instead of all 13 bits (specifically
 * for the vpte offset calculation).  For other page sizes this optimization
 * in the tlb handlers cannot be done; even then, not all 13 bits can be used,
 * because the tlb handlers use the "andcc" instruction, which sign-extends
 * 13-bit arguments.
 */
#if PAGE_SHIFT == 13
#define CTX_VERSION_SHIFT	10
#define TAG_CONTEXT_BITS	0x3ff
#else
#define CTX_VERSION_SHIFT	12
#define TAG_CONTEXT_BITS	0xfff
#endif

#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/spitfire.h>

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
}

extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];

#define CTX_VERSION_MASK	((~0UL) << CTX_VERSION_SHIFT)
#define CTX_FIRST_VERSION	((1UL << CTX_VERSION_SHIFT) + 1UL)
#define CTX_VALID(__ctx)	\
	 (!(((__ctx) ^ tlb_context_cache) & CTX_VERSION_MASK))
#define CTX_HWBITS(__ctx)	((__ctx) & ~CTX_VERSION_MASK)

extern void get_new_mmu_context(struct mm_struct *mm);

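/* Worked example of how an mm->context value decomposes.  The value 0xc05
 * below is hypothetical and assumes PAGE_SHIFT == 13, i.e.
 * CTX_VERSION_SHIFT == 10:
 *
 *	CTX_HWBITS(0xc05) == 0x005	the 10-bit hardware context number,
 *					i.e. what load_secondary_context()
 *					below writes into the MMU.
 *	version field     == 0xc00	compared against tlb_context_cache by
 *					CTX_VALID(); once the global version
 *					moves past it the context is stale and
 *					get_new_mmu_context() must assign a
 *					fresh one.
 *
 * CTX_FIRST_VERSION == (1UL << 10) + 1UL == 0x401: version 1, hardware
 * context number 1.
 */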

/* Initialize a new mmu context.  This is invoked when a new
 * address space instance (unique or shared) is instantiated.
 * This just needs to set mm->context to an invalid context.
 */
#define init_new_context(__tsk, __mm)	(((__mm)->context = 0UL), 0)

/* Destroy a dead context.  This occurs when mmput drops the
 * mm_users count to zero, the mmaps have been released, and
 * all the page tables have been flushed.  Our job is to destroy
 * any remaining processor-specific state, and in the sparc64
 * case this just means freeing up the mmu context ID held by
 * this task if valid.
 */
#define destroy_context(__mm)						\
do {	spin_lock(&ctx_alloc_lock);					\
	if (CTX_VALID((__mm)->context)) {				\
		unsigned long nr = CTX_HWBITS((__mm)->context);		\
		mmu_context_bmap[nr >> 6] &= ~(1UL << (nr & 63));	\
	}								\
	spin_unlock(&ctx_alloc_lock);					\
} while (0)

/* Reload the two core values used by TLB miss handler
 * processing on sparc64.  They are:
 * 1) The physical address of mm->pgd; when full page
 *    table walks are necessary, this is where the
 *    search begins.
 * 2) A "PGD cache".  For 32-bit tasks only pgd[0] is
 *    ever used since that maps the entire low 4GB
 *    completely.  To speed up TLB miss processing we
 *    make this value available to the handlers.  This
 *    decreases the amount of memory traffic incurred.
 */
#define reload_tlbmiss_state(__tsk, __mm) \
do { \
	register unsigned long paddr asm("o5"); \
	register unsigned long pgd_cache asm("o4"); \
	paddr = __pa((__mm)->pgd); \
	pgd_cache = 0UL; \
	if ((__tsk)->thread.flags & SPARC_FLAG_32BIT) \
		pgd_cache = pgd_val((__mm)->pgd[0]) << 11UL; \
	__asm__ __volatile__("wrpr	%%g0, 0x494, %%pstate\n\t" \
			     "mov	%3, %%g4\n\t" \
			     "mov	%0, %%g7\n\t" \
			     "stxa	%1, [%%g4] %2\n\t" \
			     "membar	#Sync\n\t" \
			     "wrpr	%%g0, 0x096, %%pstate" \
			     : /* no outputs */ \
			     : "r" (paddr), "r" (pgd_cache), \
			       "i" (ASI_DMMU), "i" (TSB_REG)); \
} while (0)

/* Set MMU context in the actual hardware. */
#define load_secondary_context(__mm) \
	__asm__ __volatile__("stxa	%0, [%1] %2\n\t" \
			     "flush	%%g6" \
			     : /* No outputs */ \
			     : "r" (CTX_HWBITS((__mm)->context)), \
			       "r" (0x10), "i" (ASI_DMMU))

extern void __flush_tlb_mm(unsigned long, unsigned long);

/* Switch the current MM context. */
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk, int cpu)
{
	unsigned long ctx_valid;

	spin_lock(&mm->page_table_lock);
	if (CTX_VALID(mm->context))
		ctx_valid = 1;
	else
		ctx_valid = 0;

	if (!ctx_valid || (old_mm != mm)) {
		if (!ctx_valid)
			get_new_mmu_context(mm);

		load_secondary_context(mm);
		reload_tlbmiss_state(tsk, mm);
	}

	{
		unsigned long vm_mask = (1UL << cpu);

		/* Even if (mm == old_mm) we _must_ check
		 * the cpu_vm_mask.  If we do not we could
		 * corrupt the TLB state because of how
		 * smp_flush_tlb_{page,range,mm} on sparc64
		 * and lazy tlb switches work. -DaveM
		 */
		if (!ctx_valid || !(mm->cpu_vm_mask & vm_mask)) {
			mm->cpu_vm_mask |= vm_mask;
			__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
		}
	}
	spin_unlock(&mm->page_table_lock);
}

/* Activate a new MM instance for the current task. */
static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
{
	unsigned long vm_mask;

	spin_lock(&mm->page_table_lock);
	if (!CTX_VALID(mm->context))
		get_new_mmu_context(mm);
	vm_mask = (1UL << smp_processor_id());
	if (!(mm->cpu_vm_mask & vm_mask))
		mm->cpu_vm_mask |= vm_mask;
	spin_unlock(&mm->page_table_lock);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
	reload_tlbmiss_state(current, mm);
}

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_MMU_CONTEXT_H) */