/* $Id: mmu_context.h,v 1.1.1.1 2007/08/03 18:53:36 Exp $ */
#ifndef __SPARC64_MMU_CONTEXT_H
#define __SPARC64_MMU_CONTEXT_H

/* Derived heavily from Linus's Alpha/AXP ASN code... */

#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/spitfire.h>
#include <asm-generic/mm_hooks.h>

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

extern spinlock_t ctx_alloc_lock;
extern unsigned long tlb_context_cache;
extern unsigned long mmu_context_bmap[];

extern void get_new_mmu_context(struct mm_struct *mm);
#ifdef CONFIG_SMP
extern void smp_new_mmu_context_version(void);
#else
#define smp_new_mmu_context_version() do { } while (0)
#endif

extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);

extern void __tsb_context_switch(unsigned long pgd_pa,
				 struct tsb_config *tsb_base,
				 struct tsb_config *tsb_huge,
				 unsigned long tsb_descr_pa);

/* Hand the base TSB, and the huge-page TSB when one has been
 * allocated, to the low-level TSB switch code.
 */
static inline void tsb_context_switch(struct mm_struct *mm)
{
	__tsb_context_switch(__pa(mm->pgd),
			     &mm->context.tsb_block[0],
#ifdef CONFIG_HUGETLB_PAGE
			     (mm->context.tsb_block[1].tsb ?
			      &mm->context.tsb_block[1] :
			      NULL)
#else
			     NULL
#endif
			     , __pa(&mm->context.tsb_descr[0]));
}

extern void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long mm_rss);
#ifdef CONFIG_SMP
extern void smp_tsb_sync(struct mm_struct *mm);
#else
#define smp_tsb_sync(__mm) do { } while (0)
#endif

/* Set MMU context in the actual hardware.  The stxa at label 661
 * targets ASI_DMMU; on sun4v machines the boot-time patcher uses the
 * .sun4v_1insn_patch section entry to rewrite it to use ASI_MMU.
 */
#define load_secondary_context(__mm) \
	__asm__ __volatile__( \
	"\n661:	stxa		%0, [%1] %2\n" \
	"	.section	.sun4v_1insn_patch, \"ax\"\n" \
	"	.word		661b\n" \
	"	stxa		%0, [%1] %3\n" \
	"	.previous\n" \
	"	flush		%%g6\n" \
	: /* No outputs */ \
	: "r" (CTX_HWBITS((__mm)->context)), \
	  "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU))

extern void __flush_tlb_mm(unsigned long, unsigned long);

/* Switch the current MM context.  Interrupts are disabled. */
static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
{
	unsigned long ctx_valid, flags;
	int cpu;

	spin_lock_irqsave(&mm->context.lock, flags);
	ctx_valid = CTX_VALID(mm->context);
	if (!ctx_valid)
		get_new_mmu_context(mm);

	/* We have to be extremely careful here or else we will miss
	 * a TSB grow if we switch back and forth between a kernel
	 * thread and an address space which has its TSB size increased
	 * on another processor.
	 *
	 * It is possible to play some games in order to optimize the
	 * switch, but the safest thing to do is to unconditionally
	 * perform the secondary context load and the TSB context switch.
	 *
	 * For reference the bad case is, for address space "A":
	 *
	 *		CPU 0			CPU 1
	 *	run address space A
	 *	set cpu0's bits in cpu_vm_mask
	 *	switch to kernel thread, borrow
	 *	address space A via enter_lazy_tlb
	 *				run address space A
	 *				set cpu1's bit in cpu_vm_mask
	 *				flush_tlb_pending()
	 *				reset cpu_vm_mask to just cpu1
	 *				TSB grow
	 *	run address space A
	 *	context was valid, so skip
	 *	TSB context switch
	 *
	 *	At that point cpu0 continues to use a stale TSB, the one from
	 *	before the TSB grow performed on cpu1.  cpu1 did not cross-call
	 *	cpu0 to update its TSB because at that point the cpu_vm_mask
	 *	only had cpu1 set in it.
	 */
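	/* Reload both the hardware secondary context register and the
	 * TSB state unconditionally; skipping either step when the
	 * context is already valid would reopen the stale-TSB window
	 * described above.
	 */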
	load_secondary_context(mm);
	tsb_context_switch(mm);

	/* Any time a processor runs a context on an address space
	 * for the first time, we must flush that context out of the
	 * local TLB.
	 */
	cpu = smp_processor_id();
	if (!ctx_valid || !cpu_isset(cpu, mm->cpu_vm_mask)) {
		cpu_set(cpu, mm->cpu_vm_mask);
		__flush_tlb_mm(CTX_HWBITS(mm->context),
			       SECONDARY_CONTEXT);
	}
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

#define deactivate_mm(tsk,mm)	do { } while (0)

/* Activate a new MM instance for the current task. */
static inline void activate_mm(struct mm_struct *active_mm, struct mm_struct *mm)
{
	unsigned long flags;
	int cpu;

	spin_lock_irqsave(&mm->context.lock, flags);
	if (!CTX_VALID(mm->context))
		get_new_mmu_context(mm);
	cpu = smp_processor_id();
	if (!cpu_isset(cpu, mm->cpu_vm_mask))
		cpu_set(cpu, mm->cpu_vm_mask);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
	tsb_context_switch(mm);
	spin_unlock_irqrestore(&mm->context.lock, flags);
}

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_MMU_CONTEXT_H) */