#ifdef __KERNEL__
#ifndef __PPC_MMU_CONTEXT_H
#define __PPC_MMU_CONTEXT_H

#include <asm/atomic.h>
#include <asm/bitops.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm-generic/mm_hooks.h>

/*
 * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
 * (virtual segment identifiers) for each context.  Although the
 * hardware supports 24-bit VSIDs, and thus >1 million contexts,
 * we only use 32,768 of them.  That is ample, since there can be
 * at most around 30,000 tasks in the system anyway, and it means
 * that we can use a bitmap to indicate which contexts are in use.
 * Using a bitmap means that we entirely avoid all of the problems
 * that we used to have when the context number overflowed,
 * particularly on SMP systems.
 *  -- paulus.
 */

/*
 * This function defines the mapping from contexts to VSIDs (virtual
 * segment IDs).  We use a skew on both the context and the high 4 bits
 * of the 32-bit virtual address (the "effective segment ID") in order
 * to spread out the entries in the MMU hash table.  Note, if this
 * function is changed then arch/ppc/mm/hashtable.S will have to be
 * changed to correspond.
 */
#define CTX_TO_VSID(ctx, va)    (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
                                 & 0xffffff)
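/*
 * Worked example (illustrative only, not part of the original header):
 * for context 1 and effective address 0x10000000 (effective segment
 * ID 1),
 *
 *      CTX_TO_VSID(1, 0x10000000) = (1 * 14352 + 1 * 0x111) & 0xffffff
 *                                 = (0x3810 + 0x111) & 0xffffff
 *                                 = 0x3921
 *
 * Successive contexts are thus spaced 897 * 16 = 14352 VSIDs apart,
 * and successive segments within one context 0x111 apart, which
 * scatters their entries across the MMU hash table.
 */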
/*
 * The MPC8xx has only 16 contexts.  We rotate through them on each
 * task switch.  A better way would be to keep track of tasks that
 * own contexts, and implement an LRU usage.  That way very active
 * tasks don't always have to pay the TLB reload overhead.  The
 * kernel pages are mapped shared, so the kernel can run on behalf
 * of any task that makes a kernel entry.  Shared does not mean they
 * are not protected; it just means that the ASID comparison is not
 * performed.
 *      -- Dan
 *
 * The IBM4xx has 256 contexts, so we can just rotate through these
 * as a way of "switching" contexts.  If the TID of the TLB is zero,
 * the PID/TID comparison is disabled, so we can use a TID of zero
 * to represent all kernel pages as shared among all contexts.
 *      -- Dan
 */

static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

#ifdef CONFIG_8xx
#define NO_CONTEXT      16
#define LAST_CONTEXT    15
#define FIRST_CONTEXT   0

#elif defined(CONFIG_4xx)
#define NO_CONTEXT      256
#define LAST_CONTEXT    255
#define FIRST_CONTEXT   1

#elif defined(CONFIG_E200) || defined(CONFIG_E500)
#define NO_CONTEXT      256
#define LAST_CONTEXT    255
#define FIRST_CONTEXT   1

#else

/* PPC 6xx, 7xx CPUs */
#define NO_CONTEXT      ((unsigned long) -1)
#define LAST_CONTEXT    32767
#define FIRST_CONTEXT   1
#endif

/*
 * Set the current MMU context.
 * On 32-bit PowerPCs (other than the 8xx embedded chips), this is done by
 * loading up the segment registers for the user part of the address space.
 *
 * Since the PGD is immediately available, it is much faster to simply
 * pass it along as a second parameter, which is required for 8xx and
 * can be used for debugging on all processors (if you happen to have
 * an Abatron).
 */
extern void set_context(unsigned long contextid, pgd_t *pgd);

/*
 * Bitmap of contexts in use.
 * The size of this bitmap is LAST_CONTEXT + 1 bits.
 */
extern unsigned long context_map[];

/*
 * This caches the next context number that we expect to be free.
 * Its use is an optimization only; we can't rely on this context
 * number being free, but it usually will be.
 */
extern unsigned long next_mmu_context;

/*
 * If we don't have sufficient contexts to give one to every task
 * that could be in the system, we need to be able to steal contexts.
 * These variables support that.
 */
#if LAST_CONTEXT < 30000
#define FEW_CONTEXTS    1
extern atomic_t nr_free_contexts;
extern struct mm_struct *context_mm[LAST_CONTEXT+1];
extern void steal_context(void);
#endif

/*
 * Get a new mmu context for the address space described by `mm'.
 */
static inline void get_mmu_context(struct mm_struct *mm)
{
        unsigned long ctx;

        if (mm->context.id != NO_CONTEXT)
                return;
#ifdef FEW_CONTEXTS
        while (atomic_dec_if_positive(&nr_free_contexts) < 0)
                steal_context();
#endif
        ctx = next_mmu_context;
        while (test_and_set_bit(ctx, context_map)) {
                ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
                if (ctx > LAST_CONTEXT)
                        ctx = 0;
        }
        next_mmu_context = (ctx + 1) & LAST_CONTEXT;
        mm->context.id = ctx;
#ifdef FEW_CONTEXTS
        context_mm[ctx] = mm;
#endif
}

/*
 * Set up the context for a new address space.
 */
static inline int init_new_context(struct task_struct *t, struct mm_struct *mm)
{
        mm->context.id = NO_CONTEXT;
        mm->context.vdso_base = 0;
        return 0;
}

/*
 * We're finished using the context for an address space.
 */
static inline void destroy_context(struct mm_struct *mm)
{
        preempt_disable();
        if (mm->context.id != NO_CONTEXT) {
                clear_bit(mm->context.id, context_map);
                mm->context.id = NO_CONTEXT;
#ifdef FEW_CONTEXTS
                atomic_inc(&nr_free_contexts);
#endif
        }
        preempt_enable();
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
{
#ifdef CONFIG_ALTIVEC
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                asm volatile ("dssall;\n"
#ifndef CONFIG_POWER4
                "sync;\n" /* G4 needs a sync here, G5 apparently not */
#endif
                : : );
#endif /* CONFIG_ALTIVEC */

        tsk->thread.pgdir = next->pgd;

        /* No need to flush userspace segments if the mm doesn't change */
        if (prev == next)
                return;

        /* Set up the new userspace context */
        get_mmu_context(next);
        set_context(next->context.id, next->pgd);
}

#define deactivate_mm(tsk,mm)   do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
#define activate_mm(active_mm, mm)   switch_mm(active_mm, mm, current)

extern void mmu_context_init(void);

#endif /* __PPC_MMU_CONTEXT_H */
#endif /* __KERNEL__ */
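#if 0
/*
 * Illustrative userspace model of the context allocation loop in
 * get_mmu_context() above (hypothetical, added for exposition; kept
 * under #if 0 so it is never compiled).  It substitutes trivial
 * single-word stand-ins for the kernel's test_and_set_bit() and
 * find_next_zero_bit(), and assumes the 8xx case of LAST_CONTEXT == 15
 * so that all contexts fit in one unsigned long.  All model_* names
 * are invented for this sketch.
 */
#include <stdio.h>

#define MODEL_LAST_CONTEXT      15

static unsigned long model_map;                 /* one bit per context */
static unsigned long model_next_context = 1;    /* cached next-free hint */

/* Stand-in for test_and_set_bit(): set bit nr, return its old value. */
static int model_test_and_set_bit(unsigned long nr, unsigned long *map)
{
        int old = (*map >> nr) & 1;

        *map |= 1UL << nr;
        return old;
}

/* Stand-in for find_next_zero_bit(): first clear bit at or after off. */
static unsigned long model_find_next_zero_bit(const unsigned long *map,
                                              unsigned long size,
                                              unsigned long off)
{
        while (off < size && ((*map >> off) & 1))
                off++;
        return off;             /* == size when no clear bit remains */
}

/* The same search/rotate logic as get_mmu_context() above. */
static unsigned long model_get_context(void)
{
        unsigned long ctx = model_next_context;

        while (model_test_and_set_bit(ctx, &model_map)) {
                ctx = model_find_next_zero_bit(&model_map,
                                               MODEL_LAST_CONTEXT + 1, ctx);
                if (ctx > MODEL_LAST_CONTEXT)
                        ctx = 0;
        }
        model_next_context = (ctx + 1) & MODEL_LAST_CONTEXT;
        return ctx;
}

int main(void)
{
        int i;

        /* Claims contexts 1..5 from the bitmap, advancing the hint. */
        for (i = 0; i < 5; i++)
                printf("allocated context %lu\n", model_get_context());
        return 0;
}
#endif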