/*
 * PowerPC64 SLB support.
 *
 * Copyright (C) 2004 David Gibson <dwg@au.ibm.com>, IBM
 * Based on earlier code written by:
 * Dave Engebretsen and Mike Corrigan {engebret|mikejc}@us.ibm.com
 *    Copyright (c) 2001 Dave Engebretsen
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/paca.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/smp.h>
#include <asm/firmware.h>
#include <linux/compiler.h>

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

extern void slb_allocate_realmode(unsigned long ea);
extern void slb_allocate_user(unsigned long ea);

static void slb_allocate(unsigned long ea)
{
        /* Currently, we do real mode for all SLBs including user, but
         * that will change if we bring back dynamic VSIDs
         */
        slb_allocate_realmode(ea);
}

/* Build the ESID word of an SLB entry: effective segment ID, valid bit
 * and slot index. */
static inline unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
{
        return (ea & ESID_MASK) | SLB_ESID_V | slot;
}

/* Build the VSID word of an SLB entry from the kernel VSID for this EA
 * plus the protection/page-size flags. */
static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
{
        return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
}

static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
                                     unsigned long entry)
{
        /*
         * Clear the ESID first so the entry is not valid while we are
         * updating it.
         */
        get_slb_shadow()->save_area[entry].esid = 0;
        barrier();
        get_slb_shadow()->save_area[entry].vsid = vsid;
        barrier();
        get_slb_shadow()->save_area[entry].esid = esid;
}

static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
                                        unsigned long entry)
{
        /*
         * Updating the shadow buffer before writing the SLB ensures
         * we don't get a stale entry here if we get preempted by PHYP
         * between these two statements.
         */
        slb_shadow_update(mk_esid_data(ea, entry), mk_vsid_data(ea, flags),
                          entry);

        asm volatile("slbmte %0,%1" :
                     : "r" (mk_vsid_data(ea, flags)),
                       "r" (mk_esid_data(ea, entry))
                     : "memory" );
}

void slb_flush_and_rebolt(void)
{
        /* If you change this make sure you change SLB_NUM_BOLTED
         * appropriately too. */
        unsigned long linear_llp, vmalloc_llp, lflags, vflags;
        unsigned long ksp_esid_data;

        WARN_ON(!irqs_disabled());

        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;

        ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
        if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
                ksp_esid_data &= ~SLB_ESID_V;

        /* Only third entry (stack) may change here so only resave that */
        slb_shadow_update(ksp_esid_data,
                          mk_vsid_data(ksp_esid_data, lflags), 2);

        /* We need to do this all in asm, so we're sure we don't touch
         * the stack between the slbia and rebolting it. */
        asm volatile("isync\n"
                     "slbia\n"
                     /* Slot 1 - first VMALLOC segment */
                     "slbmte %0,%1\n"
                     /* Slot 2 - kernel stack */
                     "slbmte %2,%3\n"
                     "isync"
                     :: "r"(mk_vsid_data(VMALLOC_START, vflags)),
                        "r"(mk_esid_data(VMALLOC_START, 1)),
                        "r"(mk_vsid_data(ksp_esid_data, lflags)),
                        "r"(ksp_esid_data)
                     : "memory");
}

/* Flush all user entries from the segment table of the current processor. */
void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
{
        unsigned long offset = get_paca()->slb_cache_ptr;
        unsigned long esid_data = 0;
        unsigned long pc = KSTK_EIP(tsk);
        unsigned long stack = KSTK_ESP(tsk);
        unsigned long unmapped_base;

        if (offset <= SLB_CACHE_ENTRIES) {
                int i;
                asm volatile("isync" : : : "memory");
                for (i = 0; i < offset; i++) {
                        esid_data = ((unsigned long)get_paca()->slb_cache[i]
                                << SID_SHIFT) | SLBIE_C;
                        asm volatile("slbie %0" : : "r" (esid_data));
                }
                asm volatile("isync" : : : "memory");
        } else {
                slb_flush_and_rebolt();
        }

        /* Workaround POWER5 < DD2.1 issue */
        if (offset == 1 || offset > SLB_CACHE_ENTRIES)
                asm volatile("slbie %0" : : "r" (esid_data));

        get_paca()->slb_cache_ptr = 0;
        get_paca()->context = mm->context;

        /*
         * preload some userspace segments into the SLB.
         */
        if (test_tsk_thread_flag(tsk, TIF_32BIT))
                unmapped_base = TASK_UNMAPPED_BASE_USER32;
        else
                unmapped_base = TASK_UNMAPPED_BASE_USER64;

        if (is_kernel_addr(pc))
                return;
        slb_allocate(pc);

        if (GET_ESID(pc) == GET_ESID(stack))
                return;

        if (is_kernel_addr(stack))
                return;
        slb_allocate(stack);

        if ((GET_ESID(pc) == GET_ESID(unmapped_base))
            || (GET_ESID(stack) == GET_ESID(unmapped_base)))
                return;

        if (is_kernel_addr(unmapped_base))
                return;
        slb_allocate(unmapped_base);
}

static inline void patch_slb_encoding(unsigned int *insn_addr,
                                      unsigned int immed)
{
        /* Assume the instruction had a "0" immediate value, just
         * "or" in the new value
         */
        *insn_addr |= immed;
        flush_icache_range((unsigned long)insn_addr,
                           (unsigned long)insn_addr + 4);
}

void slb_initialize(void)
{
        unsigned long linear_llp, vmalloc_llp, io_llp;
        unsigned long lflags, vflags;
        static int slb_encoding_inited;
        extern unsigned int *slb_miss_kernel_load_linear;
        extern unsigned int *slb_miss_kernel_load_io;

        /* Prepare our SLB miss handler based on our page size */
        linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
        io_llp = mmu_psize_defs[mmu_io_psize].sllp;
        vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
        get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;

        if (!slb_encoding_inited) {
                slb_encoding_inited = 1;
                patch_slb_encoding(slb_miss_kernel_load_linear,
                                   SLB_VSID_KERNEL | linear_llp);
                patch_slb_encoding(slb_miss_kernel_load_io,
                                   SLB_VSID_KERNEL | io_llp);

                DBG("SLB: linear LLP = %04lx\n", linear_llp);
                DBG("SLB: io     LLP = %04lx\n", io_llp);
        }

        get_paca()->stab_rr = SLB_NUM_BOLTED;

        /* On iSeries the bolted entries have already been set up by
         * the hypervisor from the lparMap data in head.S */
        if (firmware_has_feature(FW_FEATURE_ISERIES))
                return;

        lflags = SLB_VSID_KERNEL | linear_llp;
        vflags = SLB_VSID_KERNEL | vmalloc_llp;

        /* Invalidate the entire SLB (even slot 0) & all the ERATS */
        asm volatile("isync":::"memory");
        asm volatile("slbmte %0,%0"::"r" (0) : "memory");
"memory"); 231 asm volatile("isync; slbia; isync":::"memory"); 232 create_shadowed_slbe(PAGE_OFFSET, lflags, 0); 233 234 create_shadowed_slbe(VMALLOC_START, vflags, 1); 235 236 /* We don't bolt the stack for the time being - we're in boot, 237 * so the stack is in the bolted segment. By the time it goes 238 * elsewhere, we'll call _switch() which will bolt in the new 239 * one. */ 240 asm volatile("isync":::"memory"); 241} 242