/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/hash.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash32.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

/* #define DEBUG_MMU */
/* #define DEBUG_SR */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while(0)
#endif

#ifdef DEBUG_SR
#define dprintk_sr(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_sr(a, ...) do { } while(0)
#endif

#if PAGE_SHIFT != 12
#error Unknown page size
#endif

#ifdef CONFIG_SMP
#error XXX need to grab mmu_hash_lock
#endif

#ifdef CONFIG_PTE_64BIT
#error Only 32 bit pages are supported for now
#endif

static ulong htab;
static u32 htabmask;

void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
        volatile u32 *pteg;

        /* Remove from host HTAB */
        pteg = (u32 *)pte->slot;
        pteg[0] = 0;

        /* And make sure it's gone from the TLB too */
        asm volatile ("sync");
        asm volatile ("tlbie %0" : : "r" (pte->pte.eaddr) : "memory");
        asm volatile ("sync");
        asm volatile ("tlbsync");
}

/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
        return hash_64(gvsid, SID_MAP_BITS);
}

static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        u16 sid_map_mask;

        if (vcpu->arch.msr & MSR_PR)
                gvsid |= VSID_PR;

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        map = &to_book3s(vcpu)->sid_map[sid_map_mask];
        if (map->guest_vsid == gvsid) {
                dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
                           gvsid, map->host_vsid);
                return map;
        }

        map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
        if (map->guest_vsid == gvsid) {
                dprintk_sr("SR: Searching 0x%llx -> 0x%llx\n",
                           gvsid, map->host_vsid);
                return map;
        }

        dprintk_sr("SR: Searching 0x%llx -> not found\n", gvsid);
        return NULL;
}

static u32 *kvmppc_mmu_get_pteg(struct kvm_vcpu *vcpu, u32 vsid, u32 eaddr,
                                bool primary)
{
        u32 page, hash;
        ulong pteg = htab;

        page = (eaddr & ~ESID_MASK) >> 12;

        hash = ((vsid ^ page) << 6);
        if (!primary)
                hash = ~hash;

        hash &= htabmask;

        pteg |= hash;

        dprintk_mmu("htab: %lx | hash: %x | htabmask: %x | pteg: %lx\n",
                    htab, hash, htabmask, pteg);

        return (u32 *)pteg;
}

extern char etext[];

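/*
 * Map a guest page into the host hash table (HTAB): resolve the guest
 * physical address to a host pfn, translate the guest VSID into its
 * shadow (host) VSID, then walk both PTE groups of the hashed address
 * for a free slot, evicting an existing entry once both groups are
 * full. The new mapping is also registered with the shadow PTE cache
 * so kvmppc_mmu_invalidate_pte() can tear it down again later.
 */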
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
{
        pfn_t hpaddr;
        u64 va;
        u64 vsid;
        struct kvmppc_sid_map *map;
        volatile u32 *pteg;
        u32 eaddr = orig_pte->eaddr;
        u32 pteg0, pteg1;
        register int rr = 0;
        bool primary = false;
        bool evict = false;
        struct hpte_cache *pte;

        /* Get host physical address for gpa */
        hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
        if (is_error_pfn(hpaddr)) {
                printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
                                 orig_pte->raddr >> PAGE_SHIFT);
                return -EINVAL;
        }
        hpaddr <<= PAGE_SHIFT;

        /* and write the mapping ea -> hpa into the pt */
        vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
        map = find_sid_vsid(vcpu, vsid);
        if (!map) {
                kvmppc_mmu_map_segment(vcpu, eaddr);
                map = find_sid_vsid(vcpu, vsid);
        }
        BUG_ON(!map);

        vsid = map->host_vsid;
        va = (vsid << SID_SHIFT) | (eaddr & ~ESID_MASK);

next_pteg:
        if (rr == 16) {
                primary = !primary;
                evict = true;
                rr = 0;
        }

        pteg = kvmppc_mmu_get_pteg(vcpu, vsid, eaddr, primary);

        /* not evicting yet */
        if (!evict && (pteg[rr] & PTE_V)) {
                rr += 2;
                goto next_pteg;
        }

        dprintk_mmu("KVM: old PTEG: %p (%d)\n", pteg, rr);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

        pteg0 = ((eaddr & 0x0fffffff) >> 22) | (vsid << 7) | PTE_V |
                (primary ? 0 : PTE_SEC);
        pteg1 = hpaddr | PTE_M | PTE_R | PTE_C;

        if (orig_pte->may_write) {
                pteg1 |= PP_RWRW;
                mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
        } else {
                pteg1 |= PP_RWRX;
        }

        local_irq_disable();

        if (pteg[rr]) {
                pteg[rr] = 0;
                asm volatile ("sync");
        }
        pteg[rr + 1] = pteg1;
        pteg[rr] = pteg0;
        asm volatile ("sync");

        local_irq_enable();

        dprintk_mmu("KVM: new PTEG: %p\n", pteg);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[0], pteg[1]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[2], pteg[3]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[4], pteg[5]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[6], pteg[7]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[8], pteg[9]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[10], pteg[11]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[12], pteg[13]);
        dprintk_mmu("KVM: %08x - %08x\n", pteg[14], pteg[15]);

        /* Now tell our Shadow PTE code about the new page */
        pte = kvmppc_mmu_hpte_cache_next(vcpu);

        dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%llx (0x%llx) -> %lx\n",
                    orig_pte->may_write ? 'w' : '-',
                    orig_pte->may_execute ? 'x' : '-',
                    orig_pte->eaddr, (ulong)pteg, va,
                    orig_pte->vpage, hpaddr);

        pte->slot = (ulong)&pteg[rr];
        pte->host_va = va;
        pte->pte = *orig_pte;
        pte->pfn = hpaddr >> PAGE_SHIFT;

        kvmppc_mmu_hpte_cache_map(vcpu, pte);

        return 0;
}

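/*
 * Allocate a fresh shadow VSID for a guest VSID that has no mapping
 * yet. Entries alternate between the forward and the mirrored hash
 * slot (find_sid_vsid probes both). When this context's VSID range is
 * exhausted, all shadow state (sid map, shadow PTEs and segment
 * registers) is flushed and allocation restarts at vsid_first.
 */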
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
        struct kvmppc_sid_map *map;
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
        u16 sid_map_mask;
        static int backwards_map = 0;

        if (vcpu->arch.msr & MSR_PR)
                gvsid |= VSID_PR;

        /* Two gvsids can hash to the same slot, and collisions would
           always evict the entry created first, so alternate between
           the forward and the mirrored slot */

        sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
        if (backwards_map)
                sid_map_mask = SID_MAP_MASK - sid_map_mask;

        map = &to_book3s(vcpu)->sid_map[sid_map_mask];

        /* Make sure we're taking the other map next time */
        backwards_map = !backwards_map;

        /* Uh-oh ... out of mappings. Let's flush! */
        if (vcpu_book3s->vsid_next >= vcpu_book3s->vsid_max) {
                vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
                memset(vcpu_book3s->sid_map, 0,
                       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
                kvmppc_mmu_pte_flush(vcpu, 0, 0);
                kvmppc_mmu_flush_segments(vcpu);
        }
        map->host_vsid = vcpu_book3s->vsid_next;

        /* Would have to be 111 to be completely aligned with the rest of
           Linux, but that is just way too little space! */
        vcpu_book3s->vsid_next++;

        map->guest_vsid = gvsid;
        map->valid = true;

        return map;
}

int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
        u32 esid = eaddr >> SID_SHIFT;
        u64 gvsid;
        u32 sr;
        struct kvmppc_sid_map *map;
        struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);

        if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
                /* Invalidate an entry */
                svcpu->sr[esid] = SR_INVALID;
                return -ENOENT;
        }

        map = find_sid_vsid(vcpu, gvsid);
        if (!map)
                map = create_sid_map(vcpu, gvsid);

        map->guest_esid = esid;
        sr = map->host_vsid | SR_KP;
        svcpu->sr[esid] = sr;

        dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);

        return 0;
}

void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu);

        dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
        for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
                svcpu->sr[i] = SR_INVALID;
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_hpte_destroy(vcpu);
        preempt_disable();
        __destroy_context(to_book3s(vcpu)->context_id);
        preempt_enable();
}

/* From mm/mmu_context_hash32.c */
#define CTX_TO_VSID(ctx) (((ctx) * (897 * 16)) & 0xffffff)

int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int err;
        ulong sdr1;

        err = __init_new_context();
        if (err < 0)
                return -1;
        vcpu3s->context_id = err;

        vcpu3s->vsid_max = CTX_TO_VSID(vcpu3s->context_id + 1) - 1;
        vcpu3s->vsid_first = CTX_TO_VSID(vcpu3s->context_id);

        BUG_ON(vcpu3s->vsid_max < vcpu3s->vsid_first);

        vcpu3s->vsid_next = vcpu3s->vsid_first;

        /* Remember where the HTAB is */
        asm ( "mfsdr1 %0" : "=r"(sdr1) );
        htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0;
        htab = (ulong)__va(sdr1 & 0xffff0000);

        kvmppc_mmu_hpte_init(vcpu);

        return 0;
}