/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ NULL }
};

void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
}

void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow,
	       sizeof(to_svcpu(vcpu)->slb));
	memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
#endif

#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
#endif
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb,
	       sizeof(to_svcpu(vcpu)->slb));
	memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
#endif

	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);
}

#if defined(EXIT_DEBUG)
static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
{
	u64 jd = mftb() - vcpu->arch.dec_jiffies;
	return vcpu->arch.dec - jd;
}
#endif
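
/*
 * The guest-visible MSR lives in vcpu->arch.msr; the MSR the CPU
 * actually runs with inside the guest is vcpu->arch.shadow_msr.  Only
 * a few bits (FE0/FE1/SF/SE/BE/DE) pass straight through from the
 * guest, while the bits the host depends on (ME/RI/IR/DR/PR/EE) are
 * always forced on, so guest code runs translated, interruptible and
 * in problem state.
 */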

static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.shadow_msr = vcpu->arch.msr;
	/* Guest MSR values */
	vcpu->arch.shadow_msr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE |
				 MSR_BE | MSR_DE;
	/* Process MSR values */
	vcpu->arch.shadow_msr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR |
				 MSR_EE;
	/* External providers (FPU/Altivec/VSX) the guest reserved */
	vcpu->arch.shadow_msr |= (vcpu->arch.msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	vcpu->arch.shadow_msr |= MSR_ISF | MSR_HV;
#endif
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = vcpu->arch.msr;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

	msr &= to_book3s(vcpu)->msr_mask;
	vcpu->arch.msr = msr;
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & (MSR_WE|MSR_POW)) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			vcpu->stat.halt_wakeup++;
		}
	}

	if ((vcpu->arch.msr & (MSR_PR|MSR_IR|MSR_DR)) !=
		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
	}

	/* Preload FPU if it's enabled */
	if (vcpu->arch.msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	vcpu->arch.srr0 = kvmppc_get_pc(vcpu);
	vcpu->arch.srr1 = vcpu->arch.msr | flags;
	kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
}

static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;  break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;  break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;  break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;  break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;  break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;      break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;     break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;       break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;    break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;   break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;       break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;         break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;       break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;           break;
	default:    prio = BOOK3S_IRQPRIO_MAX;           break;
	}

	return prio;
}

static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec)
{
	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	to_book3s(vcpu)->prog_flags = flags;
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_INTERRUPT_DECREMENTER >> 7,
			&vcpu->arch.pending_exceptions);
}
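
/*
 * Pending interrupts are tracked in vcpu->arch.pending_exceptions as a
 * bitmap of BOOK3S_IRQPRIO_* priorities rather than raw vector
 * numbers; kvmppc_book3s_vec2irqprio() translates between the two, and
 * kvmppc_core_deliver_interrupts() consumes the bits in priority
 * order.
 */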

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
				  struct kvm_interrupt *irq)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	ulong flags = 0ULL;

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = vcpu->arch.msr & MSR_EE;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
		deliver = vcpu->arch.msr & MSR_EE;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		flags = to_book3s(vcpu)->prog_flags;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, flags);

	return deliver;
}

void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n",
		       vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    (priority != BOOK3S_IRQPRIO_DECREMENTER)) {
			/* DEC interrupts get cleared by mtdec */
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}
}
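
/*
 * The guest's PVR decides which MMU flavour we emulate: PVRs in the
 * known 64-bit range get the Book3s_64 MMU with the handlers at
 * 0xfff00000, everything else gets the Book3s_32 MMU.  The host PVR,
 * read at the bottom of this function, only determines whether paired
 * single operations can run natively.
 */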

void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
	}

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as a 32-byte store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32-bit Book3S always has a 32-byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ("mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}

/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap
 * dcbz to emulate the 32-byte dcbz length.
 *
 * The Book3s_64 designers also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't use
 * it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage, KM_USER0);

	/* patch dcbz into reserved instruction, so we trap */
	for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((page[i] & 0xff0007ff) == INS_DCBZ)
			page[i] &= 0xfffffff7;

	kunmap_atomic(page, KM_USER0);
	put_page(hpage);
}
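
/*
 * A worked example of the patching above, assuming the usual encoding
 * INS_DCBZ == 0x7c0007ec: dcbz is primary opcode 31 with extended
 * opcode 1014 (0x3f6 << 1 == 0x7ec).  Clearing bit 0x8 yields 0x7e4,
 * i.e. extended opcode 1010, which this hack assumes nothing
 * implements, so executing the patched word raises a program
 * interrupt.  kvmppc_handle_exit() then recognizes the pattern
 * ((inst & 0xff0007ff) == (INS_DCBZ & 0xfffffff7)) and emulates the
 * original dcbz with a 32-byte line.
 */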

static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
			struct kvmppc_pte *pte)
{
	int relocated = (vcpu->arch.msr & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & 0xffffffff;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;
	}

	return r;
}

static hva_t kvmppc_bad_hva(void)
{
	return PAGE_OFFSET;
}

static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
			       bool read)
{
	hva_t hpage;

	if (read && !pte->may_read)
		goto err;

	if (!read && !pte->may_write)
		goto err;

	hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (kvm_is_error_hva(hpage))
		goto err;

	return hpage | (pte->raddr & ~PAGE_MASK);
err:
	return kvmppc_bad_hva();
}

int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	struct kvmppc_pte pte;

	vcpu->stat.st++;

	if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
		return -ENOENT;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	struct kvmppc_pte pte;
	hva_t hva = *eaddr;

	vcpu->stat.ld++;

	if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
		goto nopte;

	*eaddr = pte.raddr;

	hva = kvmppc_pte_to_hva(vcpu, &pte, true);
	if (kvm_is_error_hva(hva))
		goto mmio;

	if (copy_from_user(ptr, (void __user *)hva, size)) {
		printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva);
		goto mmio;
	}

	return EMULATE_DONE;

nopte:
	return -ENOENT;
mmio:
	return EMULATE_DO_MMIO;
}

static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return kvm_is_visible_gfn(vcpu->kvm, gfn);
}

int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;
	bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
	bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & 0xffffffff;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
	}

	switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((vcpu->arch.msr & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}
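
	/*
	 * The VSID_REAL* tags above keep vpage numbers from the
	 * untranslated regimes (no IR/DR, DR only, IR only) in ranges
	 * distinct from fully translated addresses, so a shadow PTE
	 * created under one translation regime should never satisfy a
	 * lookup made under another.
	 */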

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we use the dcbz hack, we have to map executable
		 * pages NX first, so we get a chance to patch the code
		 * before it runs. This renders our guest NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
		to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
		vcpu->arch.msr |=
			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
		to_book3s(vcpu)->dsisr =
			to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
		to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
		vcpu->arch.msr |=
			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte);
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			(!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if (r == RESUME_HOST_NV)
			r = RESUME_HOST;
	}

	return r;
}

static inline int get_fpr_index(int i)
{
#ifdef CONFIG_VSX
	i *= 2;
#endif
	return i;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64 *)t->fpr;
	int i;

	if (!(vcpu->arch.guest_owned_ext & msr))
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	switch (msr) {
	case MSR_FP:
		giveup_fpu(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];

		vcpu->arch.fpscr = t->fpscr.val;
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		giveup_altivec(current);
		memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
		vcpu->arch.vscr = t->vscr;
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		__giveup_vsx(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext &= ~msr;
	current->thread.regs->msr &= ~msr;
	kvmppc_recalc_shadow_msr(vcpu);
}

static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
	ulong srr0 = kvmppc_get_pc(vcpu);
	u32 last_inst = kvmppc_get_last_inst(vcpu);
	int ret;

	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
	if (ret == -ENOENT) {
		vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 33, 1);
		vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 34, 36, 0);
		vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0);
		kvmppc_book3s_queue_irqprio(vcpu,
					    BOOK3S_INTERRUPT_INST_STORAGE);
		return EMULATE_AGAIN;
	}

	return EMULATE_DONE;
}
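
/*
 * Extension state is switched lazily: a facility is only loaded for
 * the guest by kvmppc_handle_ext() once the guest faults with the
 * matching *_UNAVAIL interrupt, and kvmppc_giveup_ext() above writes
 * the guest's registers back out when the vcpu is put or a guest run
 * returns.
 */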

static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	/* Need to do paired single emulation? */
	if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
		return EMULATE_DONE;

	/* Read out the instruction */
	if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
		/* Need to emulate */
		return EMULATE_FAIL;

	return EMULATE_AGAIN;
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64 *)t->fpr;
	int i;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(vcpu->arch.msr & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	/* We already own the ext */
	if (vcpu->arch.guest_owned_ext & msr) {
		return RESUME_GUEST;
	}

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	current->thread.regs->msr |= msr;

	switch (msr) {
	case MSR_FP:
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];

		t->fpscr.val = vcpu->arch.fpscr;
		t->fpexc_mode = 0;
		kvmppc_load_up_fpu();
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
		t->vscr = vcpu->arch.vscr;
		t->vrsave = -1;
		kvmppc_load_up_altivec();
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
		kvmppc_load_up_vsx();
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext |= msr;

	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}
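
/*
 * Top-level exit handler: every guest exit lands here.  Returning
 * RESUME_GUEST re-enters the guest, RESUME_HOST (with
 * run->exit_reason set) hands the exit to userspace, and negative
 * values are errors such as -EINTR.
 */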

int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
#ifdef EXIT_DEBUG
	printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n",
		exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
		kvmppc_get_dec(vcpu), to_svcpu(vcpu)->shadow_srr1);
#elif defined(EXIT_DEBUG_SIMPLE)
	if ((exit_nr != 0x900) && (exit_nr != 0x500))
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
			vcpu->arch.msr);
#endif
	kvm_resched(vcpu);
	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
		vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We mark segments as unused when invalidating them, so
		 * treat this fault as a segment fault and just remap. */
		if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]
		    == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
			r = RESUME_GUEST;
			break;
		}
#endif

		/* only care about PTEG not found errors, but leave NX alone */
		if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
			r = kvmppc_handle_pagefault(run, vcpu,
						    kvmppc_get_pc(vcpu),
						    exit_nr);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu),
					     ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			vcpu->arch.msr |=
				to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu),
					     ~0xFFFUL);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We mark segments as unused when invalidating them, so
		 * treat this fault as a segment fault and just remap. */
		if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, dar);
			r = RESUME_GUEST;
			break;
		}
#endif

		/* The only case we need to handle is missing shadow PTEs */
		if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
		} else {
			vcpu->arch.dear = dar;
			to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFUL);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu,
					   kvmppc_get_fault_dar(vcpu)) < 0) {
			vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
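
	/*
	 * Program interrupts from problem state are normally reflected
	 * straight back into the guest.  The exception is the reserved
	 * opcode planted by kvmppc_patch_dcbz(), which must be emulated
	 * here even when the guest is in user mode.
	 */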
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		enum emulation_result er;
		ulong flags;

program_interrupt:
		flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;

		if (vcpu->arch.msr & MSR_PR) {
#ifdef EXIT_DEBUG
			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n",
			       kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
			if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
			    (INS_DCBZ & 0xfffffff7)) {
				kvmppc_core_queue_program(vcpu, flags);
				r = RESUME_GUEST;
				break;
			}
		}

		vcpu->stat.emulated_inst_exits++;
		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_AGAIN:
			r = RESUME_GUEST;
			break;
		case EMULATE_FAIL:
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, kvmppc_get_pc(vcpu),
			       kvmppc_get_last_inst(vcpu));
			kvmppc_core_queue_program(vcpu, flags);
			r = RESUME_GUEST;
			break;
		case EMULATE_DO_MMIO:
			run->exit_reason = KVM_EXIT_MMIO;
			r = RESUME_HOST_NV;
			break;
		default:
			BUG();
		}
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
		if (vcpu->arch.osi_enabled &&
		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else {
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;

		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
		case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
		case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
		}

		switch (kvmppc_check_ext(vcpu, exit_nr)) {
		case EMULATE_DONE:
			/* everything ok - let's enable the ext */
			r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
			break;
		case EMULATE_FAIL:
			/* we need to emulate this instruction */
			goto program_interrupt;
			break;
		default:
			/* nothing to worry about - go again */
			r = RESUME_GUEST;
			break;
		}
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
		if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
			to_book3s(vcpu)->dsisr = kvmppc_alignment_dsisr(vcpu,
				kvmppc_get_last_inst(vcpu));
			vcpu->arch.dear = kvmppc_alignment_dar(vcpu,
				kvmppc_get_last_inst(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
	case BOOK3S_INTERRUPT_TRACE:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	default:
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu),
			to_svcpu(vcpu)->shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
#ifdef EXIT_DEBUG
			printk(KERN_EMERG "KVM: Going back to host\n");
#endif
			vcpu->stat.signal_exits++;
			run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
		} else {
			/* In case an interrupt came in that was triggered
			 * from userspace (like DEC), we need to check what
			 * to inject now! */
			kvmppc_core_deliver_interrupts(vcpu);
		}
	}

#ifdef EXIT_DEBUG
	printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n",
	       vcpu, kvmppc_get_pc(vcpu), r);
#endif

	return r;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.msr;
	regs->srr0 = vcpu->arch.srr0;
	regs->srr1 = vcpu->arch.srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.sprg0;
	regs->sprg1 = vcpu->arch.sprg1;
	regs->sprg2 = vcpu->arch.sprg2;
	regs->sprg3 = vcpu->arch.sprg3;
	regs->sprg4 = vcpu->arch.sprg4;
	regs->sprg5 = vcpu->arch.sprg5;
	regs->sprg6 = vcpu->arch.sprg6;
	regs->sprg7 = vcpu->arch.sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.srr0 = regs->srr0;
	vcpu->arch.srr1 = regs->srr1;
	vcpu->arch.sprg0 = regs->sprg0;
	vcpu->arch.sprg1 = regs->sprg1;
	vcpu->arch.sprg2 = regs->sprg2;
	vcpu->arch.sprg3 = regs->sprg3;
	vcpu->arch.sprg4 = regs->sprg4;
	vcpu->arch.sprg5 = regs->sprg5;
	vcpu->arch.sprg6 = regs->sprg6;
	vcpu->arch.sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu3s->slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw;
		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}
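
/*
 * Unlike the get side above, setting segment state goes through the
 * emulated MMU (slbmte/mtsrin/kvmppc_set_bat) so the shadow state is
 * rebuilt, and the shadow PTEs are flushed once all segments are in
 * place.
 */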
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
					      sregs->u.s.ppc64.slb[i].slbe);
		}
	} else {
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = &kvm->memslots->memslots[log->slot];

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

int kvmppc_core_check_processor_compat(void)
{
	return 0;
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;

	vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto out;

	memset(vcpu_book3s, 0, sizeof(struct kvmppc_vcpu_book3s));

	vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
		kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
	if (!vcpu_book3s->shadow_vcpu)
		goto free_vcpu;

	vcpu = &vcpu_book3s->vcpu;
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	vcpu->arch.host_retip = kvm_return_point;
	vcpu->arch.host_msr = mfmsr();
#ifdef CONFIG_PPC_BOOK3S_64
	/* default to book3s_64 (970fx) */
	vcpu->arch.pvr = 0x3C0301;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	vcpu_book3s->slb_nr = 64;

	/* remember where some real-mode handlers are */
	vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
	vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
	vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
#ifdef CONFIG_PPC_BOOK3S_64
	vcpu->arch.rmcall = *(ulong *)kvmppc_rmcall;
#else
	vcpu->arch.rmcall = (ulong)kvmppc_rmcall;
#endif

	vcpu->arch.shadow_msr = MSR_USER64;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto free_shadow_vcpu;

	return vcpu;

free_shadow_vcpu:
	kfree(vcpu_book3s->shadow_vcpu);
free_vcpu:
	vfree(vcpu_book3s);
out:
	return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	kvm_vcpu_uninit(vcpu);
	kfree(vcpu_book3s->shadow_vcpu);
	vfree(vcpu_book3s);
}
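
/*
 * Run guest code.  Host FPU/Altivec/VSX state is parked on the stack
 * for the duration of the run, since the guest may dirty those
 * register files via kvmppc_handle_ext(); on the way out,
 * kvmppc_giveup_ext() pulls the guest's state back into the vcpu
 * before the host state is restored.
 */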
extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);

int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
	double fpr[32][TS_FPRWIDTH];
	unsigned int fpscr;
	int fpexc_mode;
#ifdef CONFIG_ALTIVEC
	vector128 vr[32];
	vector128 vscr;
	unsigned long uninitialized_var(vrsave);
	int used_vr;
#endif
#ifdef CONFIG_VSX
	int used_vsr;
#endif
	ulong ext_msr;

	/* No need to go into the guest when all we do is go right back out */
	if (signal_pending(current)) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	/* Save FPU state in stack */
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Save Altivec state in stack */
	used_vr = current->thread.used_vr;
	if (used_vr) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
		vscr = current->thread.vscr;
		vrsave = current->thread.vrsave;
	}
#endif

#ifdef CONFIG_VSX
	/* Save VSX state in stack */
	used_vsr = current->thread.used_vsr;
	if (used_vsr && (current->thread.regs->msr & MSR_VSX))
		__giveup_vsx(current);
#endif

	/* Remember the MSR with disabled extensions */
	ext_msr = current->thread.regs->msr;

	local_irq_enable();

	/* Preload FPU if it's enabled */
	if (vcpu->arch.msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	ret = __kvmppc_vcpu_entry(kvm_run, vcpu);

	local_irq_disable();

	current->thread.regs->msr = ext_msr;

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);

	/* Restore FPU state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Restore Altivec state from stack */
	if (used_vr && current->thread.used_vr) {
		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
		current->thread.vscr = vscr;
		current->thread.vrsave = vrsave;
	}
	current->thread.used_vr = used_vr;
#endif

#ifdef CONFIG_VSX
	current->thread.used_vsr = used_vsr;
#endif

	return ret;
}

static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
		     THIS_MODULE);

	if (r)
		return r;

	r = kvmppc_mmu_hpte_sysinit();

	return r;
}

static void kvmppc_book3s_exit(void)
{
	kvmppc_mmu_hpte_sysexit();
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);