/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

/*
 * Emulation of privileged Book3S instructions and SPR accesses that trap
 * out of the guest into KVM, plus helpers to compute DSISR/DAR values for
 * alignment interrupts.
 */

#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/reg.h>

/* Extended opcodes (XO field) for primary opcode 19 */
#define OP_19_XOP_RFID		18
#define OP_19_XOP_RFI		50

/* Extended opcodes (XO field) for primary opcode 31 */
#define OP_31_XOP_MFMSR		83
#define OP_31_XOP_MTMSR		146
#define OP_31_XOP_MTMSRD	178
#define OP_31_XOP_MTSR		210
#define OP_31_XOP_MTSRIN	242
#define OP_31_XOP_TLBIEL	274
#define OP_31_XOP_TLBIE		306
#define OP_31_XOP_SLBMTE	402
#define OP_31_XOP_SLBIE		434
#define OP_31_XOP_SLBIA		498
#define OP_31_XOP_MFSR		595
#define OP_31_XOP_MFSRIN	659
#define OP_31_XOP_DCBA		758
#define OP_31_XOP_SLBMFEV	851
#define OP_31_XOP_EIOIO		854
#define OP_31_XOP_SLBMFEE	915

/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
#define OP_31_XOP_DCBZ		1010

/* Primary opcodes of FP load/store forms that can raise alignment faults */
#define OP_LFS			48
#define OP_LFD			50
#define OP_STFS			52
#define OP_STFD		54

/* Gekko/Broadway graphics quantization registers (paired singles) */
#define SPRN_GQR0		912
#define SPRN_GQR1		913
#define SPRN_GQR2		914
#define SPRN_GQR3		915
#define SPRN_GQR4		916
#define SPRN_GQR5		917
#define SPRN_GQR6		918
#define SPRN_GQR7		919

/* Book3S_32 defines mfsrin(v) - but that messes up our abstract
 * function pointers, so let's just disable the define. */
#undef mfsrin

/*
 * Emulate one privileged instruction that trapped out of the guest.
 *
 * @run:     the kvm_run structure for this exit
 * @vcpu:    the vcpu that executed the instruction
 * @inst:    the raw instruction word
 * @advance: set to 0 when the guest PC must NOT be advanced past the
 *           instruction (e.g. rfi, or a dcbz that faulted and queued an
 *           interrupt instead of completing)
 *
 * Returns EMULATE_DONE on success.  Anything this function cannot handle
 * is passed on to the paired-single emulation as a last resort.
 */
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                           unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;

	switch (get_op(inst)) {
	case 19:
		switch (get_xop(inst)) {
		case OP_19_XOP_RFID:
		case OP_19_XOP_RFI:
			/* Return from interrupt: restore guest PC and MSR
			 * from SRR0/SRR1.  Don't advance past the rfi - the
			 * new PC comes from SRR0. */
			kvmppc_set_pc(vcpu, vcpu->arch.srr0);
			kvmppc_set_msr(vcpu, vcpu->arch.srr1);
			*advance = 0;
			break;

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_MFMSR:
			kvmppc_set_gpr(vcpu, get_rt(inst), vcpu->arch.msr);
			break;
		case OP_31_XOP_MTMSRD:
		{
			ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst));
			if (inst & 0x10000) {
				/* mtmsrd with L=1 only updates RI and EE */
				vcpu->arch.msr &= ~(MSR_RI | MSR_EE);
				vcpu->arch.msr |= rs & (MSR_RI | MSR_EE);
			} else
				kvmppc_set_msr(vcpu, rs);
			break;
		}
		case OP_31_XOP_MTMSR:
			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst)));
			break;
		case OP_31_XOP_MFSR:
		{
			int srnum;

			/* SR number is in the instruction's SR field */
			srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, get_rt(inst), sr);
			}
			break;
		}
		case OP_31_XOP_MFSRIN:
		{
			int srnum;

			/* SR number comes from the top 4 bits of (rB) */
			srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf;
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, get_rt(inst), sr);
			}
			break;
		}
		case OP_31_XOP_MTSR:
			vcpu->arch.mmu.mtsrin(vcpu,
				(inst >> 16) & 0xf,
				kvmppc_get_gpr(vcpu, get_rs(inst)));
			break;
		case OP_31_XOP_MTSRIN:
			vcpu->arch.mmu.mtsrin(vcpu,
				(kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf,
				kvmppc_get_gpr(vcpu, get_rs(inst)));
			break;
		case OP_31_XOP_TLBIE:
		case OP_31_XOP_TLBIEL:
		{
			/* Bit 0x00200000 is the L (large page) field */
			bool large = (inst & 0x00200000) ? true : false;
			ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst));
			vcpu->arch.mmu.tlbie(vcpu, addr, large);
			break;
		}
		case OP_31_XOP_EIOIO:
			/* No I/O ordering to enforce for the guest - NOP */
			break;
		case OP_31_XOP_SLBMTE:
			/* SLB ops only exist on Book3S_64 MMUs; bail out if
			 * the MMU backend doesn't provide them */
			if (!vcpu->arch.mmu.slbmte)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbmte(vcpu,
					kvmppc_get_gpr(vcpu, get_rs(inst)),
					kvmppc_get_gpr(vcpu, get_rb(inst)));
			break;
		case OP_31_XOP_SLBIE:
			if (!vcpu->arch.mmu.slbie)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbie(vcpu,
					kvmppc_get_gpr(vcpu, get_rb(inst)));
			break;
		case OP_31_XOP_SLBIA:
			if (!vcpu->arch.mmu.slbia)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbia(vcpu);
			break;
		case OP_31_XOP_SLBMFEE:
			if (!vcpu->arch.mmu.slbmfee) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb;

				rb = kvmppc_get_gpr(vcpu, get_rb(inst));
				t = vcpu->arch.mmu.slbmfee(vcpu, rb);
				kvmppc_set_gpr(vcpu, get_rt(inst), t);
			}
			break;
		case OP_31_XOP_SLBMFEV:
			if (!vcpu->arch.mmu.slbmfev) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb;

				rb = kvmppc_get_gpr(vcpu, get_rb(inst));
				t = vcpu->arch.mmu.slbmfev(vcpu, rb);
				kvmppc_set_gpr(vcpu, get_rt(inst), t);
			}
			break;
		case OP_31_XOP_DCBA:
			/* Gets treated as NOP */
			break;
		case OP_31_XOP_DCBZ:
		{
			/* dcbz is patched to this reserved opcode so it traps
			 * here; emulate it by storing 32 zero bytes at the
			 * (32-byte aligned) effective address. */
			ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst));
			ulong ra = 0;
			ulong addr, vaddr;
			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
			u32 dsisr;
			int r;

			/* RA=0 means base of 0, not GPR0 */
			if (get_ra(inst))
				ra = kvmppc_get_gpr(vcpu, get_ra(inst));

			addr = (ra + rb) & ~31ULL;
			/* In 32-bit mode the effective address wraps at 4G */
			if (!(vcpu->arch.msr & MSR_SF))
				addr &= 0xffffffff;
			vaddr = addr;

			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
			if ((r == -ENOENT) || (r == -EPERM)) {
				/* The store faulted: reflect a data storage
				 * interrupt to the guest instead of advancing
				 * past the dcbz. */
				*advance = 0;
				vcpu->arch.dear = vaddr;
				to_svcpu(vcpu)->fault_dar = vaddr;

				dsisr = DSISR_ISSTORE;
				if (r == -ENOENT)
					dsisr |= DSISR_NOHPTE;
				else if (r == -EPERM)
					dsisr |= DSISR_PROTFAULT;

				to_book3s(vcpu)->dsisr = dsisr;
				to_svcpu(vcpu)->fault_dsisr = dsisr;

				kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_DATA_STORAGE);
			}

			break;
		}
		default:
			emulated = EMULATE_FAIL;
		}
		break;
	default:
		emulated = EMULATE_FAIL;
	}

	/* Anything we couldn't handle may still be a paired-single op */
	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_emulate_paired_single(run, vcpu);

	return emulated;
}

/*
 * Decode a 32-bit BAT register write into the shadow BAT structure.
 *
 * @bat:   the shadow BAT to update
 * @upper: true when writing the upper BAT register (BEPI/BL/Vs/Vp),
 *         false for the lower one (BRPN/WIMG/PP)
 * @val:   the raw 32-bit value written by the guest
 *
 * bat->raw keeps both halves: the upper BAT word lives in the low 32 bits,
 * the lower BAT word in the high 32 bits (see kvmppc_read_bat).
 */
void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
                    u32 val)
{
	if (upper) {
		/* Upper BAT */
		u32 bl = (val >> 2) & 0x7ff;
		bat->bepi_mask = (~bl << 17);
		bat->bepi = val & 0xfffe0000;
		bat->vs = (val & 2) ? 1 : 0;
		bat->vp = (val & 1) ? 1 : 0;
		bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
	} else {
		/* Lower BAT */
		bat->brpn = val & 0xfffe0000;
		bat->wimg = (val >> 3) & 0xf;
		bat->pp = val & 3;
		bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
	}
}

/*
 * Read back the raw 32-bit value of one BAT SPR from the shadow state.
 * Even SPRNs (xBATnU) return the upper word, odd SPRNs (xBATnL) the lower
 * word, matching the layout written by kvmppc_set_bat().
 */
static u32 kvmppc_read_bat(struct kvm_vcpu *vcpu, int sprn)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	struct kvmppc_bat *bat;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
		bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
		break;
	case SPRN_IBAT4U ... SPRN_IBAT7L:
		bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
		break;
	case SPRN_DBAT0U ... SPRN_DBAT3L:
		bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
		break;
	case SPRN_DBAT4U ... SPRN_DBAT7L:
		bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
		break;
	default:
		/* Callers only pass BAT SPRs */
		BUG();
	}

	if (sprn % 2)
		return bat->raw >> 32;
	else
		return bat->raw;
}

/*
 * Write one BAT SPR: locate the shadow BAT for this SPRN and update the
 * upper (even SPRN) or lower (odd SPRN) half via kvmppc_set_bat().
 */
static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	struct kvmppc_bat *bat;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
		bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
		break;
	case SPRN_IBAT4U ... SPRN_IBAT7L:
		bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
		break;
	case SPRN_DBAT0U ... SPRN_DBAT3L:
		bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
		break;
	case SPRN_DBAT4U ... SPRN_DBAT7L:
		bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
		break;
	default:
		BUG();
	}

	kvmppc_set_bat(vcpu, bat, !(sprn % 2), val);
}

/*
 * Emulate a guest mtspr.  The new value is taken from GPR rs.
 * Writes to SPRs we deliberately ignore (thermal, perf counters, ...)
 * succeed silently; unknown SPRs fail emulation unless DEBUG_SPR is set.
 */
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
	int emulated = EMULATE_DONE;
	ulong spr_val = kvmppc_get_gpr(vcpu, rs);

	switch (sprn) {
	case SPRN_SDR1:
		to_book3s(vcpu)->sdr1 = spr_val;
		break;
	case SPRN_DSISR:
		to_book3s(vcpu)->dsisr = spr_val;
		break;
	case SPRN_DAR:
		vcpu->arch.dear = spr_val;
		break;
	case SPRN_HIOR:
		to_book3s(vcpu)->hior = spr_val;
		break;
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
		kvmppc_write_bat(vcpu, sprn, (u32)spr_val);
		/* BAT writes happen so rarely that we're ok to flush
		 * everything here */
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
		break;
	case SPRN_HID0:
		to_book3s(vcpu)->hid[0] = spr_val;
		break;
	case SPRN_HID1:
		to_book3s(vcpu)->hid[1] = spr_val;
		break;
	case SPRN_HID2:
		to_book3s(vcpu)->hid[2] = spr_val;
		break;
	case SPRN_HID2_GEKKO:
		to_book3s(vcpu)->hid[2] = spr_val;
		/* HID2.PSE controls paired single on gekko */
		switch (vcpu->arch.pvr) {
		case 0x00080200:	/* lonestar 2.0 */
		case 0x00088202:	/* lonestar 2.2 */
		case 0x70000100:	/* gekko 1.0 */
		case 0x00080100:	/* gekko 2.0 */
		case 0x00083203:	/* gekko 2.3a */
		case 0x00083213:	/* gekko 2.3b */
		case 0x00083204:	/* gekko 2.4 */
		case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
		case 0x00087200:	/* broadway */
			if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
				/* Native paired singles */
			} else if (spr_val & (1 << 29)) { /* HID2.PSE */
				vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
				/* Force FP state reload so paired-single
				 * emulation starts from a clean slate */
				kvmppc_giveup_ext(vcpu, MSR_FP);
			} else {
				vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
			}
			break;
		}
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		to_book3s(vcpu)->hid[4] = spr_val;
		break;
	case SPRN_HID5:
		to_book3s(vcpu)->hid[5] = spr_val;
		/* guest HID5 set can change is_dcbz32 */
		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
		    (mfmsr() & MSR_HV))
			vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
		break;
	case SPRN_ICTC:
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
		/* Writes to these are silently ignored */
		break;
	default:
		printk(KERN_INFO "KVM: invalid SPR write: %d\n", sprn);
#ifndef DEBUG_SPR
		emulated = EMULATE_FAIL;
#endif
		break;
	}

	return emulated;
}

/*
 * Emulate a guest mfspr.  The result is placed in GPR rt.
 * SPRs we don't model (thermal, perf counters, ...) read back as 0;
 * unknown SPRs fail emulation unless DEBUG_SPR is set.
 */
int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
		kvmppc_set_gpr(vcpu, rt, kvmppc_read_bat(vcpu, sprn));
		break;
	case SPRN_SDR1:
		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1);
		break;
	case SPRN_DSISR:
		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->dsisr);
		break;
	case SPRN_DAR:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear);
		break;
	case SPRN_HIOR:
		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior);
		break;
	case SPRN_HID0:
		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]);
		break;
	case SPRN_HID1:
		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]);
		break;
	case SPRN_HID2:
	case SPRN_HID2_GEKKO:
		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]);
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]);
		break;
	case SPRN_HID5:
		kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]);
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		kvmppc_set_gpr(vcpu, rt,
			       to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]);
		break;
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
		/* Unmodeled SPRs read back as 0 */
		kvmppc_set_gpr(vcpu, rt, 0);
		break;
	default:
		printk(KERN_INFO "KVM: invalid SPR read: %d\n", sprn);
#ifndef DEBUG_SPR
		emulated = EMULATE_FAIL;
#endif
		break;
	}

	return emulated;
}

/*
 * Build the DSISR value for an alignment interrupt caused by @inst,
 * reassembling instruction fields into the DSISR bit positions the
 * architecture mandates.
 */
u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
{
	u32 dsisr = 0;

	/*
	 * This is what the spec says about DSISR bits (not mentioned = 0):
	 *
	 * 12:13		[DS]	Set to bits 30:31
	 * 15:16		[X]	Set to bits 29:30
	 * 17			[X]	Set to bit 25
	 *			[D/DS]	Set to bit 5
	 * 18:21		[X]	Set to bits 21:24
	 *			[D/DS]	Set to bits 1:4
	 * 22:26			Set to bits 6:10 (RT/RS/FRT/FRS)
	 * 27:31			Set to bits 11:15 (RA)
	 */

	switch (get_op(inst)) {
	/* D-form */
	case OP_LFS:
	case OP_LFD:
	case OP_STFD:
	case OP_STFS:
		dsisr |= (inst >> 12) & 0x4000;	/* bit 17 */
		dsisr |= (inst >> 17) & 0x3c00;	/* bits 18:21 */
		break;
	/* X-form */
	case 31:
		dsisr |= (inst << 14) & 0x18000;	/* bits 15:16 */
		dsisr |= (inst << 8) & 0x04000;	/* bit 17 */
		dsisr |= (inst << 3) & 0x03c00;	/* bits 18:21 */
		break;
	default:
		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
		break;
	}

	/* Register fields are at the same place in both forms */
	dsisr |= (inst >> 16) & 0x03ff;	/* bits 22:31 */

	return dsisr;
}

/*
 * Compute the effective address (DAR) accessed by the unaligned
 * instruction @inst: (RA|0) + displacement for D-form, (RA|0) + (RB)
 * for X-form.
 */
ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
	ulong dar = 0;
	ulong ra;

	switch (get_op(inst)) {
	case OP_LFS:
	case OP_LFD:
	case OP_STFD:
	case OP_STFS:
		ra = get_ra(inst);
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		/* Sign-extended 16-bit displacement */
		dar += (s32)((s16)inst);
		break;
	case 31:
		ra = get_ra(inst);
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += kvmppc_get_gpr(vcpu, get_rb(inst));
		break;
	default:
		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
		break;
	}

	return dar;
}