/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include "timing.h"
#include "trace.h"

/* Primary opcodes of the trap instructions (tw/td). */
#define OP_TRAP 3
#define OP_TRAP_64 2

/* Extended opcodes under primary opcode 31 that we emulate below. */
#define OP_31_XOP_LWZX 23
#define OP_31_XOP_LBZX 87
#define OP_31_XOP_STWX 151
#define OP_31_XOP_STBX 215
#define OP_31_XOP_LBZUX 119
#define OP_31_XOP_STBUX 247
#define OP_31_XOP_LHZX 279
#define OP_31_XOP_LHZUX 311
#define OP_31_XOP_MFSPR 339
#define OP_31_XOP_LHAX 343
#define OP_31_XOP_STHX 407
#define OP_31_XOP_STHUX 439
#define OP_31_XOP_MTSPR 467
#define OP_31_XOP_DCBI 470
#define OP_31_XOP_LWBRX 534
#define OP_31_XOP_TLBSYNC 566
#define OP_31_XOP_STWBRX 662
#define OP_31_XOP_LHBRX 790
#define OP_31_XOP_STHBRX 918

/* D-form load/store primary opcodes. */
#define OP_LWZ 32
#define OP_LWZU 33
#define OP_LBZ 34
#define OP_LBZU 35
#define OP_STW 36
#define OP_STWU 37
#define OP_STB 38
#define OP_STBU 39
#define OP_LHZ 40
#define OP_LHZU 41
#define OP_LHA 42
#define OP_LHAU 43
#define OP_STH 44
#define OP_STHU 45

72#ifdef CONFIG_PPC_BOOK3S 73static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu) 74{ 75 return 1; 76} 77#else 78static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu) 79{ 80 return vcpu->arch.tcr & TCR_DIE; 81} 82#endif 83 84void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) 85{ 86 unsigned long dec_nsec; 87 88 pr_debug("mtDEC: %x\n", vcpu->arch.dec); 89#ifdef CONFIG_PPC_BOOK3S 90 /* mtdec lowers the interrupt line when positive. */ 91 kvmppc_core_dequeue_dec(vcpu); 92 93 /* POWER4+ triggers a dec interrupt if the value is < 0 */ 94 if (vcpu->arch.dec & 0x80000000) { 95 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); 96 kvmppc_core_queue_dec(vcpu); 97 return; 98 } 99#endif 100 if (kvmppc_dec_enabled(vcpu)) { 101 /* The decrementer ticks at the same rate as the timebase, so 102 * that's how we convert the guest DEC value to the number of 103 * host ticks. */ 104 105 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); 106 dec_nsec = vcpu->arch.dec; 107 dec_nsec *= 1000; 108 dec_nsec /= tb_ticks_per_usec; 109 hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec), 110 HRTIMER_MODE_REL); 111 vcpu->arch.dec_jiffies = get_tb(); 112 } else { 113 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); 114 } 115} 116 117int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) 118{ 119 u32 inst = kvmppc_get_last_inst(vcpu); 120 u32 ea; 121 int ra; 122 int rb; 123 int rs; 124 int rt; 125 int sprn; 126 enum emulation_result emulated = EMULATE_DONE; 127 int advance = 1; 128 129 /* this default type might be overwritten by subcategories */ 130 kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); 131 132 pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); 133 134 switch (get_op(inst)) { 135 case OP_TRAP: 136#ifdef CONFIG_PPC_BOOK3S 137 case OP_TRAP_64: 138 kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); 139#else 140 kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR); 141#endif 142 advance = 0; 143 break; 144 145 case 31: 146 switch (get_xop(inst)) { 
147 148 case OP_31_XOP_LWZX: 149 rt = get_rt(inst); 150 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); 151 break; 152 153 case OP_31_XOP_LBZX: 154 rt = get_rt(inst); 155 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); 156 break; 157 158 case OP_31_XOP_LBZUX: 159 rt = get_rt(inst); 160 ra = get_ra(inst); 161 rb = get_rb(inst); 162 163 ea = kvmppc_get_gpr(vcpu, rb); 164 if (ra) 165 ea += kvmppc_get_gpr(vcpu, ra); 166 167 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); 168 kvmppc_set_gpr(vcpu, ra, ea); 169 break; 170 171 case OP_31_XOP_STWX: 172 rs = get_rs(inst); 173 emulated = kvmppc_handle_store(run, vcpu, 174 kvmppc_get_gpr(vcpu, rs), 175 4, 1); 176 break; 177 178 case OP_31_XOP_STBX: 179 rs = get_rs(inst); 180 emulated = kvmppc_handle_store(run, vcpu, 181 kvmppc_get_gpr(vcpu, rs), 182 1, 1); 183 break; 184 185 case OP_31_XOP_STBUX: 186 rs = get_rs(inst); 187 ra = get_ra(inst); 188 rb = get_rb(inst); 189 190 ea = kvmppc_get_gpr(vcpu, rb); 191 if (ra) 192 ea += kvmppc_get_gpr(vcpu, ra); 193 194 emulated = kvmppc_handle_store(run, vcpu, 195 kvmppc_get_gpr(vcpu, rs), 196 1, 1); 197 kvmppc_set_gpr(vcpu, rs, ea); 198 break; 199 200 case OP_31_XOP_LHAX: 201 rt = get_rt(inst); 202 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); 203 break; 204 205 case OP_31_XOP_LHZX: 206 rt = get_rt(inst); 207 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); 208 break; 209 210 case OP_31_XOP_LHZUX: 211 rt = get_rt(inst); 212 ra = get_ra(inst); 213 rb = get_rb(inst); 214 215 ea = kvmppc_get_gpr(vcpu, rb); 216 if (ra) 217 ea += kvmppc_get_gpr(vcpu, ra); 218 219 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); 220 kvmppc_set_gpr(vcpu, ra, ea); 221 break; 222 223 case OP_31_XOP_MFSPR: 224 sprn = get_sprn(inst); 225 rt = get_rt(inst); 226 227 switch (sprn) { 228 case SPRN_SRR0: 229 kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0); break; 230 case SPRN_SRR1: 231 kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1); break; 232 case SPRN_PVR: 233 kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); 
break; 234 case SPRN_PIR: 235 kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break; 236 case SPRN_MSSSR0: 237 kvmppc_set_gpr(vcpu, rt, 0); break; 238 239 /* Note: mftb and TBRL/TBWL are user-accessible, so 240 * the guest can always access the real TB anyways. 241 * In fact, we probably will never see these traps. */ 242 case SPRN_TBWL: 243 kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break; 244 case SPRN_TBWU: 245 kvmppc_set_gpr(vcpu, rt, get_tb()); break; 246 247 case SPRN_SPRG0: 248 kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg0); break; 249 case SPRN_SPRG1: 250 kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg1); break; 251 case SPRN_SPRG2: 252 kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg2); break; 253 case SPRN_SPRG3: 254 kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg3); break; 255 /* Note: SPRG4-7 are user-readable, so we don't get 256 * a trap. */ 257 258 case SPRN_DEC: 259 { 260 u64 jd = get_tb() - vcpu->arch.dec_jiffies; 261 kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd); 262 pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n", 263 vcpu->arch.dec, jd, 264 kvmppc_get_gpr(vcpu, rt)); 265 break; 266 } 267 default: 268 emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt); 269 if (emulated == EMULATE_FAIL) { 270 printk("mfspr: unknown spr %x\n", sprn); 271 kvmppc_set_gpr(vcpu, rt, 0); 272 } 273 break; 274 } 275 break; 276 277 case OP_31_XOP_STHX: 278 rs = get_rs(inst); 279 ra = get_ra(inst); 280 rb = get_rb(inst); 281 282 emulated = kvmppc_handle_store(run, vcpu, 283 kvmppc_get_gpr(vcpu, rs), 284 2, 1); 285 break; 286 287 case OP_31_XOP_STHUX: 288 rs = get_rs(inst); 289 ra = get_ra(inst); 290 rb = get_rb(inst); 291 292 ea = kvmppc_get_gpr(vcpu, rb); 293 if (ra) 294 ea += kvmppc_get_gpr(vcpu, ra); 295 296 emulated = kvmppc_handle_store(run, vcpu, 297 kvmppc_get_gpr(vcpu, rs), 298 2, 1); 299 kvmppc_set_gpr(vcpu, ra, ea); 300 break; 301 302 case OP_31_XOP_MTSPR: 303 sprn = get_sprn(inst); 304 rs = get_rs(inst); 305 switch (sprn) { 306 case SPRN_SRR0: 307 vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs); 
break; 308 case SPRN_SRR1: 309 vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs); break; 310 311 case SPRN_TBWL: break; 312 case SPRN_TBWU: break; 313 314 case SPRN_MSSSR0: break; 315 316 case SPRN_DEC: 317 vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs); 318 kvmppc_emulate_dec(vcpu); 319 break; 320 321 case SPRN_SPRG0: 322 vcpu->arch.sprg0 = kvmppc_get_gpr(vcpu, rs); break; 323 case SPRN_SPRG1: 324 vcpu->arch.sprg1 = kvmppc_get_gpr(vcpu, rs); break; 325 case SPRN_SPRG2: 326 vcpu->arch.sprg2 = kvmppc_get_gpr(vcpu, rs); break; 327 case SPRN_SPRG3: 328 vcpu->arch.sprg3 = kvmppc_get_gpr(vcpu, rs); break; 329 330 default: 331 emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); 332 if (emulated == EMULATE_FAIL) 333 printk("mtspr: unknown spr %x\n", sprn); 334 break; 335 } 336 break; 337 338 case OP_31_XOP_DCBI: 339 /* Do nothing. The guest is performing dcbi because 340 * hardware DMA is not snooped by the dcache, but 341 * emulated DMA either goes through the dcache as 342 * normal writes, or the host kernel has handled dcache 343 * coherence. */ 344 break; 345 346 case OP_31_XOP_LWBRX: 347 rt = get_rt(inst); 348 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); 349 break; 350 351 case OP_31_XOP_TLBSYNC: 352 break; 353 354 case OP_31_XOP_STWBRX: 355 rs = get_rs(inst); 356 ra = get_ra(inst); 357 rb = get_rb(inst); 358 359 emulated = kvmppc_handle_store(run, vcpu, 360 kvmppc_get_gpr(vcpu, rs), 361 4, 0); 362 break; 363 364 case OP_31_XOP_LHBRX: 365 rt = get_rt(inst); 366 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); 367 break; 368 369 case OP_31_XOP_STHBRX: 370 rs = get_rs(inst); 371 ra = get_ra(inst); 372 rb = get_rb(inst); 373 374 emulated = kvmppc_handle_store(run, vcpu, 375 kvmppc_get_gpr(vcpu, rs), 376 2, 0); 377 break; 378 379 default: 380 /* Attempt core-specific emulation below. 
*/ 381 emulated = EMULATE_FAIL; 382 } 383 break; 384 385 case OP_LWZ: 386 rt = get_rt(inst); 387 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); 388 break; 389 390 case OP_LWZU: 391 ra = get_ra(inst); 392 rt = get_rt(inst); 393 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); 394 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); 395 break; 396 397 case OP_LBZ: 398 rt = get_rt(inst); 399 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); 400 break; 401 402 case OP_LBZU: 403 ra = get_ra(inst); 404 rt = get_rt(inst); 405 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); 406 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); 407 break; 408 409 case OP_STW: 410 rs = get_rs(inst); 411 emulated = kvmppc_handle_store(run, vcpu, 412 kvmppc_get_gpr(vcpu, rs), 413 4, 1); 414 break; 415 416 case OP_STWU: 417 ra = get_ra(inst); 418 rs = get_rs(inst); 419 emulated = kvmppc_handle_store(run, vcpu, 420 kvmppc_get_gpr(vcpu, rs), 421 4, 1); 422 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); 423 break; 424 425 case OP_STB: 426 rs = get_rs(inst); 427 emulated = kvmppc_handle_store(run, vcpu, 428 kvmppc_get_gpr(vcpu, rs), 429 1, 1); 430 break; 431 432 case OP_STBU: 433 ra = get_ra(inst); 434 rs = get_rs(inst); 435 emulated = kvmppc_handle_store(run, vcpu, 436 kvmppc_get_gpr(vcpu, rs), 437 1, 1); 438 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); 439 break; 440 441 case OP_LHZ: 442 rt = get_rt(inst); 443 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); 444 break; 445 446 case OP_LHZU: 447 ra = get_ra(inst); 448 rt = get_rt(inst); 449 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); 450 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); 451 break; 452 453 case OP_LHA: 454 rt = get_rt(inst); 455 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); 456 break; 457 458 case OP_LHAU: 459 ra = get_ra(inst); 460 rt = get_rt(inst); 461 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); 462 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); 463 break; 464 
465 case OP_STH: 466 rs = get_rs(inst); 467 emulated = kvmppc_handle_store(run, vcpu, 468 kvmppc_get_gpr(vcpu, rs), 469 2, 1); 470 break; 471 472 case OP_STHU: 473 ra = get_ra(inst); 474 rs = get_rs(inst); 475 emulated = kvmppc_handle_store(run, vcpu, 476 kvmppc_get_gpr(vcpu, rs), 477 2, 1); 478 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); 479 break; 480 481 default: 482 emulated = EMULATE_FAIL; 483 } 484 485 if (emulated == EMULATE_FAIL) { 486 emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance); 487 if (emulated == EMULATE_AGAIN) { 488 advance = 0; 489 } else if (emulated == EMULATE_FAIL) { 490 advance = 0; 491 printk(KERN_ERR "Couldn't emulate instruction 0x%08x " 492 "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); 493 kvmppc_core_queue_program(vcpu, 0); 494 } 495 } 496 497 trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated); 498 499 /* Advance past emulated instruction. */ 500 if (advance) 501 kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4); 502 503 return emulated; 504} 505