Lines matching defs:vcpu

Each entry below is the matching line's number within the source file, followed by the line of code.

10 static void load_spr_state(struct kvm_vcpu *vcpu,
14 mtspr(SPRN_TAR, vcpu->arch.tar);
18 current->thread.vrsave != vcpu->arch.vrsave)
19 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
22 if (vcpu->arch.hfscr & HFSCR_EBB) {
23 if (current->thread.ebbhr != vcpu->arch.ebbhr)
24 mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
25 if (current->thread.ebbrr != vcpu->arch.ebbrr)
26 mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
27 if (current->thread.bescr != vcpu->arch.bescr)
28 mtspr(SPRN_BESCR, vcpu->arch.bescr);
32 current->thread.tidr != vcpu->arch.tid)
33 mtspr(SPRN_TIDR, vcpu->arch.tid);
34 if (host_os_sprs->iamr != vcpu->arch.iamr)
35 mtspr(SPRN_IAMR, vcpu->arch.iamr);
36 if (host_os_sprs->amr != vcpu->arch.amr)
37 mtspr(SPRN_AMR, vcpu->arch.amr);
38 if (vcpu->arch.uamor != 0)
39 mtspr(SPRN_UAMOR, vcpu->arch.uamor);
40 if (current->thread.fscr != vcpu->arch.fscr)
41 mtspr(SPRN_FSCR, vcpu->arch.fscr);
42 if (current->thread.dscr != vcpu->arch.dscr)
43 mtspr(SPRN_DSCR, vcpu->arch.dscr);
44 if (vcpu->arch.pspb != 0)
45 mtspr(SPRN_PSPB, vcpu->arch.pspb);
54 if (!(vcpu->arch.ctrl & 1))
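
The matches above (lines 10-54) come from load_spr_state(), which moves guest SPR values into the hardware on entry. The recurring idiom is to skip the mtspr() whenever the host thread (current->thread.*) or the pre-entry snapshot (host_os_sprs->*) already holds the guest value, since SPR writes are comparatively expensive, and to gate the EBB registers entirely on the guest owning the facility in HFSCR. A minimal sketch of that idiom, assuming normal kernel context; the real function open-codes one comparison per SPR, as the matches show:

        /* Illustrative sketch only, not the kernel source verbatim. */
        static void load_guest_ebb_sketch(struct kvm_vcpu *vcpu)
        {
                if (!(vcpu->arch.hfscr & HFSCR_EBB))
                        return;         /* guest does not own EBB: nothing to switch */

                /* Only pay for an SPR write when the value actually changed. */
                if (current->thread.ebbhr != vcpu->arch.ebbhr)
                        mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
                if (current->thread.ebbrr != vcpu->arch.ebbrr)
                        mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
                if (current->thread.bescr != vcpu->arch.bescr)
                        mtspr(SPRN_BESCR, vcpu->arch.bescr);
        }
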
58 static void store_spr_state(struct kvm_vcpu *vcpu)
60 vcpu->arch.tar = mfspr(SPRN_TAR);
64 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
67 if (vcpu->arch.hfscr & HFSCR_EBB) {
68 vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
69 vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
70 vcpu->arch.bescr = mfspr(SPRN_BESCR);
74 vcpu->arch.tid = mfspr(SPRN_TIDR);
75 vcpu->arch.iamr = mfspr(SPRN_IAMR);
76 vcpu->arch.amr = mfspr(SPRN_AMR);
77 vcpu->arch.uamor = mfspr(SPRN_UAMOR);
78 vcpu->arch.fscr = mfspr(SPRN_FSCR);
79 vcpu->arch.dscr = mfspr(SPRN_DSCR);
80 vcpu->arch.pspb = mfspr(SPRN_PSPB);
82 vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
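
store_spr_state() (lines 58-82) is the exit-side mirror: every guest-visible SPR is read back unconditionally with mfspr(), since reads are cheap and the guest may have modified any of them. One asymmetry worth noting: CTRL is read through SPRN_CTRLF (the read form of the register), while the entry path writes SPRN_CTRLT and only does so when the guest's RUN bit is clear, because the host already runs with RUN set. A sketch of that pairing, assuming kernel context:

        /* Illustrative pairing for CTRL (sketch only). */
        vcpu->arch.ctrl = mfspr(SPRN_CTRLF);    /* exit path: read via CTRLF      */

        if (!(vcpu->arch.ctrl & 1))             /* entry path: host keeps RUN = 1, */
                mtspr(SPRN_CTRLT, 0);           /* so only the 0 case needs a write */
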
86 bool load_vcpu_state(struct kvm_vcpu *vcpu,
94 unsigned long guest_msr = vcpu->arch.shregs.msr;
96 kvmppc_restore_tm_hv(vcpu, guest_msr, true);
98 } else if (vcpu->arch.hfscr & HFSCR_TM) {
99 mtspr(SPRN_TEXASR, vcpu->arch.texasr);
100 mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
101 mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
106 load_spr_state(vcpu, host_os_sprs);
108 load_fp_state(&vcpu->arch.fp);
110 load_vr_state(&vcpu->arch.vr);
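
load_vcpu_state() (lines 86-110) wraps the SPR load with the rest of the register state. If the guest MSR shows an active transaction, the full TM state is restored with kvmppc_restore_tm_hv(); otherwise, if the guest merely owns the TM facility (HFSCR_TM), only TEXASR/TFHAR/TFIAR are loaded. Floating-point and vector state follow. A control-flow sketch, simplified and not verbatim; the bool return appears to flag that the heavyweight TM restore ran, which is why the caller at line 593 tests it:

        /* Simplified control-flow sketch of load_vcpu_state(). */
        bool load_vcpu_state_sketch(struct kvm_vcpu *vcpu,
                                    struct p9_host_os_sprs *host_os_sprs)
        {
                unsigned long guest_msr = vcpu->arch.shregs.msr;
                bool tm_restored = false;

                if (MSR_TM_ACTIVE(guest_msr)) {
                        kvmppc_restore_tm_hv(vcpu, guest_msr, true);
                        tm_restored = true;
                } else if (vcpu->arch.hfscr & HFSCR_TM) {
                        mtspr(SPRN_TEXASR, vcpu->arch.texasr); /* facility owned,   */
                        mtspr(SPRN_TFHAR, vcpu->arch.tfhar);   /* but no live       */
                        mtspr(SPRN_TFIAR, vcpu->arch.tfiar);   /* transaction       */
                }

                load_spr_state(vcpu, host_os_sprs);
                load_fp_state(&vcpu->arch.fp);
                load_vr_state(&vcpu->arch.vr);                 /* CONFIG_ALTIVEC only */

                return tm_restored;
        }
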
117 void store_vcpu_state(struct kvm_vcpu *vcpu)
119 store_spr_state(vcpu);
121 store_fp_state(&vcpu->arch.fp);
123 store_vr_state(&vcpu->arch.vr);
129 unsigned long guest_msr = vcpu->arch.shregs.msr;
131 kvmppc_save_tm_hv(vcpu, guest_msr, true);
132 } else if (vcpu->arch.hfscr & HFSCR_TM) {
133 vcpu->arch.texasr = mfspr(SPRN_TEXASR);
134 vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
135 vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
137 if (!vcpu->arch.nested) {
138 vcpu->arch.load_tm++; /* see load_ebb comment */
139 if (!vcpu->arch.load_tm)
140 vcpu->arch.hfscr &= ~HFSCR_TM;
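
store_vcpu_state() (lines 117-140) is the exit-side counterpart, and the load_tm counter at line 138 (mirrored by load_ebb in restore_p9_host_os_sprs below, lines 208-210) implements lazy facility management: the counter is bumped on every exit while the guest owns the facility, and when it wraps to zero the HFSCR bit is dropped, so a guest that has stopped using TM or EBB stops paying for the SPR switch; its next use of the facility traps to the hypervisor, which can turn the bit back on. Nested guests are excluded, presumably because their HFSCR is governed by the L1 hypervisor. Sketched from lines 137-140:

        /* Lazy facility aging on exit (illustrative; nested guests skipped). */
        if (!vcpu->arch.nested) {
                vcpu->arch.load_tm++;                   /* count exits with TM owned */
                if (!vcpu->arch.load_tm)                /* counter wrapped: age out  */
                        vcpu->arch.hfscr &= ~HFSCR_TM;  /* next guest use will trap  */
        }
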
155 /* vcpu guest regs must already be saved */
156 void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
169 current->thread.tidr != vcpu->arch.tid)
171 if (host_os_sprs->iamr != vcpu->arch.iamr)
173 if (vcpu->arch.uamor != 0)
175 if (host_os_sprs->amr != vcpu->arch.amr)
177 if (current->thread.fscr != vcpu->arch.fscr)
179 if (current->thread.dscr != vcpu->arch.dscr)
181 if (vcpu->arch.pspb != 0)
185 if (!(vcpu->arch.ctrl & 1))
190 vcpu->arch.vrsave != current->thread.vrsave)
193 if (vcpu->arch.hfscr & HFSCR_EBB) {
194 if (vcpu->arch.bescr != current->thread.bescr)
196 if (vcpu->arch.ebbhr != current->thread.ebbhr)
198 if (vcpu->arch.ebbrr != current->thread.ebbrr)
201 if (!vcpu->arch.nested) {
208 vcpu->arch.load_ebb++;
209 if (!vcpu->arch.load_ebb)
210 vcpu->arch.hfscr &= ~HFSCR_EBB;
214 if (vcpu->arch.tar != current->thread.tar)
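
restore_p9_host_os_sprs() (lines 155-214) undoes load_spr_state() once the guest registers have been saved (hence the comment at line 155): host values come back either from current->thread or from the p9_host_os_sprs snapshot taken before entry, and each write is again skipped when the guest never changed the value. Only the iamr and amr fields of the snapshot are visible in this listing; a partial, hypothetical reconstruction for orientation (the real struct carries more state, and the same snapshot is also handed to the PMU switch at lines 721/727):

        /* Hypothetical partial reconstruction; fields beyond iamr/amr are
         * not shown in this listing. */
        struct p9_host_os_sprs {
                unsigned long iamr;
                unsigned long amr;
                /* ... */
        };

        /* Restore mirror of the entry-side comparison (sketch): */
        if (host_os_sprs->amr != vcpu->arch.amr)
                mtspr(SPRN_AMR, host_os_sprs->amr);
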
220 void accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next)
222 struct kvmppc_vcore *vc = vcpu->arch.vcore;
229 curr = vcpu->arch.cur_activity;
230 vcpu->arch.cur_activity = next;
231 prev_tb = vcpu->arch.cur_tb_start;
232 vcpu->arch.cur_tb_start = tb;
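
accumulate_time() (lines 220-232) keeps the per-vcpu timing statistics: it reads the timebase (adjusted by the vcore's timebase offset, apparently the reason vc is fetched at line 222), closes out the interval charged to the previous accumulator (cur_activity, opened at cur_tb_start) and starts a new interval charged to next. A rough sketch of the hand-off, assuming the accumulator exposes a running total; the real structure also tracks min/max and a sequence count for lock-free readers:

        /* Simplified sketch of the interval hand-off (illustrative only). */
        u64 tb = mftb() - vc->tb_offset_applied;        /* guest-view timebase    */
        struct kvmhv_tb_accumulator *curr = vcpu->arch.cur_activity;

        vcpu->arch.cur_activity = next;                 /* start charging 'next'  */
        if (curr)
                curr->tb_total += tb - vcpu->arch.cur_tb_start; /* close 'curr'   */
        vcpu->arch.cur_tb_start = tb;
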
301 static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
303 struct kvm_nested_guest *nested = vcpu->arch.nested;
308 pid = kvmppc_get_pid(vcpu);
326 static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
333 pid = kvmppc_get_pid(vcpu);
346 for (i = 0; i < vcpu->arch.slb_max; i++)
347 mtslb(vcpu->arch.slb[i].orige, vcpu->arch.slb[i].origv);
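
switch_mmu_to_guest_radix() and switch_mmu_to_guest_hpt() (lines 301-347) move the MMU context over to the guest: both program the guest's LPID/LPCR and PID (via kvmppc_get_pid()), the radix variant selecting the nested shadow LPID when a nested guest is being run, and the HPT variant additionally reloading the guest's software-shadowed SLB entries in the loop at lines 346-347. A hedged sketch of the selection and the SLB load, with ordering and synchronization details omitted:

        /* Sketch of the guest MMU switch (simplified). */
        u32 lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;

        mtspr(SPRN_LPID, lpid);
        mtspr(SPRN_LPCR, lpcr);
        mtspr(SPRN_PID, kvmppc_get_pid(vcpu));

        /* HPT guests also reload their shadowed SLB: */
        for (i = 0; i < vcpu->arch.slb_max; i++)
                mtslb(vcpu->arch.slb[i].orige, vcpu->arch.slb[i].origv);
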
391 static void save_clear_guest_mmu(struct kvm *kvm, struct kvm_vcpu *vcpu)
403 for (i = 0; i < vcpu->arch.slb_nr; i++) {
409 vcpu->arch.slb[nr].orige = slbee | i;
410 vcpu->arch.slb[nr].origv = slbev;
414 vcpu->arch.slb_max = nr;
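
save_clear_guest_mmu() (lines 391-414) is the inverse on exit: for an HPT guest it walks the hardware SLB, copies the valid entries back into vcpu->arch.slb[] while compacting them (hence the separate nr index feeding slb_max at line 414), then invalidates the SLB before the host context comes back; a radix guest only needs the SLB cleared. A sketch of the save-and-compact loop; the mfslb() read and the valid-entry test are reconstructed from memory, so treat them as illustrative:

        /* Illustrative save-and-compact loop. */
        nr = 0;
        for (i = 0; i < vcpu->arch.slb_nr; i++) {
                u64 slbee, slbev;

                mfslb(i, &slbee, &slbev);
                if (slbee & SLB_ESID_V) {               /* keep only valid entries */
                        vcpu->arch.slb[nr].orige = slbee | i;
                        vcpu->arch.slb[nr].origv = slbev;
                        nr++;
                }
        }
        vcpu->arch.slb_max = nr;
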
494 unsigned long kvmppc_msr_hard_disable_set_facilities(struct kvm_vcpu *vcpu, unsigned long msr)
509 (vcpu->arch.hfscr & HFSCR_TM))
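
kvmppc_msr_hard_disable_set_facilities() (lines 494-509) prepares the host MSR before the register switch: interrupts are hard-disabled, and the facilities the state switch itself will touch (FP, VEC, VSX, plus TM when the guest owns HFSCR_TM, which is why line 509 matches) are enabled so that loading and storing that state does not fault. The updated MSR is returned; line 587 shows the caller reassigning msr from it. A sketch of the shape only; the real code also records the hard-disable in the PACA and gates each facility on CPU/kernel features:

        /* Shape of the MSR fixup (illustrative only). */
        unsigned long msr_needed = MSR_FP | MSR_VEC | MSR_VSX;

        if (vcpu->arch.hfscr & HFSCR_TM)
                msr_needed |= MSR_TM;

        msr &= ~MSR_EE;                         /* hard-disable interrupts   */
        msr |= msr_needed;
        __mtmsrd(msr, 0);                       /* one mtmsrd covers both    */
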
532 int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb)
535 struct kvm *kvm = vcpu->kvm;
536 struct kvm_nested_guest *nested = vcpu->arch.nested;
537 struct kvmppc_vcore *vc = vcpu->arch.vcore;
558 WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_HV);
559 WARN_ON_ONCE(!(vcpu->arch.shregs.msr & MSR_ME));
561 vcpu->arch.ceded = 0;
587 msr = kvmppc_msr_hard_disable_set_facilities(vcpu, msr);
593 if (unlikely(load_vcpu_state(vcpu, &host_os_sprs)))
608 mtspr(SPRN_PURR, vcpu->arch.purr);
609 mtspr(SPRN_SPURR, vcpu->arch.spurr);
613 if (vcpu->arch.doorbell_request) {
614 vcpu->arch.doorbell_request = 0;
619 if (vcpu->arch.dawr0 != host_dawr0)
620 mtspr(SPRN_DAWR0, vcpu->arch.dawr0);
621 if (vcpu->arch.dawrx0 != host_dawrx0)
622 mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0);
624 if (vcpu->arch.dawr1 != host_dawr1)
625 mtspr(SPRN_DAWR1, vcpu->arch.dawr1);
626 if (vcpu->arch.dawrx1 != host_dawrx1)
627 mtspr(SPRN_DAWRX1, vcpu->arch.dawrx1);
630 if (vcpu->arch.ciabr != host_ciabr)
631 mtspr(SPRN_CIABR, vcpu->arch.ciabr);
635 mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
638 if (vcpu->arch.psscr != host_psscr)
639 mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
642 mtspr(SPRN_HFSCR, vcpu->arch.hfscr);
644 mtspr(SPRN_HSRR0, vcpu->arch.regs.nip);
645 mtspr(SPRN_HSRR1, (vcpu->arch.shregs.msr & ~MSR_HV) | MSR_ME);
664 mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0);
665 mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1);
666 mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2);
667 mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3);
673 * in load_vcpu_state can change some SPRs and vcpu state (nip, msr).
698 switch_mmu_to_guest_radix(kvm, vcpu, lpcr);
700 switch_mmu_to_guest_hpt(kvm, vcpu, lpcr);
711 mtspr(SPRN_DEC, vcpu->arch.dec_expires - *tb);
716 mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
717 mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
718 mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
719 mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);
721 switch_pmu_to_guest(vcpu, &host_os_sprs);
722 accumulate_time(vcpu, &vcpu->arch.in_guest);
724 kvmppc_p9_enter_guest(vcpu);
726 accumulate_time(vcpu, &vcpu->arch.guest_exit);
727 switch_pmu_to_host(vcpu, &host_os_sprs);
730 vcpu->arch.shregs.srr0 = mfspr(SPRN_SRR0);
731 vcpu->arch.shregs.srr1 = mfspr(SPRN_SRR1);
732 vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
733 vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);
745 vcpu->arch.regs.gpr[1] = local_paca->kvm_hstate.scratch1;
746 vcpu->arch.regs.gpr[3] = local_paca->kvm_hstate.scratch2;
760 vcpu->arch.regs.gpr[9] = exsave[EX_R9/sizeof(u64)];
761 vcpu->arch.regs.gpr[10] = exsave[EX_R10/sizeof(u64)];
762 vcpu->arch.regs.gpr[11] = exsave[EX_R11/sizeof(u64)];
763 vcpu->arch.regs.gpr[12] = exsave[EX_R12/sizeof(u64)];
764 vcpu->arch.regs.gpr[13] = exsave[EX_R13/sizeof(u64)];
765 vcpu->arch.ppr = exsave[EX_PPR/sizeof(u64)];
766 vcpu->arch.cfar = exsave[EX_CFAR/sizeof(u64)];
767 vcpu->arch.regs.ctr = exsave[EX_CTR/sizeof(u64)];
769 vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;
772 vcpu->arch.fault_dar = exsave[EX_DAR/sizeof(u64)];
773 vcpu->arch.fault_dsisr = exsave[EX_DSISR/sizeof(u64)];
774 kvmppc_realmode_machine_check(vcpu);
777 kvmppc_p9_realmode_hmi_handler(vcpu);
780 vcpu->arch.emul_inst = mfspr(SPRN_HEIR);
783 vcpu->arch.fault_dar = exsave[EX_DAR/sizeof(u64)];
784 vcpu->arch.fault_dsisr = exsave[EX_DSISR/sizeof(u64)];
785 vcpu->arch.fault_gpa = mfspr(SPRN_ASDR);
788 vcpu->arch.fault_gpa = mfspr(SPRN_ASDR);
791 vcpu->arch.hfscr = mfspr(SPRN_HFSCR);
800 vcpu->arch.emul_inst = mfspr(SPRN_HEIR);
808 (vcpu->arch.shregs.msr & MSR_TS_S)) {
809 if (kvmhv_p9_tm_emulation_early(vcpu)) {
814 mtspr(SPRN_HSRR0, vcpu->arch.regs.nip);
815 mtspr(SPRN_HSRR1, vcpu->arch.shregs.msr);
825 local_paca->kvm_hstate.host_purr += purr - vcpu->arch.purr;
826 local_paca->kvm_hstate.host_spurr += spurr - vcpu->arch.spurr;
827 vcpu->arch.purr = purr;
828 vcpu->arch.spurr = spurr;
830 vcpu->arch.ic = mfspr(SPRN_IC);
831 vcpu->arch.pid = mfspr(SPRN_PID);
832 vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);
834 vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0);
835 vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1);
836 vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
837 vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);
841 vcpu->arch.doorbell_request = 1;
849 vcpu->arch.dec_expires = dec + *tb;
862 save_clear_guest_mmu(kvm, vcpu);
872 vcpu->arch.shregs.msr & MSR_TS_MASK)
876 store_vcpu_state(vcpu);
888 if (vcpu->arch.ciabr != host_ciabr)
892 if (vcpu->arch.dawr0 != host_dawr0)
894 if (vcpu->arch.dawrx0 != host_dawrx0)
897 if (vcpu->arch.dawr1 != host_dawr1)
899 if (vcpu->arch.dawrx1 != host_dawrx1)
914 restore_p9_host_os_sprs(vcpu, &host_os_sprs);
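
Taken together, the kvmhv_vcpu_entry_p9() matches (lines 532-914) trace the whole entry/exit path. A paraphrased outline keyed to the file line numbers above (these are not the source's own comments):

        /*
         * Rough outline of kvmhv_vcpu_entry_p9():
         *
         *   558-561  sanity-check the guest MSR (no MSR_HV, MSR_ME set),
         *            clear ceded
         *   587-593  hard-disable interrupts, enable needed facilities,
         *            load guest FP/VMX/TM state (load_vcpu_state)
         *   608-667  load guest SPRs: PURR/SPURR, DAWR/DAWRX/CIABR, PSSCR,
         *            HFSCR, HSRR0/1 from guest nip/msr, SPRGs
         *   698-719  switch the MMU to the guest (radix or HPT), program
         *            DEC, DAR/DSISR, SRR0/1
         *   721-727  switch the PMU to the guest, kvmppc_p9_enter_guest(),
         *            switch the PMU back, with accumulate_time() bracketing
         *            the guest interval
         *   730-815  pull exit state from SRR/DAR/DSISR, the PACA scratch
         *            registers and the exsave area; handle machine check,
         *            HMI, instruction emulation and TM special cases
         *   825-849  account PURR/SPURR against the host, read back
         *            IC/PID/PSSCR and the SPRGs, note doorbell requests,
         *            save the decrementer
         *   862-914  save and clear the guest MMU, store vcpu state,
         *            restore host DAWR/CIABR and the remaining host SPRs
         */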