Lines matching defs:vcpu in arch/x86/kvm/mmu.h (KVM x86 MMU header)

void kvm_init_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
			     unsigned long cr4, u64 efer, gpa_t nested_cr3);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     int huge_page_level, bool accessed_dirty,
			     gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
			  u64 fault_address, char *insn, int insn_len);
void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
					struct kvm_mmu *mmu);

int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
			 int bytes);

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu->root.hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}
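
kvm_mmu_reload() is deliberately lazy: it only rebuilds the paging root when the cached root.hpa has been invalidated (INVALID_PAGE, which KVM defines as all-ones), and KVM calls it on the vCPU entry path so the cost of rebuilding is paid only when a root was actually torn down. A minimal user-space sketch of the same check-then-rebuild pattern; the toy_* names and the fake root address are invented for illustration and are not KVM code:

#include <stdint.h>
#include <stdio.h>

#define INVALID_PAGE (~(uint64_t)0)	/* same all-ones sentinel KVM uses */

/* Hypothetical stand-in for vcpu->arch.mmu->root.hpa. */
struct toy_mmu {
	uint64_t root_hpa;
};

/* Stand-in for kvm_mmu_load(): pretend to allocate and install a new root. */
static int toy_mmu_load(struct toy_mmu *mmu)
{
	mmu->root_hpa = 0x1000;		/* fake "allocated" root page */
	return 0;
}

/* Same shape as kvm_mmu_reload(): a no-op while the root is still valid. */
static int toy_mmu_reload(struct toy_mmu *mmu)
{
	if (mmu->root_hpa != INVALID_PAGE)
		return 0;
	return toy_mmu_load(mmu);
}

int main(void)
{
	struct toy_mmu mmu = { .root_hpa = INVALID_PAGE };

	toy_mmu_reload(&mmu);	/* root missing -> rebuilt */
	printf("root hpa: %#llx\n", (unsigned long long)mmu.root_hpa);
	toy_mmu_reload(&mmu);	/* root still valid -> no-op */
	return 0;
}

Tearing a root down (for example when roots become obsolete, see kvm_mmu_free_obsolete_roots() above) therefore stays cheap, and the rebuild is deferred to the next entry that needs it.
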
static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
{
	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE) ? cr3 & X86_CR3_PCID_MASK : 0;
}

static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
{
	return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
}

static inline unsigned long kvm_get_active_cr3_lam_bits(struct kvm_vcpu *vcpu)
{
	if (!guest_can_use(vcpu, X86_FEATURE_LAM))
		return 0;
	return kvm_read_cr3(vcpu) & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57);
}
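
Both CR3 helpers above are pure bit masking: the PCID occupies CR3 bits 11:0 and is only meaningful when CR4.PCIDE is set, while LAM contributes two enable bits near the top of CR3 (bit 61 for LAM_U57, bit 62 for LAM_U48). A self-contained sketch of that masking, with the architectural constants written out locally rather than pulled from kernel headers:

#include <stdint.h>
#include <stdio.h>

/* Local copies of the architectural bit positions used by the helpers above. */
#define CR3_PCID_MASK	0xFFFull	/* CR3[11:0] = PCID when CR4.PCIDE=1 */
#define CR3_LAM_U57	(1ull << 61)	/* linear address masking, 57-bit user */
#define CR3_LAM_U48	(1ull << 62)	/* linear address masking, 48-bit user */

/* The masking kvm_get_pcid() performs, minus the vCPU plumbing. */
static uint64_t get_pcid(uint64_t cr3, int pcide)
{
	return pcide ? (cr3 & CR3_PCID_MASK) : 0;
}

int main(void)
{
	uint64_t cr3 = 0x123000 | 0x7 | CR3_LAM_U57;	/* root PA, PCID 7, LAM_U57 on */

	printf("pcid     = %llu\n", (unsigned long long)get_pcid(cr3, 1));
	printf("lam bits = %#llx\n",
	       (unsigned long long)(cr3 & (CR3_LAM_U48 | CR3_LAM_U57)));
	return 0;
}

Roughly speaking, the load path recombines the pieces: under shadow paging the vendor code ORs the active PCID back into the CR3 value it programs, which is why kvm_get_active_pcid() exists as a separate helper.
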
static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
{
	u64 root_hpa = vcpu->arch.mmu->root.hpa;

	if (!VALID_PAGE(root_hpa))
		return;
	static_call(kvm_x86_load_mmu_pgd)(vcpu, root_hpa,
					  vcpu->arch.mmu->root_role.level);
}

static inline void kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
{
	if (!tdp_enabled || mmu == &vcpu->arch.guest_mmu)
		return;
	__kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
}

static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pte_pkey, u64 access)
{
	unsigned long rflags = static_call(kvm_x86_get_rflags)(vcpu);
	/* ... */
	kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
	/* ... */
	pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;
	/* ... */
}
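
The pkru_bits expression is the protection-keys part of the permission check: PKRU holds two bits per key, AD (access disable) in the even position and WD (write disable) in the odd one, so shifting by pte_pkey * 2 and masking with 3 yields exactly that pair. A self-contained illustration of the layout; the names and the sample PKRU value are local to the example:

#include <stdint.h>
#include <stdio.h>

#define PKRU_AD	0x1	/* access-disable bit within a key's pair */
#define PKRU_WD	0x2	/* write-disable bit within a key's pair */

/* The same extraction permission_fault() performs on vcpu->arch.pkru. */
static unsigned int pkey_bits(uint32_t pkru, unsigned int pkey)
{
	return (pkru >> (pkey * 2)) & 3;
}

int main(void)
{
	/* Sample PKRU: key 1 is write-disabled, key 2 is fully disabled. */
	uint32_t pkru = (PKRU_WD << (1 * 2)) | ((PKRU_AD | PKRU_WD) << (2 * 2));
	unsigned int k;

	for (k = 0; k < 3; k++)
		printf("key %u: AD=%d WD=%d\n", k,
		       !!(pkey_bits(pkru, k) & PKRU_AD),
		       !!(pkey_bits(pkru, k) & PKRU_WD));
	return 0;
}

In permission_fault() the extracted pair is additionally filtered through mmu->pkru_mask, which is non-zero only when protection keys apply to the current paging mode, so these two bits alone never decide the fault.
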
int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);

gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
			   struct x86_exception *exception);

static inline gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				      gpa_t gpa, u64 access, struct x86_exception *exception)
{
	if (mmu != &vcpu->arch.nested_mmu)
		return gpa;
	return translate_nested_gpa(vcpu, gpa, access, exception);
}
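
kvm_translate_gpa() is the nested-virtualization dispatch: addresses that arrive through vcpu->arch.nested_mmu are L2 guest-physical and must first be walked through L1's paging structures (translate_nested_gpa()), while every other MMU already operates on L1 GPAs, so the address passes through untouched. A toy, self-contained sketch of that dispatch; the toy_* names and the fixed +1 MiB "mapping" are invented for illustration:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t gpa_t;

/* Toy stand-in for a KVM MMU: only records whether it deals in L2 GPAs. */
struct toy_mmu {
	int is_nested;
};

/* Hypothetical L1 page-table walk, reduced to a fixed offset. */
static gpa_t toy_translate_nested_gpa(gpa_t l2_gpa)
{
	return l2_gpa + 0x100000;	/* pretend L1 maps L2 memory at +1 MiB */
}

/* The same dispatch kvm_translate_gpa() does: pass through unless the
 * address is an L2 guest-physical address that still needs an L1 walk. */
static gpa_t toy_translate_gpa(struct toy_mmu *mmu, gpa_t gpa)
{
	if (!mmu->is_nested)
		return gpa;
	return toy_translate_nested_gpa(gpa);
}

int main(void)
{
	struct toy_mmu root = { .is_nested = 0 };
	struct toy_mmu nested = { .is_nested = 1 };

	printf("direct: %#llx\n", (unsigned long long)toy_translate_gpa(&root, 0x2000));
	printf("nested: %#llx\n", (unsigned long long)toy_translate_gpa(&nested, 0x2000));
	return 0;
}

In real KVM the nested walk can fault, which is why the x86_exception pointer is threaded through both functions.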