Lines matching defs:vcpu in arch/x86/kvm/mtrr.c

34 static struct kvm_mtrr_range *var_mtrr_msr_to_range(struct kvm_vcpu *vcpu,
39 return &vcpu->arch.mtrr_state.var_ranges[index];
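
Lines 34-39 map a variable-range MTRR MSR back to its slot in var_ranges[]. Architecturally, variable MTRRs are base/mask MSR pairs starting at IA32_MTRR_PHYSBASE0 (0x200), so the pair index is half the MSR's offset from that base. A minimal standalone model of the arithmetic (plain C with illustrative names, not the kernel code itself):

    #include <stdbool.h>
    #include <stdint.h>

    #define MTRR_PHYSBASE0 0x200u   /* IA32_MTRR_PHYSBASE0 */

    /* Pair index for a variable-range MTRR MSR; odd offsets are PHYSMASKn. */
    static unsigned int var_mtrr_index(uint32_t msr, bool *is_mask)
    {
        uint32_t offset = msr - MTRR_PHYSBASE0;

        *is_mask = offset & 1;  /* 0x201, 0x203, ... are the masks */
        return offset / 2;      /* 0x200/0x201 -> 0, 0x202/0x203 -> 1, ... */
    }
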
68 static bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
91 mask = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
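
kvm_mtrr_valid() (line 68) gates every MTRR WRMSR: the memory-type field must be one of the five architecturally defined types, and no reserved bit, including address bits at or above the guest's MAXPHYADDR, may be set; line 91 fetches that reserved-bit mask. A sketch of the type check, using the SDM encodings UC(0), WC(1), WT(4), WP(5), WB(6):

    /* Types 2, 3 and anything above 6 are reserved encodings. */
    static int valid_mtrr_type(unsigned int type)
    {
        return type < 7 && type != 2 && type != 3;
    }
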
119 static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
131 if (guest_cpuid_has(vcpu, X86_FEATURE_MTRR))
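
mtrr_disabled_type() (lines 119-131) picks the type used while IA32_MTRR_DEF_TYPE.E is clear. The SDM makes all of physical memory UC in that state, but a guest whose CPUID hides MTRRs will never enable them, and forcing such a guest to run entirely uncached would be pointless, so KVM substitutes WB. A sketch of the decision (guest_has_mtrr stands in for the guest_cpuid_has() query on line 131):

    #include <stdbool.h>
    #include <stdint.h>

    #define MTRR_TYPE_UNCACHABLE 0
    #define MTRR_TYPE_WRBACK     6

    /* Effective type while IA32_MTRR_DEF_TYPE.E is clear. */
    static uint8_t mtrr_disabled_type(bool guest_has_mtrr)
    {
        return guest_has_mtrr ? MTRR_TYPE_UNCACHABLE : MTRR_TYPE_WRBACK;
    }
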
318 static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
320 struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
323 if (!kvm_mmu_honors_guest_mtrrs(vcpu->kvm))
338 var_mtrr_range(var_mtrr_msr_to_range(vcpu, msr), &start, &end);
341 kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
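
update_mtrr() (lines 318-341) bails out unless kvm_mmu_honors_guest_mtrrs() says the MMU bakes guest memory types into its SPTEs (in practice, EPT with non-coherent DMA assigned); it then computes the guest-physical span the changed MSR covers and zaps it so the SPTEs are rebuilt with the new type. For a variable range, the span falls out of the base/mask match rule: an address matches when (addr & mask) == (base & mask). A sketch, assuming the stored mask already has its high reserved bits forced set (see line 367):

    #include <stdint.h>

    #define PAGE_MASK (~0xfffull)

    /* [start, end) covered by one variable MTRR with a contiguous mask. */
    static void var_mtrr_span(uint64_t base, uint64_t mask,
                              uint64_t *start, uint64_t *end)
    {
        *start = base & PAGE_MASK;
        /*
         * ~mask has every in-range offset bit set; because the stored
         * mask has bit 63 set, the increment below cannot wrap to zero.
         */
        *end = (*start | ~(mask & PAGE_MASK)) + 1;
    }
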
349 static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
351 struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
354 cur = var_mtrr_msr_to_range(vcpu, msr);
367 cur->mask = data | kvm_vcpu_reserved_gpa_bits_raw(vcpu);
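
set_var_mtrr_msr() (lines 349-367) stores the new base or mask and rethreads the range on mtrr_state.head, the list of currently enabled variable ranges kept sorted by base so lookups can walk them in address order; a write that leaves the valid bit (mask bit 11) clear simply drops the range from the list. Line 367 also ORs the reserved GPA bits into the stored mask, which is what keeps it contiguous up to bit 63. A standalone sketch of the relinking, with a plain singly-linked list standing in for the kernel's list_head:

    #include <stdint.h>
    #include <stddef.h>

    struct var_range {
        uint64_t base, mask;
        struct var_range *next;
    };

    /* PHYSMASKn bit 11 is V, the range-enable bit. */
    static int range_enabled(const struct var_range *r)
    {
        return (r->mask >> 11) & 1;
    }

    /* Unlink r, then re-insert it in base order if it is enabled. */
    static void relink(struct var_range **head, struct var_range *r)
    {
        struct var_range **p;

        for (p = head; *p; p = &(*p)->next)
            if (*p == r) {
                *p = r->next;
                break;
            }

        if (!range_enabled(r))
            return;

        for (p = head; *p && (*p)->base < r->base; p = &(*p)->next)
            ;
        r->next = *p;
        *p = r;
    }
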
378 int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
382 if (!kvm_mtrr_valid(vcpu, msr, data))
387 *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data;
389 vcpu->arch.mtrr_state.deftype = data;
391 set_var_mtrr_msr(vcpu, msr, data);
393 update_mtrr(vcpu, msr);
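
kvm_mtrr_set_msr() (lines 378-393) is the WRMSR path: validate, store by MSR class, then let update_mtrr() invalidate the affected mappings. The class is one of the eleven fixed-range MSRs (stored at line 387), MSR_MTRRdefType (line 389), or a variable base/mask MSR (line 391). A sketch of the fixed-range classification that returns the plain MSR ordinal; KVM's own helper in this file, fixed_msr_to_range_index(), instead returns an offset into its 88 one-byte fixed-range entries (eight per MSR):

    #include <stdint.h>

    /*
     * Ordinal of a fixed-range MTRR MSR, -1 otherwise: one 64K MSR,
     * two 16K MSRs, eight 4K MSRs, matching the architectural layout.
     */
    static int fixed_index(uint32_t msr)
    {
        if (msr == 0x250)                   /* MTRRfix64K_00000 */
            return 0;
        if (msr == 0x258 || msr == 0x259)   /* MTRRfix16K_{80000,A0000} */
            return 1 + (msr - 0x258);
        if (msr >= 0x268 && msr <= 0x26f)   /* MTRRfix4K_C0000..F8000 */
            return 3 + (msr - 0x268);
        return -1;
    }
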
397 int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
418 *pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index];
420 *pdata = vcpu->arch.mtrr_state.deftype;
424 *pdata = var_mtrr_msr_to_range(vcpu, msr)->base;
426 *pdata = var_mtrr_msr_to_range(vcpu, msr)->mask;
428 *pdata &= ~kvm_vcpu_reserved_gpa_bits_raw(vcpu);
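
The getter (lines 397-428) mirrors the setter, with one subtlety at line 428: the stored variable mask carries the reserved physical-address bits that line 367 forced on, so RDMSR strips them again and the guest reads back exactly what it wrote. A sketch of that round trip, assuming a 48-bit guest MAXPHYADDR (the kernel derives the real width per vCPU via kvm_vcpu_reserved_gpa_bits_raw()):

    #include <stdint.h>

    #define RESERVED_GPA_BITS (~0ull << 48)  /* assumed MAXPHYADDR = 48 */

    /* WRMSR: stash the mask with the reserved bits forced set... */
    static uint64_t mask_to_store(uint64_t guest_value)
    {
        return guest_value | RESERVED_GPA_BITS;
    }

    /* ...RDMSR: strip them so the guest sees its own value. */
    static uint64_t mask_to_read(uint64_t stored)
    {
        return stored & ~RESERVED_GPA_BITS;
    }

Storing the OR-ed form means every range-size computation can assume a mask that is contiguous to bit 63, at the cost of this one strip on the read side.
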
434 void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
436 INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
614 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
616 struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
674 return mtrr_disabled_type(vcpu);
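
kvm_mtrr_get_guest_memory_type() (lines 614-674) walks the fixed ranges, then the enabled variable ranges, and resolves overlaps by the SDM rules: identical types stand, UC beats everything, and a WT/WB overlap yields WT; any other overlap is architecturally undefined. When MTRRs are disabled it falls back to mtrr_disabled_type() (line 674), and when nothing matches, to the default type in IA32_MTRR_DEF_TYPE. A sketch of the pairwise overlap rule, using the SDM type encodings:

    #define MTRR_TYPE_UNCACHABLE 0
    #define MTRR_TYPE_WRTHROUGH  4
    #define MTRR_TYPE_WRBACK     6

    /* Combine the types of two overlapping ranges; -1 means undefined. */
    static int combine_types(int a, int b)
    {
        if (a == b)
            return a;
        if (a == MTRR_TYPE_UNCACHABLE || b == MTRR_TYPE_UNCACHABLE)
            return MTRR_TYPE_UNCACHABLE;
        if ((a == MTRR_TYPE_WRTHROUGH && b == MTRR_TYPE_WRBACK) ||
            (a == MTRR_TYPE_WRBACK && b == MTRR_TYPE_WRTHROUGH))
            return MTRR_TYPE_WRTHROUGH;
        return -1;
    }
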
690 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
693 struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
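
kvm_mtrr_check_gfn_range_consistency() (line 690) answers whether a whole gfn range, typically a candidate huge page, resolves to a single memory type, since one huge SPTE can only carry one type. A naive page-by-page model of the semantics (the kernel iterates the matching MTRR ranges rather than every page):

    #include <stdint.h>

    /* True if [gfn, gfn + pages) resolves to one memory type throughout. */
    static int range_consistent(uint64_t gfn, uint64_t pages,
                                int (*type_of)(uint64_t gfn))
    {
        int type = type_of(gfn);
        uint64_t i;

        for (i = 1; i < pages; i++)
            if (type_of(gfn + i) != type)
                return 0;
        return 1;
    }
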