Lines Matching defs:index

1074 static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
1081 if (index != XCR_XFEATURE_ENABLED_MASK)
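
The check at 1081 is the only index validation in __kvm_set_xcr(): XCR0 (XCR_XFEATURE_ENABLED_MASK, value 0) is the only extended control register currently defined, so any other index is refused. A minimal stand-alone restatement of that guard (set_xcr() is an invented name for illustration, not KVM's):

    #include <stdint.h>

    #define XCR_XFEATURE_ENABLED_MASK 0x0   /* XCR0 */

    static int set_xcr(uint32_t index, uint64_t xcr)
    {
            if (index != XCR_XFEATURE_ENABLED_MASK)
                    return 1;               /* unknown XCR: caller treats this as a fault */
            /* ... validate the requested feature bits, then load XCR0 with xcr ... */
            return 0;
    }
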
1686 switch (msr->index) {
1694 rdmsrl_safe(msr->index, &msr->data);
1702 static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
1709 msr.index = index;
1712 if (r == KVM_MSR_RET_INVALID && kvm_msr_ignored_check(index, 0, false))
1793 bool kvm_msr_allowed(struct kvm_vcpu *vcpu, u32 index, u32 type)
1803 if (index >= 0x800 && index <= 0x8ff)
1823 if ((index >= start) && (index < end) && (flags & type)) {
1824 allowed = test_bit(index - start, bitmap);
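
Lines 1793-1824 are kvm_msr_allowed(): outside the always-allowed x2APIC window (0x800-0x8ff), an MSR index is checked against the userspace-installed filter ranges, and the first range that covers the index and matches the access type decides via a per-MSR bitmap. A stand-alone sketch of that range-plus-bitmap test (the struct and function names here are invented for illustration):

    #include <stdbool.h>
    #include <stdint.h>

    struct msr_filter_range {
            uint32_t start;             /* first MSR index covered               */
            uint32_t end;               /* one past the last MSR index covered   */
            uint32_t flags;             /* which access types this range filters */
            unsigned long *bitmap;      /* one bit per MSR in [start, end)       */
    };

    static bool msr_allowed(uint32_t index, uint32_t type,
                            const struct msr_filter_range *ranges, int nranges,
                            bool default_allow)
    {
            bool allowed = default_allow;

            for (int i = 0; i < nranges; i++) {
                    const struct msr_filter_range *r = &ranges[i];

                    if (index >= r->start && index < r->end && (r->flags & type)) {
                            uint32_t bit = index - r->start;

                            /* the matching range decides via its per-MSR bitmap */
                            allowed = (r->bitmap[bit / (8 * sizeof(unsigned long))] >>
                                       (bit % (8 * sizeof(unsigned long)))) & 1;
                            break;
                    }
            }
            return allowed;
    }
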
1837 * Write @data into the MSR specified by @index. Select MSR specific fault
1842 static int __kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data,
1847 switch (index) {
1898 msr.index = index;
1905 u32 index, u64 data, bool host_initiated)
1907 int ret = __kvm_set_msr(vcpu, index, data, host_initiated);
1910 if (kvm_msr_ignored_check(index, data, true))
1917 * Read the MSR specified by @index into @data. Select MSR specific fault
1922 int __kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data,
1928 switch (index) {
1940 msr.index = index;
1950 u32 index, u64 *data, bool host_initiated)
1952 int ret = __kvm_get_msr(vcpu, index, data, host_initiated);
1957 if (kvm_msr_ignored_check(index, 0, false))
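
Lines 1905-1912 and 1950-1957 show the "ignored check" wrappers around __kvm_set_msr()/__kvm_get_msr(): when the low-level accessor reports an unknown MSR, the error can be swallowed if the ignore-unknown-MSRs policy applies, with reads then returning 0. A rough stand-alone sketch of that shape (the error code, stubs, and helper names below are stand-ins, not KVM's):

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_RET_UNKNOWN 2           /* stand-in for KVM's "MSR not handled" code */

    static int raw_get_msr(uint32_t index, uint64_t *data)
    {
            (void)index; (void)data;
            return MSR_RET_UNKNOWN;     /* pretend the MSR is not handled */
    }

    static bool ignore_unknown_msr(uint32_t index, uint64_t data, bool write)
    {
            (void)index; (void)data; (void)write;
            return true;                /* pretend the ignore policy applies */
    }

    static int get_msr_ignored_check(uint32_t index, uint64_t *data)
    {
            int ret = raw_get_msr(index, data);

            if (ret == MSR_RET_UNKNOWN && ignore_unknown_msr(index, 0, false)) {
                    *data = 0;          /* ignored unknown MSRs read as 0 */
                    ret = 0;
            }
            return ret;
    }
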
1964 static int kvm_get_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 *data)
1966 if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_READ))
1968 return kvm_get_msr_ignored_check(vcpu, index, data, false);
1971 static int kvm_set_msr_with_filter(struct kvm_vcpu *vcpu, u32 index, u64 data)
1973 if (!kvm_msr_allowed(vcpu, index, KVM_MSR_FILTER_WRITE))
1975 return kvm_set_msr_ignored_check(vcpu, index, data, false);
1978 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
1980 return kvm_get_msr_ignored_check(vcpu, index, data, false);
1984 int kvm_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
1986 return kvm_set_msr_ignored_check(vcpu, index, data, false);
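
Lines 1964-1986 show two tiers of entry points: the *_with_filter variants consult kvm_msr_allowed() before touching the MSR, while plain kvm_get_msr()/kvm_set_msr() skip the filter and go straight to the ignored-check helpers. A small sketch of that layering, reusing the illustrative helpers from the sketches above (the "filtered" error value is a stand-in):

    /* filtered read: consult the userspace filter first, then the normal path */
    static int get_msr_filtered(uint32_t index, uint64_t *data,
                                const struct msr_filter_range *ranges, int nranges,
                                bool default_allow)
    {
            if (!msr_allowed(index, /* type: read */ 1, ranges, nranges, default_allow))
                    return -1;          /* stand-in for KVM's "filtered" error */
            return get_msr_ignored_check(index, data);
    }
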
2032 static int kvm_msr_user_space(struct kvm_vcpu *vcpu, u32 index,
2047 vcpu->run->msr.index = index;
2221 static int do_get_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
2223 return kvm_get_msr_ignored_check(vcpu, index, data, true);
2226 static int do_set_msr(struct kvm_vcpu *vcpu, unsigned index, u64 *data)
2237 if (kvm_vcpu_has_run(vcpu) && kvm_is_immutable_feature_msr(index)) {
2238 if (do_get_msr(vcpu, index, &val) || *data != val)
2244 return kvm_set_msr_ignored_check(vcpu, index, *data, true);
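
Lines 2237-2244 show the guard in do_set_msr(): once the vCPU has run, a feature MSR that KVM treats as immutable may only be "written" with the value it already holds; anything else is rejected before reaching kvm_set_msr_ignored_check(). A stand-alone sketch of that compare-before-write pattern (helper names invented):

    #include <stdint.h>

    int read_msr(uint32_t index, uint64_t *data);   /* assumed accessor, defined elsewhere */

    /* Reject writes that would change an immutable feature MSR after first run. */
    static int set_immutable_msr_after_run(uint32_t index, uint64_t new_val)
    {
            uint64_t cur;

            if (read_msr(index, &cur) || new_val != cur)
                    return -1;          /* value would change (or can't be read): refuse */
            return 0;                   /* same value: nothing to do */
    }
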
3483 u32 msr = msr_info->index;
3787 u32 msr = msr_info->index;
4232 switch (msr_info->index) {
4268 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
4319 return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data);
4341 return kvm_x2apic_msr_read(vcpu, msr_info->index, &msr_info->data);
4435 return get_msr_mce(vcpu, msr_info->index, &msr_info->data,
4467 msr_info->index, &msr_info->data,
4522 if (kvm_pmu_is_valid_msr(vcpu, msr_info->index))
4530 kvm_is_msr_to_save(msr_info->index)) {
4549 unsigned index, u64 *data))
4554 if (do_msr(vcpu, entries[i].index, &entries[i].data))
4567 unsigned index, u64 *data),
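
Lines 4549-4567 belong to the helper that processes a batch of MSR entries for the KVM_GET_MSRS/KVM_SET_MSRS ioctls, calling the supplied do_msr callback with each entry's index and stopping at the first failure. A stand-alone sketch of that loop shape (struct and function names invented):

    #include <stdint.h>

    struct msr_entry {
            uint32_t index;
            uint64_t data;
    };

    /* Run do_msr over the entries, stop at the first failure,
     * and report how many entries were processed successfully. */
    static int msr_io(struct msr_entry *entries, int n,
                      int (*do_msr)(uint32_t index, uint64_t *data))
    {
            int i;

            for (i = 0; i < n; i++)
                    if (do_msr(entries[i].index, &entries[i].data))
                            break;

            return i;
    }
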
7341 .index = msr_index,
8546 static int emulator_set_xcr(struct x86_emulate_ctxt *ctxt, u32 index, u64 xcr)
8548 return __kvm_set_xcr(emul_to_vcpu(ctxt), index, xcr);