Searched refs:msr (Results 1 - 25 of 432) sorted by last modified time


/linux-master/arch/x86/xen/
enlighten_pv.c 62 #include <asm/msr-index.h>
1024 static u64 xen_do_read_msr(unsigned int msr, int *err) argument
1028 if (pmu_msr_read(msr, &val, err))
1032 val = native_read_msr_safe(msr, err);
1034 val = native_read_msr(msr);
1036 switch (msr) {
1063 static void xen_do_write_msr(unsigned int msr, unsigned int low, argument
1066 switch (msr) {
1092 if (!pmu_msr_write(msr, low, high, err)) {
1094 *err = native_write_msr_safe(msr, low, high);
1101 xen_read_msr_safe(unsigned int msr, int *err) argument
1106 xen_write_msr_safe(unsigned int msr, unsigned int low, unsigned int high) argument
1116 xen_read_msr(unsigned int msr) argument
1123 xen_write_msr(unsigned int msr, unsigned low, unsigned high) argument
[all...]
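
The enlighten_pv.c hits show Xen's MSR read dispatch: ask the emulated PMU first, fall back to a faulting-safe read when the caller passes an error pointer, else do the fast native read. A minimal compilable sketch of that pattern; every function here is a hypothetical stand-in for the kernel internals named above, not the kernel's own code:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for pmu_msr_read() and native_read_msr{,_safe}(). */
    static bool emulated_pmu_read(unsigned int msr, uint64_t *val, int *err)
    {
        (void)msr; (void)val; (void)err;
        return false;                     /* pretend no PMU MSR matched */
    }
    static uint64_t safe_hw_read(unsigned int msr, int *err)
    {
        (void)msr; *err = 0; return 0;    /* would catch #GP in the kernel */
    }
    static uint64_t fast_hw_read(unsigned int msr) { (void)msr; return 0; }

    static uint64_t do_read_msr(unsigned int msr, int *err)
    {
        uint64_t val;

        if (emulated_pmu_read(msr, &val, err))  /* emulated PMU MSRs first */
            return val;
        if (err)                                /* caller wants faults reported */
            return safe_hw_read(msr, err);
        return fast_hw_read(msr);               /* fast path, may fault */
    }

    int main(void)
    {
        int err;
        printf("val=%llu err=%d\n",
               (unsigned long long)do_read_msr(0x10, &err), err);
        return 0;
    }
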
/linux-master/drivers/platform/x86/intel/speed_select_if/
isst_if_common.c 496 static bool match_punit_msr_white_list(int msr) argument
501 if (punit_msr_white_list[i] == msr)
515 if (!match_punit_msr_white_list(msr_cmd->msr))
526 msr_cmd->msr,
530 ret = isst_store_cmd(0, msr_cmd->msr,
537 msr_cmd->msr, &data);
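
isst_if_common.c only forwards user-supplied MSR commands whose MSR number is on a fixed allow-list. The check in isolation; the list contents below are made up for illustration (the real punit_msr_white_list lives in the driver):

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative allow-list, not the driver's actual contents. */
    static const uint32_t msr_allow_list[] = { 0x198, 0x199, 0x1ad };

    static bool msr_allowed(uint32_t msr)
    {
        for (size_t i = 0; i < sizeof(msr_allow_list) / sizeof(msr_allow_list[0]); i++)
            if (msr_allow_list[i] == msr)
                return true;
        return false;
    }

    int main(void)
    {
        printf("0x199 allowed: %d\n", msr_allowed(0x199));           /* 1 */
        printf("0xc0000080 allowed: %d\n", msr_allowed(0xc0000080)); /* 0 */
        return 0;
    }
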
/linux-master/arch/x86/kernel/cpu/
amd.c 31 static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p) argument
39 gprs[1] = msr;
49 static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val) argument
57 gprs[1] = msr;
485 u64 msr; local
503 rdmsrl(MSR_AMD64_SYSCFG, msr);
504 if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
520 rdmsrl(MSR_K7_HWCR, msr);
521 if (!(msr & MSR_K7_HWCR_SMMLOCK))
643 * bit 6 of msr C001_001
[all...]
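
amd.c tests single feature bits after 64-bit MSR reads (SYSCFG memory encryption, HWCR SMM lock). The same kind of probe can be sketched from user space through the msr(4) character device; the MSR number (HWCR = 0xc0010015) and bit 0 for SMMLOCK are the commonly documented values and are assumptions here:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Read one MSR on CPU 0 via /dev/cpu/0/msr (needs root + the msr module);
     * the pread() file offset selects the MSR number. */
    static int read_msr(uint32_t msr, uint64_t *val)
    {
        int fd = open("/dev/cpu/0/msr", O_RDONLY);
        ssize_t n;

        if (fd < 0)
            return -1;
        n = pread(fd, val, sizeof(*val), msr);
        close(fd);
        return n == (ssize_t)sizeof(*val) ? 0 : -1;
    }

    int main(void)
    {
        uint64_t hwcr;

        if (read_msr(0xc0010015, &hwcr) == 0)  /* MSR_K7_HWCR, assumed number */
            printf("SMM locked: %s\n", (hwcr & 1) ? "yes" : "no");
        return 0;
    }
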
bugs.c 26 #include <asm/msr.h>
common.c 58 #include <asm/msr.h>
560 u64 msr = 0; local
563 rdmsrl(MSR_IA32_S_CET, msr);
565 wrmsrl(MSR_IA32_S_CET, msr & ~CET_ENDBR_EN);
568 return msr;
573 u64 msr; local
576 rdmsrl(MSR_IA32_S_CET, msr);
577 msr &= ~CET_ENDBR_EN;
578 msr |= (save & CET_ENDBR_EN);
579 wrmsrl(MSR_IA32_S_CET, msr);
[all...]
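
common.c's CET lines are a save/restore pair: read MSR_IA32_S_CET, clear CET_ENDBR_EN while some code runs, then merge only the saved bit back. A stub-based sketch; bit 2 for ENDBR_EN is an assumption taken from <asm/msr-index.h>, and the rdmsr/wrmsr stand-ins just model a register:

    #include <stdint.h>

    #define CET_ENDBR_EN (1ULL << 2)   /* assumed bit position */

    static uint64_t s_cet_shadow;      /* models MSR_IA32_S_CET */
    static uint64_t rdmsr_stub(void)   { return s_cet_shadow; }
    static void wrmsr_stub(uint64_t v) { s_cet_shadow = v; }

    static uint64_t ibt_save(void)
    {
        uint64_t msr = rdmsr_stub();
        wrmsr_stub(msr & ~CET_ENDBR_EN);  /* stop enforcing ENDBR */
        return msr;                       /* caller keeps the old value */
    }

    static void ibt_restore(uint64_t save)
    {
        uint64_t msr = rdmsr_stub();
        msr &= ~CET_ENDBR_EN;
        msr |= (save & CET_ENDBR_EN);     /* put only the saved bit back */
        wrmsr_stub(msr);
    }

    int main(void)
    {
        s_cet_shadow = CET_ENDBR_EN;      /* pretend ENDBR was enabled */
        ibt_restore(ibt_save());          /* round-trip leaves it enabled */
        return s_cet_shadow == CET_ENDBR_EN ? 0 : 1;
    }
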
topology_amd.c 130 u64 msr; member in union:__anon10
136 rdmsrl(MSR_FAM10H_NODE_ID, nid.msr);
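
topology_amd.c overlays a bitfield view on the raw MSR_FAM10H_NODE_ID value so fields get names instead of hand-rolled shifts. The trick in general form; the field names and widths below are illustrative, not the kernel's exact layout:

    #include <stdint.h>
    #include <stdio.h>

    union node_id_msr {
        struct {
            uint64_t node_id       : 3;   /* illustrative widths */
            uint64_t nodes_per_pkg : 3;   /* encodes count minus one here */
            uint64_t reserved      : 58;
        };
        uint64_t msr;                     /* raw value, as from rdmsrl() */
    };

    int main(void)
    {
        union node_id_msr nid = { .msr = 0x9 };   /* node 1, 2 nodes/pkg */
        printf("node %u, %u nodes/pkg\n",
               (unsigned)nid.node_id, (unsigned)nid.nodes_per_pkg + 1);
        return 0;
    }
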
/linux-master/tools/testing/selftests/kvm/x86_64/
pmu_counters_test.c 321 #define GUEST_ASSERT_PMC_MSR_ACCESS(insn, msr, expect_gp, vector) \
324 expect_gp ? "#GP" : "no fault", msr, vector) \
326 #define GUEST_ASSERT_PMC_VALUE(insn, msr, val, expected) \
329 msr, expected_val, val);
363 const uint32_t msr = base_msr + i; local
377 const bool expect_gp = !expect_success && msr != MSR_P6_PERFCTR0 &&
378 msr != MSR_P6_PERFCTR1;
383 vector = wrmsr_safe(msr, test_val);
384 GUEST_ASSERT_PMC_MSR_ACCESS(WRMSR, msr, expect_gp, vector);
386 vector = rdmsr_safe(msr,
[all...]
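
pmu_counters_test.c writes each counter MSR and asserts either success or a #GP, depending on what the vPMU advertises. The shape of that assertion with wrmsr_safe stubbed out (vector 13 is #GP; the stub's notion of which MSRs exist is invented for the example):

    #include <assert.h>
    #include <stdint.h>

    #define GP_VECTOR 13

    /* Hypothetical stand-in: returns the fault vector, or 0 on success. */
    static uint8_t wrmsr_safe_stub(uint32_t msr, uint64_t val)
    {
        (void)val;
        /* Pretend only IA32_PMC0..7 (0xc1..0xc8) are writable. */
        return (msr >= 0xc1 && msr <= 0xc8) ? 0 : GP_VECTOR;
    }

    static void expect_pmc_access(uint32_t msr, int expect_gp)
    {
        uint8_t vector = wrmsr_safe_stub(msr, 0xdeadbeef);
        assert(expect_gp ? vector == GP_VECTOR : vector == 0);
    }

    int main(void)
    {
        expect_pmc_access(0xc1, 0);      /* IA32_PMC0: should succeed */
        expect_pmc_access(0x12345, 1);   /* bogus MSR: should #GP */
        return 0;
    }
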
/linux-master/arch/x86/kvm/
x86.c 70 #include <asm/msr.h>
322 * When called, it means the previous get/set msr reached an invalid msr.
323 * Return true if we want to ignore/silence this failed msr access.
325 static bool kvm_msr_ignored_check(u32 msr, u64 data, bool write) argument
332 op, msr, data);
337 op, msr, data);
389 static int kvm_probe_user_return_msr(u32 msr) argument
395 ret = rdmsrl_safe(msr, &val);
398 ret = wrmsrl_safe(msr, val);
404 kvm_add_user_return_msr(u32 msr) argument
416 kvm_find_user_return_msr(u32 msr) argument
1592 kvm_is_immutable_feature_msr(u32 msr) argument
1684 kvm_get_msr_feature(struct kvm_msr_entry *msr) argument
1704 struct kvm_msr_entry msr; local
1845 struct msr_data msr; local
1925 struct msr_data msr; local
2184 u32 msr = kvm_rcx_read(vcpu); local
3458 is_mci_control_msr(u32 msr) argument
3462 is_mci_status_msr(u32 msr) argument
3483 u32 msr = msr_info->index; local
3787 u32 msr = msr_info->index; local
4180 get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host) argument
7340 struct kvm_msr_entry msr = { local
[all...]
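
kvm_probe_user_return_msr only lets KVM manage an MSR that both reads successfully and accepts a write-back of the value just read. The probe in isolation, against stubbed safe accessors (returning 0 on success, like rdmsrl_safe/wrmsrl_safe):

    #include <stdint.h>

    /* Stand-ins for rdmsrl_safe()/wrmsrl_safe(): 0 means no fault. */
    static int rdmsrl_safe_stub(uint32_t msr, uint64_t *val)
    {
        (void)msr; *val = 0; return 0;
    }
    static int wrmsrl_safe_stub(uint32_t msr, uint64_t val)
    {
        (void)msr; (void)val; return 0;
    }

    static int probe_user_return_msr(uint32_t msr)
    {
        uint64_t val;
        int ret = rdmsrl_safe_stub(msr, &val);    /* must be readable ...      */

        if (ret)
            return ret;
        return wrmsrl_safe_stub(msr, val);        /* ... and take the value back */
    }

    int main(void)
    {
        return probe_user_return_msr(0xc0000102); /* e.g. MSR_KERNEL_GS_BASE */
    }
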
pmu.c 59 * 1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
615 bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) argument
617 switch (msr) {
625 return static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr) ||
626 static_call(kvm_x86_pmu_is_valid_msr)(vcpu, msr);
629 static void kvm_pmu_mark_pmc_in_use(struct kvm_vcpu *vcpu, u32 msr) argument
632 struct kvm_pmc *pmc = static_call(kvm_x86_pmu_msr_idx_to_pmc)(vcpu, msr);
641 u32 msr = msr_info->index; local
643 switch (msr) {
666 u32 msr = msr_info->index; local
[all...]
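
kvm_pmu_is_valid_msr handles a few architectural PMU MSRs in common code and defers everything else to the vendor implementation. A compact sketch of that split; the vendor hook below is a hypothetical stand-in for the static_call:

    #include <stdbool.h>
    #include <stdint.h>

    #define MSR_CORE_PERF_GLOBAL_STATUS 0x38e  /* architectural PMU MSRs */
    #define MSR_CORE_PERF_GLOBAL_CTRL   0x38f

    /* Hypothetical stand-in for the kvm_x86_pmu_is_valid_msr static_call. */
    static bool vendor_is_valid_msr(uint32_t msr) { (void)msr; return false; }

    static bool pmu_is_valid_msr(uint32_t msr)
    {
        switch (msr) {
        case MSR_CORE_PERF_GLOBAL_STATUS:
        case MSR_CORE_PERF_GLOBAL_CTRL:
            return true;                       /* handled in common code */
        default:
            return vendor_is_valid_msr(msr);   /* Intel/AMD specifics */
        }
    }

    int main(void)
    {
        return pmu_is_valid_msr(MSR_CORE_PERF_GLOBAL_CTRL) ? 0 : 1;
    }
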
lapic.c 32 #include <asm/msr.h>
3227 int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data) argument
3230 u32 reg = (msr - APIC_BASE_MSR) << 4;
3238 int kvm_x2apic_msr_read(struct kvm_vcpu *vcpu, u32 msr, u64 *data) argument
3241 u32 reg = (msr - APIC_BASE_MSR) << 4;
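
lapic.c converts an x2APIC MSR number to the legacy MMIO register offset with one shift, and apic.h (further down in these results) does the inverse: registers sit 16 bytes apart in MMIO but one MSR apart in x2APIC space, starting at MSR 0x800. Checking the arithmetic:

    #include <assert.h>
    #include <stdint.h>

    #define APIC_BASE_MSR 0x800   /* first x2APIC MSR (APIC ID is 0x802) */

    static uint32_t x2apic_msr_to_reg(uint32_t msr) { return (msr - APIC_BASE_MSR) << 4; }
    static uint32_t reg_to_x2apic_msr(uint32_t reg) { return APIC_BASE_MSR + (reg >> 4); }

    int main(void)
    {
        assert(x2apic_msr_to_reg(0x802) == 0x20);   /* APIC_ID */
        assert(x2apic_msr_to_reg(0x808) == 0x80);   /* APIC_TASKPRI (TPR) */
        assert(reg_to_x2apic_msr(0x300) == 0x830);  /* APIC_ICR */
        return 0;
    }
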
/linux-master/arch/x86/kvm/vmx/
vmx.h 410 struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
417 int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
420 void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
421 void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
428 static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, argument
432 vmx_enable_intercept_for_msr(vcpu, msr, type);
434 vmx_disable_intercept_for_msr(vcpu, msr, type);
449 u32 msr) \
453 if (msr <= 0x1fff) \
454 return bitop##_bit(msr, bitmap + base / f); \
[all...]
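
The vmx.h helpers flip one bit per MSR in the 4 KiB VMX MSR bitmap: MSRs 0x0-0x1fff index the low part of a region, and MSRs 0xc0000000-0xc0001fff are masked with 0x1fff and land 0x400 bytes further in. A sketch of the bit-index math, following the Intel SDM layout (read-low at byte 0x000, read-high at 0x400, write-low at 0x800, write-high at 0xc00):

    #include <stdint.h>
    #include <stdio.h>

    /* Bit index into the 4 KiB bitmap, or -1 if the MSR is unmappable. */
    static long msr_bitmap_bit(uint32_t msr, int write)
    {
        long base = write ? 0x800 * 8 : 0;   /* region start, in bits */

        if (msr <= 0x1fff)
            return base + msr;
        if (msr >= 0xc0000000 && msr <= 0xc0001fff)
            return base + 0x400 * 8 + (msr & 0x1fff);
        return -1;                           /* must intercept unconditionally */
    }

    int main(void)
    {
        printf("read  IA32_TSC (0x10):       bit %ld\n", msr_bitmap_bit(0x10, 0));
        printf("write MSR_STAR (0xc0000081): bit %ld\n", msr_bitmap_bit(0xc0000081, 1));
        return 0;
    }
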
vmx.c 373 u64 msr; local
378 msr = __rdmsr(MSR_IA32_MCU_OPT_CTRL);
379 msr |= FB_CLEAR_DIS;
380 native_wrmsrl(MSR_IA32_MCU_OPT_CTRL, msr);
382 vmx->msr_ia32_mcu_opt_ctrl = msr;
675 static int vmx_get_passthrough_msr_slot(u32 msr) argument
679 switch (msr) {
701 if (vmx_possible_passthrough_msrs[i] == msr)
705 WARN(1, "Invalid MSR %x, please adapt vmx_possible_passthrough_msrs[]", msr);
709 struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr) argument
719 vmx_set_guest_uret_msr(struct vcpu_vmx *vmx, struct vmx_uret_msr *msr, u64 data) argument
932 msr_write_intercepted(struct vcpu_vmx *vmx, u32 msr) argument
965 vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr) argument
976 clear_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr) argument
1028 add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr, u64 guest_val, u64 host_val, bool entry_only) argument
1836 vmx_setup_uret_msr(struct vcpu_vmx *vmx, unsigned int msr, bool load_into_hardware) argument
1936 is_vmx_feature_control_msr_valid(struct vcpu_vmx *vmx, struct msr_data *msr) argument
1960 vmx_get_msr_feature(struct kvm_msr_entry *msr) argument
1980 struct vmx_uret_msr *msr; local
2161 struct vmx_uret_msr *msr; local
2539 adjust_vmx_controls(u32 ctl_min, u32 ctl_opt, u32 msr, u32 *result) argument
2557 adjust_vmx_controls64(u64 ctl_opt, u32 msr) argument
2786 u64 msr; local
3956 vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type) argument
3998 vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type) argument
4141 u32 msr = vmx_possible_passthrough_msrs[i]; local
7828 struct vmx_uret_msr *msr; local
[all...]
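
vmx.c's FB_CLEAR_DIS handling is a plain read-modify-write that also caches the result in vmx->msr_ia32_mcu_opt_ctrl for the restore path. The generic shape; MSR 0x123 and bit 3 are the commonly documented values for MSR_IA32_MCU_OPT_CTRL and FB_CLEAR_DIS, and the accessors are stubs:

    #include <stdint.h>

    #define MCU_OPT_CTRL 0x123        /* MSR_IA32_MCU_OPT_CTRL, assumed */
    #define FB_CLEAR_DIS (1ULL << 3)  /* assumed bit position */

    static uint64_t msr_shadow;       /* models the hardware register */
    static uint64_t rdmsr_stub(uint32_t msr) { (void)msr; return msr_shadow; }
    static void wrmsr_stub(uint32_t msr, uint64_t v) { (void)msr; msr_shadow = v; }

    static uint64_t cached_mcu_opt_ctrl;  /* like vmx->msr_ia32_mcu_opt_ctrl */

    static void disable_fb_clear(void)
    {
        uint64_t msr = rdmsr_stub(MCU_OPT_CTRL);

        msr |= FB_CLEAR_DIS;              /* stop clearing fill buffers */
        wrmsr_stub(MCU_OPT_CTRL, msr);
        cached_mcu_opt_ctrl = msr;        /* kept for the re-enable path */
    }

    int main(void)
    {
        disable_fb_clear();
        return (cached_mcu_opt_ctrl & FB_CLEAR_DIS) ? 0 : 1;
    }
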
pmu_intel.c 124 static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr) argument
129 return get_gp_pmc(pmu, msr, MSR_IA32_PMC0);
150 static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) argument
156 switch (msr) {
171 ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
172 get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
173 get_fixed_pmc(pmu, msr) || get_fw_gp_pmc(pmu, msr) ||
174 intel_pmu_is_valid_lbr_msr(vcpu, msr);
181 static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr) argument
302 u32 msr = msr_info->index; local
345 u32 msr = msr_info->index; local
[all...]
/linux-master/arch/x86/kvm/svm/
svm.c 264 u32 svm_msrpm_offset(u32 msr) argument
270 if (msr < msrpm_ranges[i] ||
271 msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
274 offset = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8 */
630 * to avoid having a stale value in the msr
757 static int direct_access_msr_slot(u32 msr) argument
762 if (direct_access_msrs[i].index == msr)
768 static void set_shadow_msr_intercept(struct kvm_vcpu *vcpu, u32 msr, int read, argument
772 int slot = direct_access_msr_slot(msr);
794 static bool msr_write_intercepted(struct kvm_vcpu *vcpu, u32 msr) argument
822 set_msr_interception_bitmap(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr, int read, int write) argument
859 set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr, int read, int write) argument
931 u32 msr = direct_access_msrs[i].index; local
2808 svm_get_msr_feature(struct kvm_msr_entry *msr) argument
2971 svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) argument
5058 u64 msr, mask; local
[all...]
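
svm_msrpm_offset maps an MSR number into AMD's MSR permission map: three ranges (base MSRs 0, 0xc0000000, 0xc0010000), each 2048 bytes covering 8192 MSRs at two bits apiece, hence the "4 msrs per u8" comment in the hits. A standalone version of that arithmetic, using the range table shown above (the byte counts are assumptions consistent with that comment):

    #include <stdint.h>
    #include <stdio.h>

    #define MSR_INVALID   0xffffffffU
    #define RANGE_BYTES   2048                    /* bytes per msrpm range */
    #define MSRS_IN_RANGE (RANGE_BYTES * 8 / 2)   /* 2 bits per MSR */

    static const uint32_t msrpm_ranges[] = { 0, 0xc0000000, 0xc0010000 };

    static uint32_t msrpm_offset(uint32_t msr)
    {
        for (unsigned i = 0; i < sizeof(msrpm_ranges) / sizeof(msrpm_ranges[0]); i++) {
            if (msr < msrpm_ranges[i] || msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
                continue;

            uint32_t offset = (msr - msrpm_ranges[i]) / 4;  /* 4 msrs per u8 */
            offset += i * RANGE_BYTES;                      /* skip earlier ranges */
            return offset / 4;             /* byte offset -> u32 offset */
        }
        return MSR_INVALID;                /* not covered by any range */
    }

    int main(void)
    {
        printf("EFER (0xc0000080) -> u32 %u\n", msrpm_offset(0xc0000080));
        printf("bogus (0x12345678) -> 0x%x\n", msrpm_offset(0x12345678));
        return 0;
    }
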
svm.h 507 u32 msr = offset * 16; local
509 return (msr >= APIC_BASE_MSR) &&
510 (msr < (APIC_BASE_MSR + 0x100));
541 u32 svm_msrpm_offset(u32 msr);
557 void set_msr_interception(struct kvm_vcpu *vcpu, u32 *msrpm, u32 msr,
/linux-master/arch/x86/include/asm/
perf_event.h 320 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS msr.
549 unsigned msr; member in struct:perf_guest_switch_msr
kvm_host.h 34 #include <asm/msr-index.h>
1630 int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
1631 int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr);
2074 int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
2075 int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr);
2175 static inline unsigned long read_msr(unsigned long msr) argument
2179 rdmsrl(msr, value);
2229 int kvm_add_user_return_msr(u32 msr);
2230 int kvm_find_user_return_msr(u32 msr);
2233 static inline bool kvm_is_supported_user_return_msr(u32 msr) argument
[all...]
apic.h 14 #include <asm/msr.h>
115 u64 msr; local
117 if (rdmsrl_safe(MSR_IA32_APICBASE, &msr))
119 return msr & X2APIC_ENABLE;
218 u64 msr; local
223 rdmsrl(APIC_BASE_MSR + (reg >> 4), msr);
224 return (u32)msr;
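
apic.h's x2apic_enabled() boils down to one bit of IA32_APICBASE. The same probe can be done from user space through the msr(4) device; bit 10 (x2APIC enable) is the documented position, but verify it against the SDM before relying on it:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    #define MSR_IA32_APICBASE 0x1b
    #define X2APIC_ENABLE     (1ULL << 10)

    int main(void)
    {
        uint64_t apicbase;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);  /* needs root + msr module */

        if (fd < 0 || pread(fd, &apicbase, 8, MSR_IA32_APICBASE) != 8) {
            perror("rdmsr");
            return 1;
        }
        printf("x2APIC %s\n", (apicbase & X2APIC_ENABLE) ? "enabled" : "disabled");
        close(fd);
        return 0;
    }
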
/linux-master/arch/x86/events/intel/
lbr.c 6 #include <asm/msr.h>
/linux-master/tools/include/uapi/linux/
kvm.h 101 __u32 msr; member in struct:kvm_hyperv_exit::__anon156::__anon157
113 __u32 msr; member in struct:kvm_hyperv_exit::__anon156::__anon159
407 } msr; member in union:kvm_run::__anon162
/linux-master/tools/arch/x86/include/uapi/asm/
kvm.h 556 __u32 msr; member in struct:kvm_xen_hvm_config
/linux-master/tools/arch/powerpc/include/uapi/asm/
kvm.h 41 __u64 msr; member in struct:kvm_regs
/linux-master/arch/arm64/kernel/
head.S 157 msr sctlr_el2, x19
160 msr sctlr_el1, x19
195 msr sp_el0, \tsk
222 msr vbar_el1, x8 // vector table address
275 msr sctlr_el1, x0
278 msr spsr_el1, x0
279 msr elr_el1, lr
284 msr elr_el2, lr
295 msr sctlr_el2, x0
314 msr hcr_el
[all...]
/linux-master/arch/x86/kernel/apic/
apic.c 1691 u64 msr; local
1695 rdmsrl(MSR_IA32_XAPIC_DISABLE_STATUS, msr);
1696 return (msr & LEGACY_XAPIC_DISABLED);
1703 u64 msr; local
1708 rdmsrl(MSR_IA32_APICBASE, msr);
1709 if (!(msr & X2APIC_ENABLE))
1712 wrmsrl(MSR_IA32_APICBASE, msr & ~(X2APIC_ENABLE | XAPIC_ENABLE));
1713 wrmsrl(MSR_IA32_APICBASE, msr & ~X2APIC_ENABLE);
1719 u64 msr; local
1721 rdmsrl(MSR_IA32_APICBASE, msr);
[all...]
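
apic.c's disable path writes IA32_APICBASE twice because the SDM treats clearing the x2APIC enable bit while the APIC stays globally enabled as an illegal transition: first clear both enables, then re-enable plain xAPIC. The two-step order with a stubbed wrmsrl (bit positions assumed: EXTD = 10, EN = 11):

    #include <stdint.h>

    #define X2APIC_ENABLE (1ULL << 10)  /* IA32_APICBASE.EXTD */
    #define XAPIC_ENABLE  (1ULL << 11)  /* IA32_APICBASE.EN */

    static uint64_t apicbase_shadow;    /* models IA32_APICBASE */
    static void wrmsr_stub(uint64_t v) { apicbase_shadow = v; }

    static void x2apic_disable_seq(uint64_t msr)
    {
        /* Step 1: disable the APIC entirely; clearing EXTD alone would #GP. */
        wrmsr_stub(msr & ~(X2APIC_ENABLE | XAPIC_ENABLE));
        /* Step 2: come back up in legacy xAPIC mode only. */
        wrmsr_stub(msr & ~X2APIC_ENABLE);
    }

    int main(void)
    {
        x2apic_disable_seq(X2APIC_ENABLE | XAPIC_ENABLE);
        return apicbase_shadow == XAPIC_ENABLE ? 0 : 1;
    }
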
/linux-master/arch/x86/events/
core.c 169 for (er = extra_regs; er->msr; er++) {
180 reg->reg = er->msr;
333 pr_err("Failed to access perfctr msr (MSR %x is %Lx)\n",
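
The pr_err in events/core.c fires when a write/read-back probe of a counter MSR fails, which usually means the PMU MSRs aren't really there (common under some hypervisors). A stubbed version of that sanity check:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t fake_counter;       /* stand-in for a perf counter MSR */
    static void wrmsr_stub(uint64_t v)  { fake_counter = v; }
    static uint64_t rdmsr_stub(void)    { return fake_counter; }

    static bool perfctr_works(void)
    {
        const uint64_t test_val = 0xabcdUL;   /* arbitrary probe value */
        uint64_t read_back;

        wrmsr_stub(test_val);
        read_back = rdmsr_stub();
        if (read_back != test_val) {
            fprintf(stderr, "Failed to access perfctr msr (got %llx)\n",
                    (unsigned long long)read_back);
            return false;
        }
        return true;
    }

    int main(void) { return perfctr_works() ? 0 : 1; }
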

