/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_MSR_H
#define _ASM_X86_MSR_H

#include "msr-index.h"

#ifndef __ASSEMBLY__

#include <asm/asm.h>
#include <asm/errno.h>
#include <asm/cpumask.h>
#include <uapi/asm/msr.h>
#include <asm/shared/msr.h>

#include <linux/percpu.h>

struct msr_info {
	u32			msr_no;
	struct msr		reg;
	struct msr __percpu	*msrs;
	int			err;
};

struct msr_regs_info {
	u32 *regs;
	int err;
};

struct saved_msr {
	bool valid;
	struct msr_info info;
};

struct saved_msrs {
	unsigned int num;
	struct saved_msr *array;
};

/*
 * Both i386 and x86_64 return a 64-bit value in edx:eax, but gcc's "A"
 * constraint has different meanings. For i386, "A" means exactly
 * edx:eax, while for x86_64 it doesn't mean rdx:rax or edx:eax. Instead,
 * it means rax *or* rdx.
 */
#ifdef CONFIG_X86_64
/* Using 64-bit values saves one instruction clearing the high half of low */
#define DECLARE_ARGS(val, low, high)	unsigned long low, high
#define EAX_EDX_VAL(val, low, high)	((low) | (high) << 32)
#define EAX_EDX_RET(val, low, high)	"=a" (low), "=d" (high)
#else
#define DECLARE_ARGS(val, low, high)	unsigned long long val
#define EAX_EDX_VAL(val, low, high)	(val)
#define EAX_EDX_RET(val, low, high)	"=A" (val)
#endif

/*
 * Be very careful with includes. This header is prone to include loops.
 */
#include <asm/atomic.h>
#include <linux/tracepoint-defs.h>

#ifdef CONFIG_TRACEPOINTS
DECLARE_TRACEPOINT(read_msr);
DECLARE_TRACEPOINT(write_msr);
DECLARE_TRACEPOINT(rdpmc);
extern void do_trace_write_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_read_msr(unsigned int msr, u64 val, int failed);
extern void do_trace_rdpmc(unsigned int msr, u64 val, int failed);
#else
static inline void do_trace_write_msr(unsigned int msr, u64 val, int failed) {}
static inline void do_trace_read_msr(unsigned int msr, u64 val, int failed) {}
static inline void do_trace_rdpmc(unsigned int msr, u64 val, int failed) {}
#endif

/*
 * __rdmsr() and __wrmsr() are the two primitives which are the bare minimum MSR
 * accessors and should not have any tracing or other functionality piggybacking
 * on them - those are *purely* for accessing MSRs and nothing more. So don't even
 * think of extending them - you will be slapped with a stinking trout or a frozen
 * shark will reach you, wherever you are! You've been warned.
 */
static __always_inline unsigned long long __rdmsr(unsigned int msr)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("1: rdmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_RDMSR)
		     : EAX_EDX_RET(val, low, high) : "c" (msr));

	return EAX_EDX_VAL(val, low, high);
}

static __always_inline void __wrmsr(unsigned int msr, u32 low, u32 high)
{
	asm volatile("1: wrmsr\n"
		     "2:\n"
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR)
		     : : "c" (msr), "a"(low), "d" (high) : "memory");
}

/*
 * WRMSRNS behaves exactly like WRMSR with the only difference being
 * that it is not a serializing instruction by default.
 */
static __always_inline void __wrmsrns(u32 msr, u32 low, u32 high)
{
	/* Instruction opcode for WRMSRNS; supported in binutils >= 2.40. */
	asm volatile("1: .byte 0x0f,0x01,0xc6\n"
		     "2:\n"
		     _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_WRMSR)
		     : : "c" (msr), "a"(low), "d" (high));
}
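
/*
 * Illustrative sketch only, not used by the kernel: shows how a caller
 * splits and recombines a 64-bit value around the bare __rdmsr()/__wrmsr()
 * primitives above. MSR_IA32_APERF is merely a convenient 64-bit
 * architectural MSR from msr-index.h to demonstrate with; any MSR
 * number would do.
 */
static __always_inline u64 __msr_example_roundtrip(void)
{
	/* __rdmsr() recombines EDX:EAX into one 64-bit value. */
	u64 val = __rdmsr(MSR_IA32_APERF);

	/* __wrmsr() takes the two 32-bit halves separately. */
	__wrmsr(MSR_IA32_APERF, (u32)val, (u32)(val >> 32));

	return val;
}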

#define native_rdmsr(msr, val1, val2)			\
do {							\
	u64 __val = __rdmsr((msr));			\
	(void)((val1) = (u32)__val);			\
	(void)((val2) = (u32)(__val >> 32));		\
} while (0)

#define native_wrmsr(msr, low, high)			\
	__wrmsr(msr, low, high)

#define native_wrmsrl(msr, val)				\
	__wrmsr((msr), (u32)((u64)(val)),		\
		       (u32)((u64)(val) >> 32))

static inline unsigned long long native_read_msr(unsigned int msr)
{
	unsigned long long val;

	val = __rdmsr(msr);

	if (tracepoint_enabled(read_msr))
		do_trace_read_msr(msr, val, 0);

	return val;
}

static inline unsigned long long native_read_msr_safe(unsigned int msr,
						      int *err)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("1: rdmsr ; xor %[err],%[err]\n"
		     "2:\n\t"
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_RDMSR_SAFE, %[err])
		     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
		     : "c" (msr));
	if (tracepoint_enabled(read_msr))
		do_trace_read_msr(msr, EAX_EDX_VAL(val, low, high), *err);
	return EAX_EDX_VAL(val, low, high);
}

/* Can be uninlined because referenced by paravirt */
static inline void notrace
native_write_msr(unsigned int msr, u32 low, u32 high)
{
	__wrmsr(msr, low, high);

	if (tracepoint_enabled(write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
}

/* Can be uninlined because referenced by paravirt */
static inline int notrace
native_write_msr_safe(unsigned int msr, u32 low, u32 high)
{
	int err;

	asm volatile("1: wrmsr ; xor %[err],%[err]\n"
		     "2:\n\t"
		     _ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_WRMSR_SAFE, %[err])
		     : [err] "=a" (err)
		     : "c" (msr), "0" (low), "d" (high)
		     : "memory");
	if (tracepoint_enabled(write_msr))
		do_trace_write_msr(msr, ((u64)high << 32 | low), err);
	return err;
}

extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);

/**
 * rdtsc() - returns the current TSC without ordering constraints
 *
 * rdtsc() returns the result of RDTSC as a 64-bit integer.  The
 * only ordering constraint it supplies is the ordering implied by
 * "asm volatile": it will put the RDTSC in the place you expect.  The
 * CPU can and will speculatively execute that RDTSC, though, so the
 * results can be non-monotonic if compared on different CPUs.
 */
static __always_inline unsigned long long rdtsc(void)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdtsc" : EAX_EDX_RET(val, low, high));

	return EAX_EDX_VAL(val, low, high);
}

/**
 * rdtsc_ordered() - read the current TSC in program order
 *
 * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
 * It is ordered like a load to a global in-memory counter.  It should
 * be impossible to observe non-monotonic rdtsc_ordered() behavior
 * across multiple CPUs as long as the TSC is synced.
 */
static __always_inline unsigned long long rdtsc_ordered(void)
{
	DECLARE_ARGS(val, low, high);

	/*
	 * The RDTSC instruction is not ordered relative to memory
	 * access.  The Intel SDM and the AMD APM are both vague on this
	 * point, but empirically an RDTSC instruction can be
	 * speculatively executed before prior loads.  An RDTSC
	 * immediately after an appropriate barrier appears to be
	 * ordered as a normal load, that is, it provides the same
	 * ordering guarantees as reading from a global memory location
	 * that some other imaginary CPU is updating continuously with a
	 * time stamp.
	 *
	 * Thus, use the preferred barrier on the respective CPU, aiming for
	 * RDTSCP as the default.
	 */
	asm volatile(ALTERNATIVE_2("rdtsc",
				   "lfence; rdtsc", X86_FEATURE_LFENCE_RDTSC,
				   "rdtscp", X86_FEATURE_RDTSCP)
			: EAX_EDX_RET(val, low, high)
			/* RDTSCP clobbers ECX with MSR_TSC_AUX. */
			:: "ecx");

	return EAX_EDX_VAL(val, low, high);
}
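
/*
 * Illustrative sketch only, not used by the kernel: a minimal
 * cycle-delta measurement built on rdtsc_ordered() above. Using the
 * ordered variant on both sides keeps the stamps in program order;
 * with plain rdtsc() the first read could be speculatively executed
 * around the work being timed.
 */
static __always_inline u64 __tsc_example_delta(void)
{
	u64 start, end;

	start = rdtsc_ordered();
	/* ... the code under measurement would go here ... */
	end = rdtsc_ordered();

	return end - start;
}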

static inline unsigned long long native_read_pmc(int counter)
{
	DECLARE_ARGS(val, low, high);

	asm volatile("rdpmc" : EAX_EDX_RET(val, low, high) : "c" (counter));
	if (tracepoint_enabled(rdpmc))
		do_trace_rdpmc(counter, EAX_EDX_VAL(val, low, high), 0);
	return EAX_EDX_VAL(val, low, high);
}

#ifdef CONFIG_PARAVIRT_XXL
#include <asm/paravirt.h>
#else
#include <linux/errno.h>
/*
 * Access to machine-specific registers (available on 586 and better only).
 * Note: the rd* operations modify the parameters directly (without using
 * pointer indirection); this allows gcc to optimize better.
 */

#define rdmsr(msr, low, high)					\
do {								\
	u64 __val = native_read_msr((msr));			\
	(void)((low) = (u32)__val);				\
	(void)((high) = (u32)(__val >> 32));			\
} while (0)

static inline void wrmsr(unsigned int msr, u32 low, u32 high)
{
	native_write_msr(msr, low, high);
}

#define rdmsrl(msr, val)			\
	((val) = native_read_msr((msr)))

static inline void wrmsrl(unsigned int msr, u64 val)
{
	native_write_msr(msr, (u32)(val & 0xffffffffULL), (u32)(val >> 32));
}

/* wrmsr with exception handling */
static inline int wrmsr_safe(unsigned int msr, u32 low, u32 high)
{
	return native_write_msr_safe(msr, low, high);
}

/* rdmsr with exception handling */
#define rdmsr_safe(msr, low, high)				\
({								\
	int __err;						\
	u64 __val = native_read_msr_safe((msr), &__err);	\
	(*low) = (u32)__val;					\
	(*high) = (u32)(__val >> 32);				\
	__err;							\
})

static inline int rdmsrl_safe(unsigned int msr, unsigned long long *p)
{
	int err;

	*p = native_read_msr_safe(msr, &err);
	return err;
}

#define rdpmc(counter, low, high)			\
do {							\
	u64 _l = native_read_pmc((counter));		\
	(low)  = (u32)_l;				\
	(high) = (u32)(_l >> 32);			\
} while (0)

#define rdpmcl(counter, val) ((val) = native_read_pmc(counter))

#endif	/* !CONFIG_PARAVIRT_XXL */

static __always_inline void wrmsrns(u32 msr, u64 val)
{
	__wrmsrns(msr, val, val >> 32);
}

/*
 * 64-bit version of wrmsr_safe():
 */
static inline int wrmsrl_safe(u32 msr, u64 val)
{
	return wrmsr_safe(msr, (u32)val, (u32)(val >> 32));
}

struct msr __percpu *msrs_alloc(void);
void msrs_free(struct msr __percpu *msrs);
int msr_set_bit(u32 msr, u8 bit);
int msr_clear_bit(u32 msr, u8 bit);
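
/*
 * Illustrative sketch only, not used by the kernel: an open-coded
 * read-modify-write of a single MSR bit via rdmsrl()/wrmsrl(), roughly
 * what msr_set_bit() above does internally, minus its error and
 * "was the bit already set" return-value handling. MSR_EFER and
 * EFER_NX from msr-index.h serve as stand-ins here.
 */
static inline void __msr_example_set_bit(void)
{
	u64 val;

	rdmsrl(MSR_EFER, val);
	if (!(val & EFER_NX))
		wrmsrl(MSR_EFER, val | EFER_NX);
}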

#ifdef CONFIG_SMP
int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs);
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8]);
#else /* CONFIG_SMP */
static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	rdmsr(msr_no, *l, *h);
	return 0;
}
static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	wrmsr(msr_no, l, h);
	return 0;
}
static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	rdmsrl(msr_no, *q);
	return 0;
}
static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	wrmsrl(msr_no, q);
	return 0;
}
static inline void rdmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr __percpu *msrs)
{
	rdmsr_on_cpu(0, msr_no, raw_cpu_ptr(&msrs->l), raw_cpu_ptr(&msrs->h));
}
static inline void wrmsr_on_cpus(const struct cpumask *m, u32 msr_no,
				 struct msr __percpu *msrs)
{
	wrmsr_on_cpu(0, msr_no, raw_cpu_read(msrs->l), raw_cpu_read(msrs->h));
}
static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
				    u32 *l, u32 *h)
{
	return rdmsr_safe(msr_no, l, h);
}
static inline int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	return wrmsr_safe(msr_no, l, h);
}
static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	return rdmsrl_safe(msr_no, q);
}
static inline int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	return wrmsrl_safe(msr_no, q);
}
static inline int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return rdmsr_safe_regs(regs);
}
static inline int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	return wrmsr_safe_regs(regs);
}
#endif /* CONFIG_SMP */
#endif /* __ASSEMBLY__ */
#endif /* _ASM_X86_MSR_H */
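
/*
 * Usage note (illustrative, with assumed values): the *_on_cpu()
 * helpers declared above perform the access on the target CPU, e.g.:
 *
 *	u64 aperf;
 *	int err = rdmsrl_safe_on_cpu(1, MSR_IA32_APERF, &aperf);
 *
 * On !CONFIG_SMP builds they fall back to the local accessors and the
 * cpu argument is ignored.
 */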