// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <asm/msr.h>

static void __rdmsr_on_cpu(void *info)
{
	struct msr_info *rv = info;
	struct msr *reg;

	if (rv->msrs)
		reg = this_cpu_ptr(rv->msrs);
	else
		reg = &rv->reg;

	rdmsr(rv->msr_no, reg->l, reg->h);
}

static void __wrmsr_on_cpu(void *info)
{
	struct msr_info *rv = info;
	struct msr *reg;

	if (rv->msrs)
		reg = this_cpu_ptr(rv->msrs);
	else
		reg = &rv->reg;

	wrmsr(rv->msr_no, reg->l, reg->h);
}

int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
	*l = rv.reg.l;
	*h = rv.reg.h;

	return err;
}
EXPORT_SYMBOL(rdmsr_on_cpu);

int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
	*q = rv.reg.q;

	return err;
}
EXPORT_SYMBOL(rdmsrl_on_cpu);

int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.l = l;
	rv.reg.h = h;
	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);

	return err;
}
EXPORT_SYMBOL(wrmsr_on_cpu);

int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.q = q;

	err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);

	return err;
}
EXPORT_SYMBOL(wrmsrl_on_cpu);

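/*
 * Run @msr_func on every CPU in @mask: directly on the local CPU if it is
 * in the mask, and via IPI on the others.  smp_call_function_many() skips
 * the calling CPU, hence the explicit local call under get_cpu().
 */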
static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
			    struct msr __percpu *msrs,
			    void (*msr_func) (void *info))
{
	struct msr_info rv;
	int this_cpu;

	memset(&rv, 0, sizeof(rv));

	rv.msrs	  = msrs;
	rv.msr_no = msr_no;

	this_cpu = get_cpu();

	if (cpumask_test_cpu(this_cpu, mask))
		msr_func(&rv);

	smp_call_function_many(mask, msr_func, &rv, 1);
	put_cpu();
}
/*
 * rdmsr on a bunch of CPUs
 *
 * @mask:       which CPUs
 * @msr_no:     which MSR
 * @msrs:       per-CPU array that receives the values read
 *
 * A usage sketch follows the function.
 */
void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs)
{
	__rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
}
EXPORT_SYMBOL(rdmsr_on_cpus);
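
/*
 * Minimal usage sketch for rdmsr_on_cpus() (not part of the original file):
 * the per-CPU buffer would typically come from msrs_alloc() in
 * arch/x86/lib/msr.c, and each CPU's result is read back via per_cpu_ptr().
 * The MSR constant below is only an example.
 *
 *	struct msr __percpu *msrs = msrs_alloc();
 *	int cpu;
 *
 *	if (msrs) {
 *		rdmsr_on_cpus(cpu_online_mask, MSR_IA32_APERF, msrs);
 *		for_each_online_cpu(cpu)
 *			pr_debug("cpu%d: %llx\n", cpu, per_cpu_ptr(msrs, cpu)->q);
 *		msrs_free(msrs);
 *	}
 */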

/*
 * wrmsr on a bunch of CPUs
 *
 * @mask:       which CPUs
 * @msr_no:     which MSR
 * @msrs:       per-CPU array of values to write
 *
 * A usage sketch follows the function.
 */
void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs)
{
	__rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
}
EXPORT_SYMBOL(wrmsr_on_cpus);
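
/*
 * Minimal sketch of the matching write path (again not part of the original
 * file): read the MSR on all CPUs in the mask, modify the per-CPU copies,
 * and write them back.  Bit 0 is only a placeholder.
 *
 *	rdmsr_on_cpus(mask, msr_no, msrs);
 *	for_each_cpu(cpu, mask)
 *		per_cpu_ptr(msrs, cpu)->l |= BIT(0);
 *	wrmsr_on_cpus(mask, msr_no, msrs);
 */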

struct msr_info_completion {
	struct msr_info		msr;
	struct completion	done;
};

/*
 * These "safe" variants are slower and should be used when the target MSR
 * may not actually exist.
 */
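
/*
 * Illustrative only (not in the original file): a caller that is not sure
 * the MSR exists on the target CPU would use a _safe_ variant and check the
 * result, e.g.
 *
 *	u64 val;
 *
 *	if (rdmsrl_safe_on_cpu(cpu, msr_no, &val))
 *		return -ENODEV;		(MSR absent or the cross-call failed)
 */
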
static void __rdmsr_safe_on_cpu(void *info)
{
	struct msr_info_completion *rv = info;

	rv->msr.err = rdmsr_safe(rv->msr.msr_no, &rv->msr.reg.l, &rv->msr.reg.h);
	complete(&rv->done);
}

static void __wrmsr_safe_on_cpu(void *info)
{
	struct msr_info *rv = info;

	rv->err = wrmsr_safe(rv->msr_no, rv->reg.l, rv->reg.h);
}

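/*
 * Uses an async cross-call plus a completion so that the calling task can
 * sleep, rather than spin, while the remote CPU performs the access.
 */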
int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
{
	struct msr_info_completion rv;
	call_single_data_t csd;
	int err;

	INIT_CSD(&csd, __rdmsr_safe_on_cpu, &rv);

	memset(&rv, 0, sizeof(rv));
	init_completion(&rv.done);
	rv.msr.msr_no = msr_no;

	err = smp_call_function_single_async(cpu, &csd);
	if (!err) {
		wait_for_completion(&rv.done);
		err = rv.msr.err;
	}
	*l = rv.msr.reg.l;
	*h = rv.msr.reg.h;

	return err;
}
EXPORT_SYMBOL(rdmsr_safe_on_cpu);

int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.l = l;
	rv.reg.h = h;
	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsr_safe_on_cpu);

int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
{
	int err;
	struct msr_info rv;

	memset(&rv, 0, sizeof(rv));

	rv.msr_no = msr_no;
	rv.reg.q = q;

	err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsrl_safe_on_cpu);

int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q)
{
	u32 low, high;
	int err;

	err = rdmsr_safe_on_cpu(cpu, msr_no, &low, &high);
	*q = (u64)high << 32 | low;

	return err;
}
EXPORT_SYMBOL(rdmsrl_safe_on_cpu);

/*
 * These variants are significantly slower, but allow control over the
 * entire 32-bit GPR set.
 */
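
/*
 * Illustrative sketch (not in the original file), modelled on how
 * rdmsrl_amd_safe() in arch/x86/kernel/cpu/amd.c drives this interface:
 * regs[] follows the GPR order expected by rdmsr_safe_regs(), with the MSR
 * number in the ECX slot and any extra register state (here EDI) set up as
 * the access requires; the result comes back in the EAX/EDX slots.
 *
 *	u32 gprs[8] = { 0 };
 *	u64 val;
 *	int err;
 *
 *	gprs[1] = msr_no;		// ECX: which MSR
 *	gprs[7] = 0x9c5a203a;		// EDI: AMD passcode, example only
 *	err = rdmsr_safe_regs_on_cpu(cpu, gprs);
 *	if (!err)
 *		val = gprs[0] | ((u64)gprs[2] << 32);	// EAX, EDX
 */
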
static void __rdmsr_safe_regs_on_cpu(void *info)
{
	struct msr_regs_info *rv = info;

	rv->err = rdmsr_safe_regs(rv->regs);
}

static void __wrmsr_safe_regs_on_cpu(void *info)
{
	struct msr_regs_info *rv = info;

	rv->err = wrmsr_safe_regs(rv->regs);
}

int rdmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	int err;
	struct msr_regs_info rv;

	rv.regs   = regs;
	rv.err    = -EIO;
	err = smp_call_function_single(cpu, __rdmsr_safe_regs_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(rdmsr_safe_regs_on_cpu);

int wrmsr_safe_regs_on_cpu(unsigned int cpu, u32 regs[8])
{
	int err;
	struct msr_regs_info rv;

	rv.regs = regs;
	rv.err  = -EIO;
	err = smp_call_function_single(cpu, __wrmsr_safe_regs_on_cpu, &rv, 1);

	return err ? err : rv.err;
}
EXPORT_SYMBOL(wrmsr_safe_regs_on_cpu);