/*-
 * Copyright (c) 2006-2008 Stanislav Sedov <stas@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: stable/11/sys/dev/cpuctl/cpuctl.c 362383 2020-06-19 13:48:23Z kib $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sched.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/uio.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/pmckern.h>
#include <sys/cpuctl.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>

#include <machine/cpufunc.h>
#include <machine/md_var.h>
#include <machine/specialreg.h>
#include <x86/ucode.h>

static d_open_t cpuctl_open;
static d_ioctl_t cpuctl_ioctl;

#define	CPUCTL_VERSION 1

#ifdef CPUCTL_DEBUG
# define	DPRINTF(format,...) printf(format, __VA_ARGS__);
#else
# define	DPRINTF(...)
#endif

#define	UCODE_SIZE_MAX	(4 * 1024 * 1024)

static int cpuctl_do_msr(int cpu, cpuctl_msr_args_t *data, u_long cmd,
    struct thread *td);
static int cpuctl_do_cpuid(int cpu, cpuctl_cpuid_args_t *data,
    struct thread *td);
static int cpuctl_do_cpuid_count(int cpu, cpuctl_cpuid_count_args_t *data,
    struct thread *td);
static int cpuctl_do_eval_cpu_features(int cpu, struct thread *td);
static int cpuctl_do_update(int cpu, cpuctl_update_args_t *data,
    struct thread *td);
static int update_intel(int cpu, cpuctl_update_args_t *args,
    struct thread *td);
static int update_amd(int cpu, cpuctl_update_args_t *args, struct thread *td);
static int update_via(int cpu, cpuctl_update_args_t *args,
    struct thread *td);

static struct cdev **cpuctl_devs;
static MALLOC_DEFINE(M_CPUCTL, "cpuctl", "CPUCTL buffer");

static struct cdevsw cpuctl_cdevsw = {
	.d_version =	D_VERSION,
	.d_open =	cpuctl_open,
	.d_ioctl =	cpuctl_ioctl,
	.d_name =	"cpuctl",
};
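
/*
 * The per-CPU character devices ("/dev/cpuctl%d") created at module load
 * are driven entirely through the ioctls dispatched in cpuctl_ioctl()
 * below; cpucontrol(8) is the usual userland consumer.  As an illustration
 * only, a minimal userland sketch of reading an MSR, assuming the
 * cpuctl(4) ABI from <sys/cpuctl.h> (0x10 is the architectural
 * IA32_TIME_STAMP_COUNTER MSR, chosen arbitrarily):
 *
 *	cpuctl_msr_args_t a = { .msr = 0x10 };
 *	int fd = open("/dev/cpuctl0", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, CPUCTL_RDMSR, &a) == 0)
 *		printf("TSC on cpu0: %#jx\n", (uintmax_t)a.data);
 */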

/*
 * Check whether the specified cpu is enabled.
 */
static int
cpu_enabled(int cpu)
{

	return (pmc_cpu_is_disabled(cpu) == 0);
}

/*
 * Check if the current thread is bound to a specific cpu.
 */
static int
cpu_sched_is_bound(struct thread *td)
{
	int ret;

	thread_lock(td);
	ret = sched_is_bound(td);
	thread_unlock(td);
	return (ret);
}

/*
 * Switch to target cpu to run.
 */
static void
set_cpu(int cpu, struct thread *td)
{

	KASSERT(cpu >= 0 && cpu <= mp_maxid && cpu_enabled(cpu),
	    ("[cpuctl,%d]: bad cpu number %d", __LINE__, cpu));
	thread_lock(td);
	sched_bind(td, cpu);
	thread_unlock(td);
	KASSERT(td->td_oncpu == cpu,
	    ("[cpuctl,%d]: cannot bind to target cpu %d on cpu %d", __LINE__,
	    cpu, td->td_oncpu));
}

static void
restore_cpu(int oldcpu, int is_bound, struct thread *td)
{

	KASSERT(oldcpu >= 0 && oldcpu <= mp_maxid && cpu_enabled(oldcpu),
	    ("[cpuctl,%d]: bad cpu number %d", __LINE__, oldcpu));
	thread_lock(td);
	if (is_bound == 0)
		sched_unbind(td);
	else
		sched_bind(td, oldcpu);
	thread_unlock(td);
}

int
cpuctl_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
    int flags, struct thread *td)
{
	int cpu, ret;

	cpu = dev2unit(dev);
	if (cpu > mp_maxid || !cpu_enabled(cpu)) {
		DPRINTF("[cpuctl,%d]: bad cpu number %d\n", __LINE__, cpu);
		return (ENXIO);
	}
	/* Require write flag for "write" requests. */
	if ((cmd == CPUCTL_MSRCBIT || cmd == CPUCTL_MSRSBIT ||
	    cmd == CPUCTL_UPDATE || cmd == CPUCTL_WRMSR ||
	    cmd == CPUCTL_EVAL_CPU_FEATURES) &&
	    (flags & FWRITE) == 0)
		return (EPERM);
	switch (cmd) {
	case CPUCTL_RDMSR:
		ret = cpuctl_do_msr(cpu, (cpuctl_msr_args_t *)data, cmd, td);
		break;
	case CPUCTL_MSRSBIT:
	case CPUCTL_MSRCBIT:
	case CPUCTL_WRMSR:
		ret = priv_check(td, PRIV_CPUCTL_WRMSR);
		if (ret != 0)
			goto fail;
		ret = cpuctl_do_msr(cpu, (cpuctl_msr_args_t *)data, cmd, td);
		break;
	case CPUCTL_CPUID:
		ret = cpuctl_do_cpuid(cpu, (cpuctl_cpuid_args_t *)data, td);
		break;
	case CPUCTL_UPDATE:
		ret = priv_check(td, PRIV_CPUCTL_UPDATE);
		if (ret != 0)
			goto fail;
		ret = cpuctl_do_update(cpu, (cpuctl_update_args_t *)data, td);
		break;
	case CPUCTL_CPUID_COUNT:
		ret = cpuctl_do_cpuid_count(cpu,
		    (cpuctl_cpuid_count_args_t *)data, td);
		break;
	case CPUCTL_EVAL_CPU_FEATURES:
		ret = cpuctl_do_eval_cpu_features(cpu, td);
		break;
	default:
		ret = EINVAL;
		break;
	}
fail:
	return (ret);
}

/*
 * Actually perform cpuid operation.
 */
static int
cpuctl_do_cpuid_count(int cpu, cpuctl_cpuid_count_args_t *data,
    struct thread *td)
{
	int is_bound = 0;
	int oldcpu;

	KASSERT(cpu >= 0 && cpu <= mp_maxid,
	    ("[cpuctl,%d]: bad cpu number %d", __LINE__, cpu));

	/* Explicitly clear cpuid data to avoid returning stale info. */
	bzero(data->data, sizeof(data->data));
	DPRINTF("[cpuctl,%d]: retrieving cpuid lev %#0x type %#0x for %d cpu\n",
	    __LINE__, data->level, data->level_type, cpu);
#ifdef __i386__
	if (cpu_id == 0)
		return (ENODEV);
#endif
	oldcpu = td->td_oncpu;
	is_bound = cpu_sched_is_bound(td);
	set_cpu(cpu, td);
	cpuid_count(data->level, data->level_type, data->data);
	restore_cpu(oldcpu, is_bound, td);
	return (0);
}

static int
cpuctl_do_cpuid(int cpu, cpuctl_cpuid_args_t *data, struct thread *td)
{
	cpuctl_cpuid_count_args_t cdata;
	int error;

	cdata.level = data->level;
	/* Override the level type. */
	cdata.level_type = 0;
	error = cpuctl_do_cpuid_count(cpu, &cdata, td);
	bcopy(cdata.data, data->data, sizeof(data->data)); /* Ignore error */
	return (error);
}

/*
 * Actually perform MSR operations.
 */
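/*
 * For CPUCTL_MSRSBIT the caller-supplied data->data is a mask of bits to
 * set in the MSR, and for CPUCTL_MSRCBIT a mask of bits to clear; both are
 * implemented as a read-modify-write inside a critical section.  A sketch
 * of a userland call, where SOME_MSR is a placeholder rather than a real
 * register name:
 *
 *	cpuctl_msr_args_t a = { .msr = SOME_MSR, .data = 0x1 };
 *	error = ioctl(fd, CPUCTL_MSRSBIT, &a);
 */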
static int
cpuctl_do_msr(int cpu, cpuctl_msr_args_t *data, u_long cmd, struct thread *td)
{
	uint64_t reg;
	int is_bound = 0;
	int oldcpu;
	int ret;

	KASSERT(cpu >= 0 && cpu <= mp_maxid,
	    ("[cpuctl,%d]: bad cpu number %d", __LINE__, cpu));

	DPRINTF("[cpuctl,%d]: operating on MSR %#0x for %d cpu\n", __LINE__,
	    data->msr, cpu);
#ifdef __i386__
	if ((cpu_feature & CPUID_MSR) == 0)
		return (ENODEV);
#endif
	oldcpu = td->td_oncpu;
	is_bound = cpu_sched_is_bound(td);
	set_cpu(cpu, td);
	if (cmd == CPUCTL_RDMSR) {
		data->data = 0;
		ret = rdmsr_safe(data->msr, &data->data);
	} else if (cmd == CPUCTL_WRMSR) {
		ret = wrmsr_safe(data->msr, data->data);
	} else if (cmd == CPUCTL_MSRSBIT) {
		critical_enter();
		ret = rdmsr_safe(data->msr, &reg);
		if (ret == 0)
			ret = wrmsr_safe(data->msr, reg | data->data);
		critical_exit();
	} else if (cmd == CPUCTL_MSRCBIT) {
		critical_enter();
		ret = rdmsr_safe(data->msr, &reg);
		if (ret == 0)
			ret = wrmsr_safe(data->msr, reg & ~data->data);
		critical_exit();
	} else
		panic("[cpuctl,%d]: unknown operation requested: %lu",
		    __LINE__, cmd);
	restore_cpu(oldcpu, is_bound, td);
	return (ret);
}

/*
 * Actually perform microcode update.
 */
static int
cpuctl_do_update(int cpu, cpuctl_update_args_t *data, struct thread *td)
{
	cpuctl_cpuid_args_t args = {
		.level = 0,
	};
	char vendor[13];
	int ret;

	KASSERT(cpu >= 0 && cpu <= mp_maxid,
	    ("[cpuctl,%d]: bad cpu number %d", __LINE__, cpu));
	DPRINTF("[cpuctl,%d]: XXX %d", __LINE__, cpu);

	ret = cpuctl_do_cpuid(cpu, &args, td);
	if (ret != 0)
		return (ret);
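	/*
	 * cpuid leaf 0 returns the vendor string in the EBX, EDX, ECX
	 * registers, while cpuctl_do_cpuid() stores them in data[] in
	 * EAX, EBX, ECX, EDX order; hence the 1, 3, 2 shuffle below.
	 */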
	((uint32_t *)vendor)[0] = args.data[1];
	((uint32_t *)vendor)[1] = args.data[3];
	((uint32_t *)vendor)[2] = args.data[2];
	vendor[12] = '\0';
	if (strncmp(vendor, INTEL_VENDOR_ID, sizeof(INTEL_VENDOR_ID)) == 0)
		ret = update_intel(cpu, data, td);
	else if (strncmp(vendor, AMD_VENDOR_ID, sizeof(AMD_VENDOR_ID)) == 0)
		ret = update_amd(cpu, data, td);
	else if (strncmp(vendor, CENTAUR_VENDOR_ID, sizeof(CENTAUR_VENDOR_ID))
	    == 0)
		ret = update_via(cpu, data, td);
	else
		ret = ENXIO;
	return (ret);
}

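/*
 * The Intel update is applied from inside an smp_rendezvous(9) callback:
 * every CPU enters the rendezvous, but only the target CPU actually writes
 * the new microcode, so the other CPUs sit in the rendezvous handler rather
 * than executing arbitrary code while the patch is being loaded.
 */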
struct ucode_update_data {
	void *ptr;
	int cpu;
	int ret;
};

static void
ucode_intel_load_rv(void *arg)
{
	struct ucode_update_data *d;

	d = arg;
	if (PCPU_GET(cpuid) == d->cpu)
		d->ret = ucode_intel_load(d->ptr, true, NULL, NULL);
}

static int
update_intel(int cpu, cpuctl_update_args_t *args, struct thread *td)
{
	struct ucode_update_data d;
	void *ptr;
	int is_bound, oldcpu, ret;

	if (args->size == 0 || args->data == NULL) {
		DPRINTF("[cpuctl,%d]: zero-sized firmware image", __LINE__);
		return (EINVAL);
	}
	if (args->size > UCODE_SIZE_MAX) {
		DPRINTF("[cpuctl,%d]: firmware image too large", __LINE__);
		return (EINVAL);
	}

	/*
	 * 16 byte alignment required.  Rely on the fact that
	 * malloc(9) always returns memory aligned to at least the
	 * size of the allocation.
	 */
	ptr = malloc(args->size + 16, M_CPUCTL, M_WAITOK);
	if (copyin(args->data, ptr, args->size) != 0) {
		DPRINTF("[cpuctl,%d]: copyin %p->%p of %zd bytes failed",
		    __LINE__, args->data, ptr, args->size);
		ret = EFAULT;
		goto out;
	}
	oldcpu = td->td_oncpu;
	is_bound = cpu_sched_is_bound(td);
	set_cpu(cpu, td);
	d.ptr = ptr;
	d.cpu = cpu;
	smp_rendezvous(NULL, ucode_intel_load_rv, NULL, &d);
	restore_cpu(oldcpu, is_bound, td);
	ret = d.ret;

	/*
	 * Replace any existing update.  This ensures that the new update
	 * will be reloaded automatically during ACPI resume.
	 */
	if (ret == 0)
		ptr = ucode_update(ptr);

out:
	free(ptr, M_CPUCTL);
	return (ret);
}

/*
 * NB: MSR 0xc0010020, MSR_K8_UCODE_UPDATE, is not documented by AMD.
 * Coreboot, illumos and Linux source code was used to understand
 * its workings.
 */
static void
amd_ucode_wrmsr(void *ucode_ptr)
{
	uint32_t tmp[4];

	wrmsr_safe(MSR_K8_UCODE_UPDATE, (uintptr_t)ucode_ptr);
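	/*
	 * The CPUID below presumably serves as a serializing instruction so
	 * that the freshly written patch takes effect before we return, in
	 * line with what the loaders referenced above appear to do.
	 */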
	do_cpuid(0, tmp);
}

static int
update_amd(int cpu, cpuctl_update_args_t *args, struct thread *td)
{
	void *ptr;
	int ret;

	if (args->size == 0 || args->data == NULL) {
		DPRINTF("[cpuctl,%d]: zero-sized firmware image", __LINE__);
		return (EINVAL);
	}
	if (args->size > UCODE_SIZE_MAX) {
		DPRINTF("[cpuctl,%d]: firmware image too large", __LINE__);
		return (EINVAL);
	}

	/*
	 * 16 byte alignment required.  Rely on the fact that
	 * malloc(9) always returns memory aligned to at least the
	 * size of the allocation.
	 */
	ptr = malloc(args->size + 16, M_CPUCTL, M_ZERO | M_WAITOK);
	if (copyin(args->data, ptr, args->size) != 0) {
		DPRINTF("[cpuctl,%d]: copyin %p->%p of %zd bytes failed",
		    __LINE__, args->data, ptr, args->size);
		ret = EFAULT;
		goto fail;
	}
	smp_rendezvous(NULL, amd_ucode_wrmsr, NULL, ptr);
	ret = 0;
fail:
	free(ptr, M_CPUCTL);
	return (ret);
}

static int
update_via(int cpu, cpuctl_update_args_t *args, struct thread *td)
{
	void *ptr;
	uint64_t rev0, rev1, res;
	uint32_t tmp[4];
	int is_bound;
	int oldcpu;
	int ret;

	if (args->size == 0 || args->data == NULL) {
		DPRINTF("[cpuctl,%d]: zero-sized firmware image", __LINE__);
		return (EINVAL);
	}
	if (args->size > UCODE_SIZE_MAX) {
		DPRINTF("[cpuctl,%d]: firmware image too large", __LINE__);
		return (EINVAL);
	}

	/*
	 * 4 byte alignment required.
	 */
	ptr = malloc(args->size, M_CPUCTL, M_WAITOK);
	if (copyin(args->data, ptr, args->size) != 0) {
		DPRINTF("[cpuctl,%d]: copyin %p->%p of %zd bytes failed",
		    __LINE__, args->data, ptr, args->size);
		ret = EFAULT;
		goto fail;
	}
	oldcpu = td->td_oncpu;
	is_bound = cpu_sched_is_bound(td);
	set_cpu(cpu, td);
	critical_enter();
	rdmsr_safe(MSR_BIOS_SIGN, &rev0); /* Get current microcode revision. */

	/*
	 * Perform update.
	 */
	wrmsr_safe(MSR_BIOS_UPDT_TRIG, (uintptr_t)(ptr));
	do_cpuid(1, tmp);

	/*
	 * The result is in the low byte of MSR FCR5:
	 * 0x00: No update has been attempted since RESET.
	 * 0x01: The last attempted update was successful.
	 * 0x02: The last attempted update was unsuccessful due to a bad
	 *       environment. No update was loaded and any preexisting
	 *       patches are still active.
	 * 0x03: The last attempted update was not applicable to this processor.
	 *       No update was loaded and any preexisting patches are still
	 *       active.
	 * 0x04: The last attempted update was not successful due to an invalid
	 *       update data block. No update was loaded and any preexisting
	 *       patches are still active.
	 */
	rdmsr_safe(0x1205, &res);
	res &= 0xff;
	critical_exit();
	rdmsr_safe(MSR_BIOS_SIGN, &rev1); /* Get new microcode revision. */
	restore_cpu(oldcpu, is_bound, td);

	DPRINTF("[cpuctl,%d]: rev0=%x rev1=%x res=%x\n", __LINE__,
	    (unsigned)(rev0 >> 32), (unsigned)(rev1 >> 32), (unsigned)res);

	if (res != 0x01)
		ret = EINVAL;
	else
		ret = 0;
fail:
	free(ptr, M_CPUCTL);
	return (ret);
}

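/*
 * Re-run the CPU identification on the target CPU and recalculate the
 * speculative-execution mitigations that depend on it.  cpucontrol(8)
 * issues CPUCTL_EVAL_CPU_FEATURES (its -e flag) for this, typically right
 * after loading a microcode update so that newly exposed CPUID bits are
 * picked up without a reboot.
 */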
static int
cpuctl_do_eval_cpu_features(int cpu, struct thread *td)
{
	int is_bound = 0;
	int oldcpu;

	KASSERT(cpu >= 0 && cpu <= mp_maxid,
	    ("[cpuctl,%d]: bad cpu number %d", __LINE__, cpu));

#ifdef __i386__
	if (cpu_id == 0)
		return (ENODEV);
#endif
	oldcpu = td->td_oncpu;
	is_bound = cpu_sched_is_bound(td);
	set_cpu(cpu, td);
	identify_cpu1();
	identify_cpu2();
	restore_cpu(oldcpu, is_bound, td);
	hw_ibrs_recalculate(true);
	hw_ssb_recalculate(true);
#ifdef __amd64__
	pmap_allow_2m_x_ept_recalculate();
#endif
	hw_mds_recalculate();
	x86_taa_recalculate();
	x86_rngds_mitg_recalculate(true);
	printcpuinfo();
	return (0);
}

int
cpuctl_open(struct cdev *dev, int flags, int fmt __unused, struct thread *td)
{
	int ret = 0;
	int cpu;

	cpu = dev2unit(dev);
	if (cpu > mp_maxid || !cpu_enabled(cpu)) {
		DPRINTF("[cpuctl,%d]: incorrect cpu number %d\n", __LINE__,
		    cpu);
		return (ENXIO);
	}
	if (flags & FWRITE)
		ret = securelevel_gt(td->td_ucred, 0);
	return (ret);
}

static int
cpuctl_modevent(module_t mod __unused, int type, void *data __unused)
{
	int cpu;

	switch (type) {
	case MOD_LOAD:
		if (bootverbose)
			printf("cpuctl: access to MSR registers/cpuid info.\n");
		cpuctl_devs = malloc(sizeof(*cpuctl_devs) * (mp_maxid + 1),
		    M_CPUCTL, M_WAITOK | M_ZERO);
		CPU_FOREACH(cpu)
			if (cpu_enabled(cpu))
				cpuctl_devs[cpu] = make_dev(&cpuctl_cdevsw, cpu,
				    UID_ROOT, GID_KMEM, 0640, "cpuctl%d", cpu);
		break;
	case MOD_UNLOAD:
		CPU_FOREACH(cpu) {
			if (cpuctl_devs[cpu] != NULL)
				destroy_dev(cpuctl_devs[cpu]);
		}
		free(cpuctl_devs, M_CPUCTL);
		break;
	case MOD_SHUTDOWN:
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

DEV_MODULE(cpuctl, cpuctl_modevent, NULL);
MODULE_VERSION(cpuctl, CPUCTL_VERSION);