1190501Smr/*-
2190501Smr * Copyright (c) 2005 Nate Lawson
3190501Smr * Copyright (c) 2004 Colin Percival
4190501Smr * Copyright (c) 2004-2005 Bruno Durcot
5190501Smr * Copyright (c) 2004 FUKUDA Nobuhiko
6190501Smr * Copyright (c) 2009 Michael Reifenberger
7190501Smr * Copyright (c) 2009 Norikatsu Shigemura
8190501Smr * Copyright (c) 2008-2009 Gen Otsuji
9190501Smr *
10190501Smr * This code is depending on kern_cpu.c, est.c, powernow.c, p4tcc.c, smist.c
11190521Smr * in various parts. The authors of these files are Nate Lawson,
12190501Smr * Colin Percival, Bruno Durcot, and FUKUDA Nobuhiko.
13190501Smr * This code contains patches by Michael Reifenberger and Norikatsu Shigemura.
14190501Smr * Thank you.
15190501Smr *
16190501Smr * Redistribution and use in source and binary forms, with or without
17190501Smr * modification, are permitted providing that the following conditions
18190501Smr * are met:
19190501Smr * 1. Redistributions of source code must retain the above copyright
20190501Smr *    notice, this list of conditions and the following disclaimer.
21190501Smr * 2. Redistributions in binary form must reproduce the above copyright
22190501Smr *    notice, this list of conditions and the following disclaimer in the
23190501Smr *    documentation and/or other materials provided with the distribution.
24190501Smr *
25190501Smr * THIS SOFTWARE IS PROVIDED BY THE AUTHOR``AS IS'' AND ANY EXPRESS OR
26190501Smr * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
27190501Smr * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28190501Smr * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
29190501Smr * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30190501Smr * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31190501Smr * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32190501Smr * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
33190501Smr * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
34190501Smr * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35190501Smr * POSSIBILITY OF SUCH DAMAGE.
36190501Smr */
37190501Smr
38190501Smr/*
39190501Smr * For more info:
40190501Smr * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 10h Processors
41190501Smr * 31116 Rev 3.20  February 04, 2009
42190501Smr * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 11h Processors
43190501Smr * 41256 Rev 3.00 - July 07, 2008
44190501Smr */
45190501Smr
46190501Smr#include <sys/cdefs.h>
47190501Smr__FBSDID("$FreeBSD: releng/10.3/sys/x86/cpufreq/hwpstate.c 258994 2013-12-05 17:57:51Z sbruno $");
48190501Smr
49190501Smr#include <sys/param.h>
50190501Smr#include <sys/bus.h>
51190501Smr#include <sys/cpu.h>
52190501Smr#include <sys/kernel.h>
53190501Smr#include <sys/module.h>
54190501Smr#include <sys/malloc.h>
55190501Smr#include <sys/proc.h>
56190501Smr#include <sys/pcpu.h>
57190501Smr#include <sys/smp.h>
58190501Smr#include <sys/sched.h>
59190501Smr
60190501Smr#include <machine/md_var.h>
61190501Smr#include <machine/cputypes.h>
62190501Smr#include <machine/specialreg.h>
63190501Smr
64193530Sjkim#include <contrib/dev/acpica/include/acpi.h>
65193530Sjkim
66190501Smr#include <dev/acpica/acpivar.h>
67190501Smr
68190501Smr#include "acpi_if.h"
69190501Smr#include "cpufreq_if.h"
70190501Smr
71190501Smr#define	MSR_AMD_10H_11H_LIMIT	0xc0010061
72190501Smr#define	MSR_AMD_10H_11H_CONTROL	0xc0010062
73190501Smr#define	MSR_AMD_10H_11H_STATUS	0xc0010063
74190501Smr#define	MSR_AMD_10H_11H_CONFIG	0xc0010064
75190501Smr
76190501Smr#define	AMD_10H_11H_MAX_STATES	16
77190501Smr
78190501Smr/* for MSR_AMD_10H_11H_LIMIT C001_0061 */
79190501Smr#define	AMD_10H_11H_GET_PSTATE_MAX_VAL(msr)	(((msr) >> 4) & 0x7)
80190501Smr#define	AMD_10H_11H_GET_PSTATE_LIMIT(msr)	(((msr)) & 0x7)
81190501Smr/* for MSR_AMD_10H_11H_CONFIG 10h:C001_0064:68 / 11h:C001_0064:6B */
82190501Smr#define	AMD_10H_11H_CUR_VID(msr)		(((msr) >> 9) & 0x7F)
83190501Smr#define	AMD_10H_11H_CUR_DID(msr)		(((msr) >> 6) & 0x07)
84190501Smr#define	AMD_10H_11H_CUR_FID(msr)		((msr) & 0x3F)
85190501Smr
86190501Smr#define	HWPSTATE_DEBUG(dev, msg...)			\
87190501Smr	do{						\
88190501Smr		if(hwpstate_verbose)			\
89190501Smr			device_printf(dev, msg);	\
90190501Smr	}while(0)
91190501Smr
/* One P-state entry as exposed to the cpufreq framework. */
struct hwpstate_setting {
	int	freq;		/* CPU clock in Mhz or 100ths of a percent. */
	int	volts;		/* Voltage in mV. */
	int	power;		/* Power consumed in mW. */
	int	lat;		/* Transition latency in us. */
	int	pstate_id;	/* P-State id */
};

/* Per-device (hwpstate0 only) state. */
struct hwpstate_softc {
	device_t		dev;
	struct hwpstate_setting	hwpstate_settings[AMD_10H_11H_MAX_STATES];
	int			cfnum;	/* Number of valid entries above. */
};
105190501Smr
106190501Smrstatic void	hwpstate_identify(driver_t *driver, device_t parent);
107190501Smrstatic int	hwpstate_probe(device_t dev);
108190501Smrstatic int	hwpstate_attach(device_t dev);
109190501Smrstatic int	hwpstate_detach(device_t dev);
110190501Smrstatic int	hwpstate_set(device_t dev, const struct cf_setting *cf);
111190501Smrstatic int	hwpstate_get(device_t dev, struct cf_setting *cf);
112190501Smrstatic int	hwpstate_settings(device_t dev, struct cf_setting *sets, int *count);
113190501Smrstatic int	hwpstate_type(device_t dev, int *type);
114190501Smrstatic int	hwpstate_shutdown(device_t dev);
115190501Smrstatic int	hwpstate_features(driver_t *driver, u_int *features);
116190501Smrstatic int	hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev);
117190501Smrstatic int	hwpstate_get_info_from_msr(device_t dev);
118190501Smrstatic int	hwpstate_goto_pstate(device_t dev, int pstate_id);
119190501Smr
/* Debug chatter from HWPSTATE_DEBUG(); settable as sysctl and tunable. */
static int	hwpstate_verbose = 0;
SYSCTL_INT(_debug, OID_AUTO, hwpstate_verbose, CTLFLAG_RW | CTLFLAG_TUN,
       &hwpstate_verbose, 0, "Debug hwpstate");
TUNABLE_INT("debug.hwpstate_verbose", &hwpstate_verbose);
124190501Smr
/* Method dispatch table: newbus device, cpufreq driver, and ACPI hooks. */
static device_method_t hwpstate_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	hwpstate_identify),
	DEVMETHOD(device_probe,		hwpstate_probe),
	DEVMETHOD(device_attach,	hwpstate_attach),
	DEVMETHOD(device_detach,	hwpstate_detach),
	DEVMETHOD(device_shutdown,	hwpstate_shutdown),

	/* cpufreq interface */
	DEVMETHOD(cpufreq_drv_set,	hwpstate_set),
	DEVMETHOD(cpufreq_drv_get,	hwpstate_get),
	DEVMETHOD(cpufreq_drv_settings,	hwpstate_settings),
	DEVMETHOD(cpufreq_drv_type,	hwpstate_type),

	/* ACPI interface */
	DEVMETHOD(acpi_get_features,	hwpstate_features),

	{0, 0}
};

static devclass_t hwpstate_devclass;
static driver_t hwpstate_driver = {
	"hwpstate",
	hwpstate_methods,
	sizeof(struct hwpstate_softc),
};

/* Attach as a child of each cpu device. */
DRIVER_MODULE(hwpstate, cpu, hwpstate_driver, hwpstate_devclass, 0, 0);
153190501Smr
154190501Smr/*
155190501Smr * Go to Px-state on all cpus considering the limit.
156190501Smr */
/*
 * Go to Px-state on all cpus considering the limit.
 *
 * First pass: bind to each CPU in turn and write the (limit-clamped)
 * P-state id to the CONTROL MSR.  Second pass: bind to each CPU again
 * and poll the STATUS MSR until it reports the requested state or the
 * polling budget (100 * 100us) runs out.  Returns 0 on success, ENXIO
 * if any CPU failed to reach the state in time.
 */
static int
hwpstate_goto_pstate(device_t dev, int pstate)
{
	int i;
	uint64_t msr;
	int j;
	int limit;
	int id = pstate;
	int error;

	/* get the current pstate limit */
	msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
	limit = AMD_10H_11H_GET_PSTATE_LIMIT(msr);
	if(limit > id)
		id = limit;	/* clamp request to the hardware limit */

	/*
	 * We are going to the same Px-state on all cpus.
	 * Probably should take _PSD into account.
	 */
	error = 0;
	CPU_FOREACH(i) {
		/* Bind to each cpu. */
		thread_lock(curthread);
		sched_bind(curthread, i);
		thread_unlock(curthread);
		HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n",
			id, PCPU_GET(cpuid));
		/* Go To Px-state */
		wrmsr(MSR_AMD_10H_11H_CONTROL, id);
	}
	/* Second pass: verify each CPU completed its transition. */
	CPU_FOREACH(i) {
		/* Bind to each cpu. */
		thread_lock(curthread);
		sched_bind(curthread, i);
		thread_unlock(curthread);
		/* wait loop (100*100 usec is enough ?) */
		for(j = 0; j < 100; j++){
			/* get the result. not assure msr=id */
			msr = rdmsr(MSR_AMD_10H_11H_STATUS);
			if(msr == id){
				break;
			}
			DELAY(100);
		}
		HWPSTATE_DEBUG(dev, "result  P%d-state on cpu%d\n",
		    (int)msr, PCPU_GET(cpuid));
		if (msr != id) {
			HWPSTATE_DEBUG(dev, "error: loop is not enough.\n");
			error = ENXIO;
		}
	}
	/* Release the CPU binding acquired above. */
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);
	return (error);
}
214190501Smr
215190501Smrstatic int
216190501Smrhwpstate_set(device_t dev, const struct cf_setting *cf)
217190501Smr{
218190501Smr	struct hwpstate_softc *sc;
219190501Smr	struct hwpstate_setting *set;
220190501Smr	int i;
221190501Smr
222190501Smr	if (cf == NULL)
223190501Smr		return (EINVAL);
224190501Smr	sc = device_get_softc(dev);
225190501Smr	set = sc->hwpstate_settings;
226190501Smr	for (i = 0; i < sc->cfnum; i++)
227190501Smr		if (CPUFREQ_CMP(cf->freq, set[i].freq))
228190501Smr			break;
229190501Smr	if (i == sc->cfnum)
230190501Smr		return (EINVAL);
231190501Smr
232190501Smr	return (hwpstate_goto_pstate(dev, set[i].pstate_id));
233190501Smr}
234190501Smr
235190501Smrstatic int
236190501Smrhwpstate_get(device_t dev, struct cf_setting *cf)
237190501Smr{
238190501Smr	struct hwpstate_softc *sc;
239190501Smr	struct hwpstate_setting set;
240190501Smr	uint64_t msr;
241190501Smr
242190501Smr	sc = device_get_softc(dev);
243190501Smr	if (cf == NULL)
244190501Smr		return (EINVAL);
245190501Smr	msr = rdmsr(MSR_AMD_10H_11H_STATUS);
246190501Smr	if(msr >= sc->cfnum)
247190501Smr		return (EINVAL);
248190501Smr	set = sc->hwpstate_settings[msr];
249190501Smr
250190501Smr	cf->freq = set.freq;
251190501Smr	cf->volts = set.volts;
252190501Smr	cf->power = set.power;
253190501Smr	cf->lat = set.lat;
254190501Smr	cf->dev = dev;
255190501Smr	return (0);
256190501Smr}
257190501Smr
258190501Smrstatic int
259190501Smrhwpstate_settings(device_t dev, struct cf_setting *sets, int *count)
260190501Smr{
261190501Smr	struct hwpstate_softc *sc;
262190501Smr	struct hwpstate_setting set;
263190501Smr	int i;
264190501Smr
265190501Smr	if (sets == NULL || count == NULL)
266190501Smr		return (EINVAL);
267190501Smr	sc = device_get_softc(dev);
268190501Smr	if (*count < sc->cfnum)
269190501Smr		return (E2BIG);
270190501Smr	for (i = 0; i < sc->cfnum; i++, sets++) {
271190501Smr		set = sc->hwpstate_settings[i];
272190501Smr		sets->freq = set.freq;
273190501Smr		sets->volts = set.volts;
274190501Smr		sets->power = set.power;
275190501Smr		sets->lat = set.lat;
276190501Smr		sets->dev = dev;
277190501Smr	}
278190501Smr	*count = sc->cfnum;
279190501Smr
280190501Smr	return (0);
281190501Smr}
282190501Smr
283190501Smrstatic int
284190501Smrhwpstate_type(device_t dev, int *type)
285190501Smr{
286190501Smr
287190501Smr	if (type == NULL)
288190501Smr		return (EINVAL);
289190501Smr
290190501Smr	*type = CPUFREQ_TYPE_ABSOLUTE;
291190501Smr	return (0);
292190501Smr}
293190501Smr
294190501Smrstatic void
295190501Smrhwpstate_identify(driver_t *driver, device_t parent)
296190501Smr{
297190501Smr
298190501Smr	if (device_find_child(parent, "hwpstate", -1) != NULL)
299190501Smr		return;
300190501Smr
301197070Sjkim	if (cpu_vendor_id != CPU_VENDOR_AMD || CPUID_TO_FAMILY(cpu_id) < 0x10)
302190501Smr		return;
303190501Smr
304190501Smr	/*
305190501Smr	 * Check if hardware pstate enable bit is set.
306190501Smr	 */
307190501Smr	if ((amd_pminfo & AMDPM_HW_PSTATE) == 0) {
308190501Smr		HWPSTATE_DEBUG(parent, "hwpstate enable bit is not set.\n");
309190501Smr		return;
310190501Smr	}
311190501Smr
312190501Smr	if (resource_disabled("hwpstate", 0))
313190501Smr		return;
314190501Smr
315192029Sbrueffer	if (BUS_ADD_CHILD(parent, 10, "hwpstate", -1) == NULL)
316190501Smr		device_printf(parent, "hwpstate: add child failed\n");
317190501Smr}
318190501Smr
/*
 * device_probe method.  Only unit 0 probes (P-states are global here).
 *
 * P-state settings are preferably taken from acpi_perf when it is
 * attached with the INFO_ONLY flag; if that fails, or its _PSS count
 * disagrees with the MSR-reported count, they are read from the MSRs
 * instead.  Note: 'error' doubles as a flag — it starts as TRUE
 * meaning "no usable acpi_perf info yet".
 */
static int
hwpstate_probe(device_t dev)
{
	struct hwpstate_softc *sc;
	device_t perf_dev;
	uint64_t msr;
	int error, type;

	/*
	 * Only hwpstate0.
	 * It goes well with acpi_throttle.
	 */
	if (device_get_unit(dev) != 0)
		return (ENXIO);

	sc = device_get_softc(dev);
	sc->dev = dev;

	/*
	 * Check if acpi_perf has INFO only flag.
	 */
	perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1);
	error = TRUE;
	if (perf_dev && device_is_attached(perf_dev)) {
		error = CPUFREQ_DRV_TYPE(perf_dev, &type);
		if (error == 0) {
			if ((type & CPUFREQ_FLAG_INFO_ONLY) == 0) {
				/*
				 * If acpi_perf doesn't have INFO_ONLY flag,
				 * it will take care of pstate transitions.
				 */
				HWPSTATE_DEBUG(dev, "acpi_perf will take care of pstate transitions.\n");
				return (ENXIO);
			} else {
				/*
				 * If acpi_perf has INFO_ONLY flag, (_PCT has FFixedHW)
				 * we can get _PSS info from acpi_perf
				 * without going into ACPI.
				 */
				HWPSTATE_DEBUG(dev, "going to fetch info from acpi_perf\n");
				error = hwpstate_get_info_from_acpi_perf(dev, perf_dev);
			}
		}
	}

	if (error == 0) {
		/*
		 * Now we get _PSS info from acpi_perf without error.
		 * Let's check it.
		 */
		msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
		if (sc->cfnum != 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr)) {
			HWPSTATE_DEBUG(dev, "msr and acpi _PSS count mismatch.\n");
			error = TRUE;
		}
	}

	/*
	 * If we cannot get info from acpi_perf,
	 * Let's get info from MSRs.
	 */
	if (error)
		error = hwpstate_get_info_from_msr(dev);
	if (error)
		return (error);

	device_set_desc(dev, "Cool`n'Quiet 2.0");
	return (0);
}
388190501Smr
389190501Smrstatic int
390190501Smrhwpstate_attach(device_t dev)
391190501Smr{
392190501Smr
393190501Smr	return (cpufreq_register(dev));
394190501Smr}
395190501Smr
396190501Smrstatic int
397190501Smrhwpstate_get_info_from_msr(device_t dev)
398190501Smr{
399190501Smr	struct hwpstate_softc *sc;
400190501Smr	struct hwpstate_setting *hwpstate_set;
401190501Smr	uint64_t msr;
402190501Smr	int family, i, fid, did;
403190501Smr
404197070Sjkim	family = CPUID_TO_FAMILY(cpu_id);
405190501Smr	sc = device_get_softc(dev);
406190501Smr	/* Get pstate count */
407190501Smr	msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
408190501Smr	sc->cfnum = 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr);
409190501Smr	hwpstate_set = sc->hwpstate_settings;
410190501Smr	for (i = 0; i < sc->cfnum; i++) {
411190501Smr		msr = rdmsr(MSR_AMD_10H_11H_CONFIG + i);
412190501Smr		if ((msr & ((uint64_t)1 << 63)) != ((uint64_t)1 << 63)) {
413190501Smr			HWPSTATE_DEBUG(dev, "msr is not valid.\n");
414190501Smr			return (ENXIO);
415190501Smr		}
416190501Smr		did = AMD_10H_11H_CUR_DID(msr);
417190501Smr		fid = AMD_10H_11H_CUR_FID(msr);
418190501Smr		switch(family) {
419190501Smr		case 0x11:
420190501Smr			/* fid/did to frequency */
421190501Smr			hwpstate_set[i].freq = 100 * (fid + 0x08) / (1 << did);
422190501Smr			break;
423190501Smr		case 0x10:
424190501Smr			/* fid/did to frequency */
425190501Smr			hwpstate_set[i].freq = 100 * (fid + 0x10) / (1 << did);
426190501Smr			break;
427190501Smr		default:
428190501Smr			HWPSTATE_DEBUG(dev, "get_info_from_msr: AMD family %d CPU's are not implemented yet. sorry.\n", family);
429190501Smr			return (ENXIO);
430190501Smr			break;
431190501Smr		}
432190501Smr		hwpstate_set[i].pstate_id = i;
433190501Smr		/* There was volts calculation, but deleted it. */
434190501Smr		hwpstate_set[i].volts = CPUFREQ_VAL_UNKNOWN;
435190501Smr		hwpstate_set[i].power = CPUFREQ_VAL_UNKNOWN;
436190501Smr		hwpstate_set[i].lat = CPUFREQ_VAL_UNKNOWN;
437190501Smr	}
438190501Smr	return (0);
439190501Smr}
440190501Smr
441190501Smrstatic int
442190501Smrhwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev)
443190501Smr{
444190501Smr	struct hwpstate_softc *sc;
445190501Smr	struct cf_setting *perf_set;
446190501Smr	struct hwpstate_setting *hwpstate_set;
447190501Smr	int count, error, i;
448190501Smr
449190501Smr	perf_set = malloc(MAX_SETTINGS * sizeof(*perf_set), M_TEMP, M_NOWAIT);
450190501Smr	if (perf_set == NULL) {
451190501Smr		HWPSTATE_DEBUG(dev, "nomem\n");
452190501Smr		return (ENOMEM);
453190501Smr	}
454190501Smr	/*
455190501Smr	 * Fetch settings from acpi_perf.
456190501Smr	 * Now it is attached, and has info only flag.
457190501Smr	 */
458190501Smr	count = MAX_SETTINGS;
459190501Smr	error = CPUFREQ_DRV_SETTINGS(perf_dev, perf_set, &count);
460190501Smr	if (error) {
461190501Smr		HWPSTATE_DEBUG(dev, "error: CPUFREQ_DRV_SETTINGS.\n");
462190501Smr		goto out;
463190501Smr	}
464190501Smr	sc = device_get_softc(dev);
465190501Smr	sc->cfnum = count;
466190501Smr	hwpstate_set = sc->hwpstate_settings;
467190501Smr	for (i = 0; i < count; i++) {
468190501Smr		if (i == perf_set[i].spec[0]) {
469190501Smr			hwpstate_set[i].pstate_id = i;
470190501Smr			hwpstate_set[i].freq = perf_set[i].freq;
471190501Smr			hwpstate_set[i].volts = perf_set[i].volts;
472190501Smr			hwpstate_set[i].power = perf_set[i].power;
473190501Smr			hwpstate_set[i].lat = perf_set[i].lat;
474190501Smr		} else {
475190501Smr			HWPSTATE_DEBUG(dev, "ACPI _PSS object mismatch.\n");
476190501Smr			error = ENXIO;
477190501Smr			goto out;
478190501Smr		}
479190501Smr	}
480190501Smrout:
481190501Smr	if (perf_set)
482190501Smr		free(perf_set, M_TEMP);
483190501Smr	return (error);
484190501Smr}
485190501Smr
486190501Smrstatic int
487190501Smrhwpstate_detach(device_t dev)
488190501Smr{
489190501Smr
490190501Smr	hwpstate_goto_pstate(dev, 0);
491190501Smr	return (cpufreq_unregister(dev));
492190501Smr}
493190501Smr
494190501Smrstatic int
495190501Smrhwpstate_shutdown(device_t dev)
496190501Smr{
497190501Smr
498190501Smr	/* hwpstate_goto_pstate(dev, 0); */
499190501Smr	return (0);
500190501Smr}
501190501Smr
502190501Smrstatic int
503190501Smrhwpstate_features(driver_t *driver, u_int *features)
504190501Smr{
505190501Smr
506190501Smr	/* Notify the ACPI CPU that we support direct access to MSRs */
507190501Smr	*features = ACPI_CAP_PERF_MSRS;
508190501Smr	return (0);
509190501Smr}
510