1190501Smr/*-
2190501Smr * Copyright (c) 2005 Nate Lawson
3190501Smr * Copyright (c) 2004 Colin Percival
4190501Smr * Copyright (c) 2004-2005 Bruno Durcot
5190501Smr * Copyright (c) 2004 FUKUDA Nobuhiko
6190501Smr * Copyright (c) 2009 Michael Reifenberger
7190501Smr * Copyright (c) 2009 Norikatsu Shigemura
8190501Smr * Copyright (c) 2008-2009 Gen Otsuji
9190501Smr *
10190501Smr * This code is depending on kern_cpu.c, est.c, powernow.c, p4tcc.c, smist.c
11190521Smr * in various parts. The authors of these files are Nate Lawson,
12190501Smr * Colin Percival, Bruno Durcot, and FUKUDA Nobuhiko.
13190501Smr * This code contains patches by Michael Reifenberger and Norikatsu Shigemura.
14190501Smr * Thank you.
15190501Smr *
16190501Smr * Redistribution and use in source and binary forms, with or without
17190501Smr * modification, are permitted providing that the following conditions
18190501Smr * are met:
19190501Smr * 1. Redistributions of source code must retain the above copyright
20190501Smr *    notice, this list of conditions and the following disclaimer.
21190501Smr * 2. Redistributions in binary form must reproduce the above copyright
22190501Smr *    notice, this list of conditions and the following disclaimer in the
23190501Smr *    documentation and/or other materials provided with the distribution.
24190501Smr *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
26190501Smr * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
27190501Smr * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28190501Smr * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
29190501Smr * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30190501Smr * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31190501Smr * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32190501Smr * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
33190501Smr * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
34190501Smr * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35190501Smr * POSSIBILITY OF SUCH DAMAGE.
36190501Smr */
37190501Smr
38190501Smr/*
39190501Smr * For more info:
40190501Smr * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 10h Processors
41190501Smr * 31116 Rev 3.20  February 04, 2009
42190501Smr * BIOS and Kernel Developer's Guide(BKDG) for AMD Family 11h Processors
43190501Smr * 41256 Rev 3.00 - July 07, 2008
44190501Smr */
45190501Smr
46190501Smr#include <sys/cdefs.h>
47190501Smr__FBSDID("$FreeBSD: stable/10/sys/x86/cpufreq/hwpstate.c 326638 2017-12-06 21:40:24Z jkim $");
48190501Smr
49190501Smr#include <sys/param.h>
50190501Smr#include <sys/bus.h>
51190501Smr#include <sys/cpu.h>
52190501Smr#include <sys/kernel.h>
53190501Smr#include <sys/module.h>
54190501Smr#include <sys/malloc.h>
55190501Smr#include <sys/proc.h>
56190501Smr#include <sys/pcpu.h>
57190501Smr#include <sys/smp.h>
58190501Smr#include <sys/sched.h>
59190501Smr
60190501Smr#include <machine/md_var.h>
61190501Smr#include <machine/cputypes.h>
62190501Smr#include <machine/specialreg.h>
63190501Smr
64193530Sjkim#include <contrib/dev/acpica/include/acpi.h>
65193530Sjkim
66190501Smr#include <dev/acpica/acpivar.h>
67190501Smr
68190501Smr#include "acpi_if.h"
69190501Smr#include "cpufreq_if.h"
70190501Smr
71190501Smr#define	MSR_AMD_10H_11H_LIMIT	0xc0010061
72190501Smr#define	MSR_AMD_10H_11H_CONTROL	0xc0010062
73190501Smr#define	MSR_AMD_10H_11H_STATUS	0xc0010063
74190501Smr#define	MSR_AMD_10H_11H_CONFIG	0xc0010064
75190501Smr
76190501Smr#define	AMD_10H_11H_MAX_STATES	16
77190501Smr
78190501Smr/* for MSR_AMD_10H_11H_LIMIT C001_0061 */
79190501Smr#define	AMD_10H_11H_GET_PSTATE_MAX_VAL(msr)	(((msr) >> 4) & 0x7)
80190501Smr#define	AMD_10H_11H_GET_PSTATE_LIMIT(msr)	(((msr)) & 0x7)
81190501Smr/* for MSR_AMD_10H_11H_CONFIG 10h:C001_0064:68 / 11h:C001_0064:6B */
82190501Smr#define	AMD_10H_11H_CUR_VID(msr)		(((msr) >> 9) & 0x7F)
83190501Smr#define	AMD_10H_11H_CUR_DID(msr)		(((msr) >> 6) & 0x07)
84190501Smr#define	AMD_10H_11H_CUR_FID(msr)		((msr) & 0x3F)
85190501Smr
86326638Sjkim#define	AMD_17H_CUR_VID(msr)			(((msr) >> 14) & 0xFF)
87326638Sjkim#define	AMD_17H_CUR_DID(msr)			(((msr) >> 8) & 0x3F)
88326638Sjkim#define	AMD_17H_CUR_FID(msr)			((msr) & 0xFF)
89326638Sjkim
/* Emit a device_printf() message only when debug.hwpstate_verbose is set. */
#define	HWPSTATE_DEBUG(dev, msg...)			\
	do {						\
		if (hwpstate_verbose)			\
			device_printf(dev, msg);	\
	} while (0)
95190501Smr
/* One entry of the driver's P-state table (mirrors struct cf_setting). */
struct hwpstate_setting {
	int	freq;		/* CPU clock in Mhz or 100ths of a percent. */
	int	volts;		/* Voltage in mV. */
	int	power;		/* Power consumed in mW. */
	int	lat;		/* Transition latency in us. */
	int	pstate_id;	/* P-State id */
};
103190501Smr
/* Per-device softc: P-state table filled in at probe time. */
struct hwpstate_softc {
	device_t		dev;	/* back-pointer to our device */
	struct hwpstate_setting	hwpstate_settings[AMD_10H_11H_MAX_STATES];
	int			cfnum;	/* number of valid entries above */
};
109190501Smr
110190501Smrstatic void	hwpstate_identify(driver_t *driver, device_t parent);
111190501Smrstatic int	hwpstate_probe(device_t dev);
112190501Smrstatic int	hwpstate_attach(device_t dev);
113190501Smrstatic int	hwpstate_detach(device_t dev);
114190501Smrstatic int	hwpstate_set(device_t dev, const struct cf_setting *cf);
115190501Smrstatic int	hwpstate_get(device_t dev, struct cf_setting *cf);
116190501Smrstatic int	hwpstate_settings(device_t dev, struct cf_setting *sets, int *count);
117190501Smrstatic int	hwpstate_type(device_t dev, int *type);
118190501Smrstatic int	hwpstate_shutdown(device_t dev);
119190501Smrstatic int	hwpstate_features(driver_t *driver, u_int *features);
120190501Smrstatic int	hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev);
121190501Smrstatic int	hwpstate_get_info_from_msr(device_t dev);
122190501Smrstatic int	hwpstate_goto_pstate(device_t dev, int pstate_id);
123190501Smr
124326638Sjkimstatic int	hwpstate_verbose;
125326638SjkimSYSCTL_INT(_debug, OID_AUTO, hwpstate_verbose, CTLFLAG_RWTUN,
126326638Sjkim    &hwpstate_verbose, 0, "Debug hwpstate");
127190501Smr
128326638Sjkimstatic int	hwpstate_verify;
129326638SjkimSYSCTL_INT(_debug, OID_AUTO, hwpstate_verify, CTLFLAG_RWTUN,
130326638Sjkim    &hwpstate_verify, 0, "Verify P-state after setting");
131326638Sjkim
/* Newbus/cpufreq/ACPI method dispatch table for the hwpstate driver. */
static device_method_t hwpstate_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	hwpstate_identify),
	DEVMETHOD(device_probe,		hwpstate_probe),
	DEVMETHOD(device_attach,	hwpstate_attach),
	DEVMETHOD(device_detach,	hwpstate_detach),
	DEVMETHOD(device_shutdown,	hwpstate_shutdown),

	/* cpufreq interface */
	DEVMETHOD(cpufreq_drv_set,	hwpstate_set),
	DEVMETHOD(cpufreq_drv_get,	hwpstate_get),
	DEVMETHOD(cpufreq_drv_settings,	hwpstate_settings),
	DEVMETHOD(cpufreq_drv_type,	hwpstate_type),

	/* ACPI interface */
	DEVMETHOD(acpi_get_features,	hwpstate_features),

	/* Table terminator. */
	{0, 0}
};

static devclass_t hwpstate_devclass;
static driver_t hwpstate_driver = {
	"hwpstate",
	hwpstate_methods,
	sizeof(struct hwpstate_softc),	/* newbus allocates the softc */
};

/* Attach one hwpstate child under the cpu bus. */
DRIVER_MODULE(hwpstate, cpu, hwpstate_driver, hwpstate_devclass, 0, 0);
160190501Smr
161190501Smr/*
162190501Smr * Go to Px-state on all cpus considering the limit.
163190501Smr */
164190501Smrstatic int
165326638Sjkimhwpstate_goto_pstate(device_t dev, int id)
166190501Smr{
167326638Sjkim	sbintime_t sbt;
168190501Smr	uint64_t msr;
169326638Sjkim	int cpu, i, j, limit;
170326638Sjkim
171190501Smr	/* get the current pstate limit */
172190501Smr	msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
173190501Smr	limit = AMD_10H_11H_GET_PSTATE_LIMIT(msr);
174326638Sjkim	if (limit > id)
175190501Smr		id = limit;
176190501Smr
177326638Sjkim	cpu = curcpu;
178326638Sjkim	HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n", id, cpu);
179326638Sjkim	/* Go To Px-state */
180326638Sjkim	wrmsr(MSR_AMD_10H_11H_CONTROL, id);
181326638Sjkim
182190501Smr	/*
183190501Smr	 * We are going to the same Px-state on all cpus.
184215398Savg	 * Probably should take _PSD into account.
185190501Smr	 */
186215398Savg	CPU_FOREACH(i) {
187326638Sjkim		if (i == cpu)
188326638Sjkim			continue;
189326638Sjkim
190215398Savg		/* Bind to each cpu. */
191190501Smr		thread_lock(curthread);
192215398Savg		sched_bind(curthread, i);
193190501Smr		thread_unlock(curthread);
194326638Sjkim		HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n", id, i);
195190501Smr		/* Go To Px-state */
196190501Smr		wrmsr(MSR_AMD_10H_11H_CONTROL, id);
197258994Ssbruno	}
198326638Sjkim
199326638Sjkim	/*
200326638Sjkim	 * Verify whether each core is in the requested P-state.
201326638Sjkim	 */
202326638Sjkim	if (hwpstate_verify) {
203326638Sjkim		CPU_FOREACH(i) {
204326638Sjkim			thread_lock(curthread);
205326638Sjkim			sched_bind(curthread, i);
206326638Sjkim			thread_unlock(curthread);
207326638Sjkim			/* wait loop (100*100 usec is enough ?) */
208326638Sjkim			for (j = 0; j < 100; j++) {
209326638Sjkim				/* get the result. not assure msr=id */
210326638Sjkim				msr = rdmsr(MSR_AMD_10H_11H_STATUS);
211326638Sjkim				if (msr == id)
212326638Sjkim					break;
213326638Sjkim				sbt = SBT_1MS / 10;
214326638Sjkim				tsleep_sbt(dev, PZERO, "pstate_goto", sbt,
215326638Sjkim				    sbt >> tc_precexp, 0);
216190501Smr			}
217326638Sjkim			HWPSTATE_DEBUG(dev, "result: P%d-state on cpu%d\n",
218326638Sjkim			    (int)msr, i);
219326638Sjkim			if (msr != id) {
220326638Sjkim				HWPSTATE_DEBUG(dev,
221326638Sjkim				    "error: loop is not enough.\n");
222326638Sjkim				return (ENXIO);
223326638Sjkim			}
224190501Smr		}
225190501Smr	}
226326638Sjkim
227326638Sjkim	return (0);
228190501Smr}
229190501Smr
230190501Smrstatic int
231190501Smrhwpstate_set(device_t dev, const struct cf_setting *cf)
232190501Smr{
233190501Smr	struct hwpstate_softc *sc;
234190501Smr	struct hwpstate_setting *set;
235190501Smr	int i;
236190501Smr
237190501Smr	if (cf == NULL)
238190501Smr		return (EINVAL);
239190501Smr	sc = device_get_softc(dev);
240190501Smr	set = sc->hwpstate_settings;
241190501Smr	for (i = 0; i < sc->cfnum; i++)
242190501Smr		if (CPUFREQ_CMP(cf->freq, set[i].freq))
243190501Smr			break;
244190501Smr	if (i == sc->cfnum)
245190501Smr		return (EINVAL);
246190501Smr
247190501Smr	return (hwpstate_goto_pstate(dev, set[i].pstate_id));
248190501Smr}
249190501Smr
250190501Smrstatic int
251190501Smrhwpstate_get(device_t dev, struct cf_setting *cf)
252190501Smr{
253190501Smr	struct hwpstate_softc *sc;
254190501Smr	struct hwpstate_setting set;
255190501Smr	uint64_t msr;
256190501Smr
257190501Smr	sc = device_get_softc(dev);
258190501Smr	if (cf == NULL)
259190501Smr		return (EINVAL);
260190501Smr	msr = rdmsr(MSR_AMD_10H_11H_STATUS);
261326638Sjkim	if (msr >= sc->cfnum)
262190501Smr		return (EINVAL);
263190501Smr	set = sc->hwpstate_settings[msr];
264190501Smr
265190501Smr	cf->freq = set.freq;
266190501Smr	cf->volts = set.volts;
267190501Smr	cf->power = set.power;
268190501Smr	cf->lat = set.lat;
269190501Smr	cf->dev = dev;
270190501Smr	return (0);
271190501Smr}
272190501Smr
273190501Smrstatic int
274190501Smrhwpstate_settings(device_t dev, struct cf_setting *sets, int *count)
275190501Smr{
276190501Smr	struct hwpstate_softc *sc;
277190501Smr	struct hwpstate_setting set;
278190501Smr	int i;
279190501Smr
280190501Smr	if (sets == NULL || count == NULL)
281190501Smr		return (EINVAL);
282190501Smr	sc = device_get_softc(dev);
283190501Smr	if (*count < sc->cfnum)
284190501Smr		return (E2BIG);
285190501Smr	for (i = 0; i < sc->cfnum; i++, sets++) {
286190501Smr		set = sc->hwpstate_settings[i];
287190501Smr		sets->freq = set.freq;
288190501Smr		sets->volts = set.volts;
289190501Smr		sets->power = set.power;
290190501Smr		sets->lat = set.lat;
291190501Smr		sets->dev = dev;
292190501Smr	}
293190501Smr	*count = sc->cfnum;
294190501Smr
295190501Smr	return (0);
296190501Smr}
297190501Smr
298190501Smrstatic int
299190501Smrhwpstate_type(device_t dev, int *type)
300190501Smr{
301190501Smr
302190501Smr	if (type == NULL)
303190501Smr		return (EINVAL);
304190501Smr
305190501Smr	*type = CPUFREQ_TYPE_ABSOLUTE;
306190501Smr	return (0);
307190501Smr}
308190501Smr
309190501Smrstatic void
310190501Smrhwpstate_identify(driver_t *driver, device_t parent)
311190501Smr{
312190501Smr
313190501Smr	if (device_find_child(parent, "hwpstate", -1) != NULL)
314190501Smr		return;
315190501Smr
316197070Sjkim	if (cpu_vendor_id != CPU_VENDOR_AMD || CPUID_TO_FAMILY(cpu_id) < 0x10)
317190501Smr		return;
318190501Smr
319190501Smr	/*
320190501Smr	 * Check if hardware pstate enable bit is set.
321190501Smr	 */
322190501Smr	if ((amd_pminfo & AMDPM_HW_PSTATE) == 0) {
323190501Smr		HWPSTATE_DEBUG(parent, "hwpstate enable bit is not set.\n");
324190501Smr		return;
325190501Smr	}
326190501Smr
327190501Smr	if (resource_disabled("hwpstate", 0))
328190501Smr		return;
329190501Smr
330192029Sbrueffer	if (BUS_ADD_CHILD(parent, 10, "hwpstate", -1) == NULL)
331190501Smr		device_printf(parent, "hwpstate: add child failed\n");
332190501Smr}
333190501Smr
/*
 * Probe: build the P-state table, preferring _PSS data from acpi_perf and
 * falling back to raw MSR probing.  NOTE(review): "error" doubles as a
 * "need MSR fallback" flag here -- TRUE forces the fallback path.
 */
static int
hwpstate_probe(device_t dev)
{
	struct hwpstate_softc *sc;
	device_t perf_dev;
	uint64_t msr;
	int error, type;

	/*
	 * Only hwpstate0.
	 * It goes well with acpi_throttle.
	 */
	if (device_get_unit(dev) != 0)
		return (ENXIO);

	sc = device_get_softc(dev);
	sc->dev = dev;

	/*
	 * Check if acpi_perf has INFO only flag.
	 */
	perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1);
	error = TRUE;
	if (perf_dev && device_is_attached(perf_dev)) {
		error = CPUFREQ_DRV_TYPE(perf_dev, &type);
		if (error == 0) {
			if ((type & CPUFREQ_FLAG_INFO_ONLY) == 0) {
				/*
				 * If acpi_perf doesn't have INFO_ONLY flag,
				 * it will take care of pstate transitions.
				 */
				HWPSTATE_DEBUG(dev, "acpi_perf will take care of pstate transitions.\n");
				return (ENXIO);
			} else {
				/*
				 * If acpi_perf has INFO_ONLY flag, (_PCT has FFixedHW)
				 * we can get _PSS info from acpi_perf
				 * without going into ACPI.
				 */
				HWPSTATE_DEBUG(dev, "going to fetch info from acpi_perf\n");
				error = hwpstate_get_info_from_acpi_perf(dev, perf_dev);
			}
		}
	}

	if (error == 0) {
		/*
		 * Now we get _PSS info from acpi_perf without error.
		 * Let's check it.
		 */
		msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
		/* Cross-check the ACPI count against the MSR-reported count. */
		if (sc->cfnum != 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr)) {
			HWPSTATE_DEBUG(dev, "MSR (%jd) and ACPI _PSS (%d)"
			    " count mismatch\n", (intmax_t)msr, sc->cfnum);
			error = TRUE;
		}
	}

	/*
	 * If we cannot get info from acpi_perf,
	 * Let's get info from MSRs.
	 */
	if (error)
		error = hwpstate_get_info_from_msr(dev);
	if (error)
		return (error);

	device_set_desc(dev, "Cool`n'Quiet 2.0");
	return (0);
}
404190501Smr
/*
 * Attach: register with the cpufreq framework.  The settings table was
 * already populated during probe.
 */
static int
hwpstate_attach(device_t dev)
{

	return (cpufreq_register(dev));
}
411190501Smr
/*
 * Build the P-state table from the C001_0064.. config MSRs directly,
 * used when ACPI _PSS information is unavailable or inconsistent.
 * Frequency formulas follow the family-specific BKDG documents cited at
 * the top of this file; verify against the BKDG for any new family.
 */
static int
hwpstate_get_info_from_msr(device_t dev)
{
	struct hwpstate_softc *sc;
	struct hwpstate_setting *hwpstate_set;
	uint64_t msr;
	int family, i, fid, did;

	family = CPUID_TO_FAMILY(cpu_id);
	sc = device_get_softc(dev);
	/* Get pstate count */
	msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
	sc->cfnum = 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr);
	hwpstate_set = sc->hwpstate_settings;
	for (i = 0; i < sc->cfnum; i++) {
		msr = rdmsr(MSR_AMD_10H_11H_CONFIG + i);
		/* Bit 63 marks the P-state entry as valid/enabled. */
		if ((msr & ((uint64_t)1 << 63)) == 0) {
			HWPSTATE_DEBUG(dev, "msr is not valid.\n");
			return (ENXIO);
		}
		did = AMD_10H_11H_CUR_DID(msr);
		fid = AMD_10H_11H_CUR_FID(msr);

		/* Convert fid/did to frequency. */
		switch (family) {
		case 0x11:
			/* 11h: 100 MHz * (fid + 8), halved 'did' times. */
			hwpstate_set[i].freq = (100 * (fid + 0x08)) >> did;
			break;
		case 0x10:
		case 0x12:
		case 0x15:
		case 0x16:
			/* 10h/12h/15h/16h: 100 MHz * (fid + 0x10) >> did. */
			hwpstate_set[i].freq = (100 * (fid + 0x10)) >> did;
			break;
		case 0x17:
			/* 17h uses wider fid/did fields and a divisor 'did'. */
			did = AMD_17H_CUR_DID(msr);
			if (did == 0) {
				/* Guard against division by zero below. */
				HWPSTATE_DEBUG(dev, "unexpected did: 0\n");
				did = 1;
			}
			fid = AMD_17H_CUR_FID(msr);
			hwpstate_set[i].freq = (200 * fid) / did;
			break;
		default:
			HWPSTATE_DEBUG(dev, "get_info_from_msr: AMD family"
			    " 0x%02x CPUs are not supported yet\n", family);
			return (ENXIO);
		}
		hwpstate_set[i].pstate_id = i;
		/* There was volts calculation, but deleted it. */
		hwpstate_set[i].volts = CPUFREQ_VAL_UNKNOWN;
		hwpstate_set[i].power = CPUFREQ_VAL_UNKNOWN;
		hwpstate_set[i].lat = CPUFREQ_VAL_UNKNOWN;
	}
	return (0);
}
468190501Smr
469190501Smrstatic int
470190501Smrhwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev)
471190501Smr{
472190501Smr	struct hwpstate_softc *sc;
473190501Smr	struct cf_setting *perf_set;
474190501Smr	struct hwpstate_setting *hwpstate_set;
475190501Smr	int count, error, i;
476190501Smr
477190501Smr	perf_set = malloc(MAX_SETTINGS * sizeof(*perf_set), M_TEMP, M_NOWAIT);
478190501Smr	if (perf_set == NULL) {
479190501Smr		HWPSTATE_DEBUG(dev, "nomem\n");
480190501Smr		return (ENOMEM);
481190501Smr	}
482190501Smr	/*
483190501Smr	 * Fetch settings from acpi_perf.
484190501Smr	 * Now it is attached, and has info only flag.
485190501Smr	 */
486190501Smr	count = MAX_SETTINGS;
487190501Smr	error = CPUFREQ_DRV_SETTINGS(perf_dev, perf_set, &count);
488190501Smr	if (error) {
489190501Smr		HWPSTATE_DEBUG(dev, "error: CPUFREQ_DRV_SETTINGS.\n");
490190501Smr		goto out;
491190501Smr	}
492190501Smr	sc = device_get_softc(dev);
493190501Smr	sc->cfnum = count;
494190501Smr	hwpstate_set = sc->hwpstate_settings;
495190501Smr	for (i = 0; i < count; i++) {
496190501Smr		if (i == perf_set[i].spec[0]) {
497190501Smr			hwpstate_set[i].pstate_id = i;
498190501Smr			hwpstate_set[i].freq = perf_set[i].freq;
499190501Smr			hwpstate_set[i].volts = perf_set[i].volts;
500190501Smr			hwpstate_set[i].power = perf_set[i].power;
501190501Smr			hwpstate_set[i].lat = perf_set[i].lat;
502190501Smr		} else {
503190501Smr			HWPSTATE_DEBUG(dev, "ACPI _PSS object mismatch.\n");
504190501Smr			error = ENXIO;
505190501Smr			goto out;
506190501Smr		}
507190501Smr	}
508190501Smrout:
509190501Smr	if (perf_set)
510190501Smr		free(perf_set, M_TEMP);
511190501Smr	return (error);
512190501Smr}
513190501Smr
/*
 * Detach: return every core to P0 (full speed), then unregister from the
 * cpufreq framework.
 */
static int
hwpstate_detach(device_t dev)
{

	hwpstate_goto_pstate(dev, 0);
	return (cpufreq_unregister(dev));
}
521190501Smr
/*
 * Shutdown: intentionally a no-op.  Restoring P0 here is disabled
 * (kept as the commented-out call below).
 */
static int
hwpstate_shutdown(device_t dev)
{

	/* hwpstate_goto_pstate(dev, 0); */
	return (0);
}
529190501Smr
/*
 * ACPI feature callback: advertise direct MSR access so the ACPI CPU
 * driver passes the corresponding capability to _OSC/_PDC.
 */
static int
hwpstate_features(driver_t *driver, u_int *features)
{

	/* Notify the ACPI CPU that we support direct access to MSRs */
	*features = ACPI_CAP_PERF_MSRS;
	return (0);
}
538