/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2005 Nate Lawson
 * Copyright (c) 2004 Colin Percival
 * Copyright (c) 2004-2005 Bruno Durcot
 * Copyright (c) 2004 FUKUDA Nobuhiko
 * Copyright (c) 2009 Michael Reifenberger
 * Copyright (c) 2009 Norikatsu Shigemura
 * Copyright (c) 2008-2009 Gen Otsuji
 *
 * This code depends in various parts on kern_cpu.c, est.c, powernow.c,
 * p4tcc.c, and smist.c.  The authors of those files are Nate Lawson,
 * Colin Percival, Bruno Durcot, and FUKUDA Nobuhiko.
 * This code contains patches by Michael Reifenberger and Norikatsu Shigemura.
 * Thank you.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted providing that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * For more info:
 * BIOS and Kernel Developer's Guide (BKDG) for AMD Family 10h Processors,
 * 31116 Rev 3.20 - February 04, 2009
 * BIOS and Kernel Developer's Guide (BKDG) for AMD Family 11h Processors,
 * 41256 Rev 3.00 - July 07, 2008
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/pcpu.h>
#include <sys/smp.h>
#include <sys/sched.h>

#include <machine/md_var.h>
#include <machine/cputypes.h>
#include <machine/specialreg.h>

#include <contrib/dev/acpica/include/acpi.h>

#include <dev/acpica/acpivar.h>

#include "acpi_if.h"
#include "cpufreq_if.h"

#define	MSR_AMD_10H_11H_LIMIT	0xc0010061
#define	MSR_AMD_10H_11H_CONTROL	0xc0010062
#define	MSR_AMD_10H_11H_STATUS	0xc0010063
#define	MSR_AMD_10H_11H_CONFIG	0xc0010064

#define	AMD_10H_11H_MAX_STATES	16

/* for MSR_AMD_10H_11H_LIMIT C001_0061 */
#define	AMD_10H_11H_GET_PSTATE_MAX_VAL(msr)	(((msr) >> 4) & 0x7)
#define	AMD_10H_11H_GET_PSTATE_LIMIT(msr)	(((msr)) & 0x7)
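/*
 * Per the BKDGs cited above, CurPstateLimit (bits 2:0) is the
 * highest-performance (lowest-numbered) P-state currently allowed, and
 * PstateMaxVal (bits 6:4) is the lowest-performance (highest-numbered)
 * P-state that is defined.
 */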
/* for MSR_AMD_10H_11H_CONFIG 10h:C001_0064:68 / 11h:C001_0064:6B */
#define	AMD_10H_11H_CUR_VID(msr)		(((msr) >> 9) & 0x7F)
#define	AMD_10H_11H_CUR_DID(msr)		(((msr) >> 6) & 0x07)
#define	AMD_10H_11H_CUR_FID(msr)		((msr) & 0x3F)

#define	AMD_17H_CUR_VID(msr)			(((msr) >> 14) & 0xFF)
#define	AMD_17H_CUR_DID(msr)			(((msr) >> 8) & 0x3F)
#define	AMD_17H_CUR_FID(msr)			((msr) & 0xFF)
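/*
 * The macros above extract the P-state definition fields: CpuFid, CpuDid
 * and CpuVid occupy bits 5:0, 8:6 and 15:9 on families 10h-16h, while
 * family 17h (and Hygon family 18h) widens them to CpuFid 7:0, CpuDfsId
 * 13:8 and CpuVid 21:14.  See hwpstate_get_info_from_msr() below for how
 * the core frequency is derived from these fields.
 */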

#define	HWPSTATE_DEBUG(dev, msg...)			\
	do {						\
		if (hwpstate_verbose)			\
			device_printf(dev, msg);	\
	} while (0)
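/*
 * HWPSTATE_DEBUG output is enabled by the debug.hwpstate_verbose sysctl
 * (also settable as a loader tunable), defined below.
 */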

struct hwpstate_setting {
	int	freq;		/* CPU clock in MHz or 100ths of a percent. */
	int	volts;		/* Voltage in mV. */
	int	power;		/* Power consumed in mW. */
	int	lat;		/* Transition latency in us. */
	int	pstate_id;	/* P-State id */
};

struct hwpstate_softc {
	device_t		dev;
	struct hwpstate_setting	hwpstate_settings[AMD_10H_11H_MAX_STATES];
	int			cfnum;	/* Number of valid settings. */
};

static void	hwpstate_identify(driver_t *driver, device_t parent);
static int	hwpstate_probe(device_t dev);
static int	hwpstate_attach(device_t dev);
static int	hwpstate_detach(device_t dev);
static int	hwpstate_set(device_t dev, const struct cf_setting *cf);
static int	hwpstate_get(device_t dev, struct cf_setting *cf);
static int	hwpstate_settings(device_t dev, struct cf_setting *sets, int *count);
static int	hwpstate_type(device_t dev, int *type);
static int	hwpstate_shutdown(device_t dev);
static int	hwpstate_features(driver_t *driver, u_int *features);
static int	hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev);
static int	hwpstate_get_info_from_msr(device_t dev);
static int	hwpstate_goto_pstate(device_t dev, int pstate_id);

static int	hwpstate_verbose;
SYSCTL_INT(_debug, OID_AUTO, hwpstate_verbose, CTLFLAG_RWTUN,
    &hwpstate_verbose, 0, "Debug hwpstate");

static int	hwpstate_verify;
SYSCTL_INT(_debug, OID_AUTO, hwpstate_verify, CTLFLAG_RWTUN,
    &hwpstate_verify, 0, "Verify P-state after setting");

static bool	hwpstate_pstate_limit;
SYSCTL_BOOL(_debug, OID_AUTO, hwpstate_pstate_limit, CTLFLAG_RWTUN,
    &hwpstate_pstate_limit, 0,
    "If enabled (1), limit administrative control of P-states to the value in "
    "CurPstateLimit");

static device_method_t hwpstate_methods[] = {
	/* Device interface */
	DEVMETHOD(device_identify,	hwpstate_identify),
	DEVMETHOD(device_probe,		hwpstate_probe),
	DEVMETHOD(device_attach,	hwpstate_attach),
	DEVMETHOD(device_detach,	hwpstate_detach),
	DEVMETHOD(device_shutdown,	hwpstate_shutdown),

	/* cpufreq interface */
	DEVMETHOD(cpufreq_drv_set,	hwpstate_set),
	DEVMETHOD(cpufreq_drv_get,	hwpstate_get),
	DEVMETHOD(cpufreq_drv_settings,	hwpstate_settings),
	DEVMETHOD(cpufreq_drv_type,	hwpstate_type),

	/* ACPI interface */
	DEVMETHOD(acpi_get_features,	hwpstate_features),
	{0, 0}
};

static devclass_t hwpstate_devclass;
static driver_t hwpstate_driver = {
	"hwpstate",
	hwpstate_methods,
	sizeof(struct hwpstate_softc),
};

DRIVER_MODULE(hwpstate, cpu, hwpstate_driver, hwpstate_devclass, 0, 0);

/*
 * Go to Px-state on all cpus, considering the limit register (if so
 * configured).
 */
static int
hwpstate_goto_pstate(device_t dev, int id)
{
	sbintime_t sbt;
	uint64_t msr;
	int cpu, i, j, limit;

	if (hwpstate_pstate_limit) {
		/* get the current pstate limit */
		msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
		limit = AMD_10H_11H_GET_PSTATE_LIMIT(msr);
		if (limit > id) {
			HWPSTATE_DEBUG(dev, "Restricting requested P%d to P%d "
			    "due to HW limit\n", id, limit);
			id = limit;
		}
	}

	cpu = curcpu;
	HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n", id, cpu);
	/* Go To Px-state */
	wrmsr(MSR_AMD_10H_11H_CONTROL, id);

	/*
	 * We are going to the same Px-state on all cpus.
	 * Probably should take _PSD into account.
	 */
	CPU_FOREACH(i) {
		if (i == cpu)
			continue;

		/* Bind to each cpu. */
		thread_lock(curthread);
		sched_bind(curthread, i);
		thread_unlock(curthread);
		HWPSTATE_DEBUG(dev, "setting P%d-state on cpu%d\n", id, i);
		/* Go To Px-state */
		wrmsr(MSR_AMD_10H_11H_CONTROL, id);
	}

	/*
	 * Verify whether each core is in the requested P-state.
	 */
	if (hwpstate_verify) {
		CPU_FOREACH(i) {
			thread_lock(curthread);
			sched_bind(curthread, i);
			thread_unlock(curthread);
			/* Wait up to 100 iterations of 100us each. */
			for (j = 0; j < 100; j++) {
				/*
				 * Read the status; the core may not have
				 * reached the requested P-state yet.
				 */
				msr = rdmsr(MSR_AMD_10H_11H_STATUS);
				if (msr == id)
					break;
				sbt = SBT_1MS / 10;
				tsleep_sbt(dev, PZERO, "pstate_goto", sbt,
				    sbt >> tc_precexp, 0);
			}
			HWPSTATE_DEBUG(dev, "result: P%d-state on cpu%d\n",
			    (int)msr, i);
			if (msr != id) {
				HWPSTATE_DEBUG(dev,
				    "error: P-state transition timed out.\n");
				return (ENXIO);
			}
		}
	}

	return (0);
}

static int
hwpstate_set(device_t dev, const struct cf_setting *cf)
{
	struct hwpstate_softc *sc;
	struct hwpstate_setting *set;
	int i;

	if (cf == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);
	set = sc->hwpstate_settings;
	for (i = 0; i < sc->cfnum; i++)
		if (CPUFREQ_CMP(cf->freq, set[i].freq))
			break;
	if (i == sc->cfnum)
		return (EINVAL);

	return (hwpstate_goto_pstate(dev, set[i].pstate_id));
}

static int
hwpstate_get(device_t dev, struct cf_setting *cf)
{
	struct hwpstate_softc *sc;
	struct hwpstate_setting set;
	uint64_t msr;

	sc = device_get_softc(dev);
	if (cf == NULL)
		return (EINVAL);
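	/*
	 * The P-state Status MSR reports the current P-state number; use it
	 * to index the settings cached at probe time.
	 */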
	msr = rdmsr(MSR_AMD_10H_11H_STATUS);
	if (msr >= sc->cfnum)
		return (EINVAL);
	set = sc->hwpstate_settings[msr];

	cf->freq = set.freq;
	cf->volts = set.volts;
	cf->power = set.power;
	cf->lat = set.lat;
	cf->dev = dev;
	return (0);
}

static int
hwpstate_settings(device_t dev, struct cf_setting *sets, int *count)
{
	struct hwpstate_softc *sc;
	struct hwpstate_setting set;
	int i;

	if (sets == NULL || count == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);
	if (*count < sc->cfnum)
		return (E2BIG);
	for (i = 0; i < sc->cfnum; i++, sets++) {
		set = sc->hwpstate_settings[i];
		sets->freq = set.freq;
		sets->volts = set.volts;
		sets->power = set.power;
		sets->lat = set.lat;
		sets->dev = dev;
	}
	*count = sc->cfnum;

	return (0);
}

static int
hwpstate_type(device_t dev, int *type)
{

	if (type == NULL)
		return (EINVAL);

	*type = CPUFREQ_TYPE_ABSOLUTE;
	return (0);
}

static void
hwpstate_identify(driver_t *driver, device_t parent)
{

	if (device_find_child(parent, "hwpstate", -1) != NULL)
		return;

	if ((cpu_vendor_id != CPU_VENDOR_AMD || CPUID_TO_FAMILY(cpu_id) < 0x10) &&
	    cpu_vendor_id != CPU_VENDOR_HYGON)
		return;

	/*
	 * Check whether the hardware P-state enable bit (the HwPstate flag
	 * from CPUID Fn8000_0007, cached in amd_pminfo) is set.
	 */
	if ((amd_pminfo & AMDPM_HW_PSTATE) == 0) {
		HWPSTATE_DEBUG(parent, "hwpstate enable bit is not set.\n");
		return;
	}

	if (resource_disabled("hwpstate", 0))
		return;

	if (BUS_ADD_CHILD(parent, 10, "hwpstate", -1) == NULL)
		device_printf(parent, "hwpstate: add child failed\n");
}

static int
hwpstate_probe(device_t dev)
{
	struct hwpstate_softc *sc;
	device_t perf_dev;
	uint64_t msr;
	int error, type;

	/*
	 * Only hwpstate0 is probed; it coexists well with acpi_throttle.
	 */
	if (device_get_unit(dev) != 0)
		return (ENXIO);

	sc = device_get_softc(dev);
	sc->dev = dev;

	/*
	 * Check whether acpi_perf has the INFO_ONLY flag.
	 */
	perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1);
	error = TRUE;
	if (perf_dev && device_is_attached(perf_dev)) {
		error = CPUFREQ_DRV_TYPE(perf_dev, &type);
		if (error == 0) {
			if ((type & CPUFREQ_FLAG_INFO_ONLY) == 0) {
				/*
				 * If acpi_perf doesn't have the INFO_ONLY
				 * flag, it will take care of the P-state
				 * transitions itself.
				 */
				HWPSTATE_DEBUG(dev, "acpi_perf will take care of pstate transitions.\n");
				return (ENXIO);
			} else {
				/*
				 * If acpi_perf has the INFO_ONLY flag
				 * (_PCT is FFixedHW), we can get the _PSS
				 * info from acpi_perf without walking the
				 * ACPI namespace ourselves.
				 */
				HWPSTATE_DEBUG(dev, "going to fetch info from acpi_perf\n");
				error = hwpstate_get_info_from_acpi_perf(dev, perf_dev);
			}
		}
	}

	if (error == 0) {
		/*
		 * We got the _PSS info from acpi_perf without error;
		 * sanity-check it against the MSR.
		 */
		msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
		if (sc->cfnum != 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr)) {
			HWPSTATE_DEBUG(dev, "MSR (%jd) and ACPI _PSS (%d)"
			    " count mismatch\n", (intmax_t)msr, sc->cfnum);
			error = TRUE;
		}
	}

	/*
	 * If we could not get the info from acpi_perf, fall back to
	 * reading the MSRs.
	 */
	if (error)
		error = hwpstate_get_info_from_msr(dev);
	if (error)
		return (error);

	device_set_desc(dev, "Cool`n'Quiet 2.0");
	return (0);
}

static int
hwpstate_attach(device_t dev)
{

	return (cpufreq_register(dev));
}

static int
hwpstate_get_info_from_msr(device_t dev)
{
	struct hwpstate_softc *sc;
	struct hwpstate_setting *hwpstate_set;
	uint64_t msr;
	int family, i, fid, did;

	family = CPUID_TO_FAMILY(cpu_id);
	sc = device_get_softc(dev);
	/* Get the P-state count. */
	msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
	sc->cfnum = 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr);
	hwpstate_set = sc->hwpstate_settings;
	for (i = 0; i < sc->cfnum; i++) {
		msr = rdmsr(MSR_AMD_10H_11H_CONFIG + i);
		if ((msr & ((uint64_t)1 << 63)) == 0) {	/* PstateEn */
			HWPSTATE_DEBUG(dev, "msr is not valid.\n");
			return (ENXIO);
		}
		did = AMD_10H_11H_CUR_DID(msr);
		fid = AMD_10H_11H_CUR_FID(msr);

		/* Convert fid/did to frequency. */
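		/*
		 * Per-family core frequency formulas, as implemented below
		 * (the 10h/11h forms come from the BKDGs referenced at the
		 * top of this file):
		 *   11h:             100 MHz * (CpuFid + 0x08) / 2^CpuDid
		 *   10h/12h/15h/16h: 100 MHz * (CpuFid + 0x10) / 2^CpuDid
		 *   17h/18h:         CpuFid * 200 MHz / CpuDfsId
		 */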
		switch (family) {
		case 0x11:
			hwpstate_set[i].freq = (100 * (fid + 0x08)) >> did;
			break;
		case 0x10:
		case 0x12:
		case 0x15:
		case 0x16:
			hwpstate_set[i].freq = (100 * (fid + 0x10)) >> did;
			break;
		case 0x17:
		case 0x18:
			did = AMD_17H_CUR_DID(msr);
			if (did == 0) {
				HWPSTATE_DEBUG(dev, "unexpected did: 0\n");
				did = 1;
			}
			fid = AMD_17H_CUR_FID(msr);
			hwpstate_set[i].freq = (200 * fid) / did;
			break;
		default:
			HWPSTATE_DEBUG(dev, "get_info_from_msr: %s family"
			    " 0x%02x CPUs are not supported yet\n",
			    cpu_vendor_id == CPU_VENDOR_HYGON ? "Hygon" : "AMD",
			    family);
			return (ENXIO);
		}
		hwpstate_set[i].pstate_id = i;
		/* The voltage calculation was removed; report unknown values. */
		hwpstate_set[i].volts = CPUFREQ_VAL_UNKNOWN;
		hwpstate_set[i].power = CPUFREQ_VAL_UNKNOWN;
		hwpstate_set[i].lat = CPUFREQ_VAL_UNKNOWN;
	}
	return (0);
}

static int
hwpstate_get_info_from_acpi_perf(device_t dev, device_t perf_dev)
{
	struct hwpstate_softc *sc;
	struct cf_setting *perf_set;
	struct hwpstate_setting *hwpstate_set;
	int count, error, i;

	perf_set = malloc(MAX_SETTINGS * sizeof(*perf_set), M_TEMP, M_NOWAIT);
	if (perf_set == NULL) {
		HWPSTATE_DEBUG(dev, "nomem\n");
		return (ENOMEM);
	}
	/*
	 * Fetch settings from acpi_perf.  At this point it is attached and
	 * advertises the INFO_ONLY flag.
	 */
	count = MAX_SETTINGS;
	error = CPUFREQ_DRV_SETTINGS(perf_dev, perf_set, &count);
	if (error) {
		HWPSTATE_DEBUG(dev, "error: CPUFREQ_DRV_SETTINGS.\n");
		goto out;
	}
	sc = device_get_softc(dev);
	sc->cfnum = count;
	hwpstate_set = sc->hwpstate_settings;
	for (i = 0; i < count; i++) {
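		/*
		 * acpi_perf stores the _PSS control value in spec[0]; for
		 * hwpstate it is expected to equal the P-state index.
		 */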
		if (i == perf_set[i].spec[0]) {
			hwpstate_set[i].pstate_id = i;
			hwpstate_set[i].freq = perf_set[i].freq;
			hwpstate_set[i].volts = perf_set[i].volts;
			hwpstate_set[i].power = perf_set[i].power;
			hwpstate_set[i].lat = perf_set[i].lat;
		} else {
			HWPSTATE_DEBUG(dev, "ACPI _PSS object mismatch.\n");
			error = ENXIO;
			goto out;
		}
	}
out:
	if (perf_set)
		free(perf_set, M_TEMP);
	return (error);
}

static int
hwpstate_detach(device_t dev)
{

	hwpstate_goto_pstate(dev, 0);
	return (cpufreq_unregister(dev));
}

static int
hwpstate_shutdown(device_t dev)
{

	/* hwpstate_goto_pstate(dev, 0); */
	return (0);
}

static int
hwpstate_features(driver_t *driver, u_int *features)
{

	/* Notify the ACPI CPU that we support direct access to MSRs */
	*features = ACPI_CAP_PERF_MSRS;
	return (0);
}