/*-
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	$FreeBSD: head/sys/dev/acpica/acpi_cpu.c 109760 2003-01-23 22:18:14Z njl $
 */

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/power.h>

#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>

#include "acpi.h"

#include <dev/acpica/acpivar.h>

/*
 * Support for ACPI Processor devices.
 *
 * Note that this only provides ACPI 1.0 support (with the exception of the
 * PSTATE_CNT field).  2.0 support will involve implementing _PTC, _PCT,
 * _PSS and _PPC.
 */

/*
 * Hooks for the ACPI CA debugging infrastructure
 */
#define _COMPONENT	ACPI_PROCESSOR
ACPI_MODULE_NAME("PROCESSOR")

struct acpi_cpu_softc {
    device_t		cpu_dev;
    ACPI_HANDLE		cpu_handle;

    u_int32_t		cpu_id;

    /* CPU throttling control register */
    struct resource	*cpu_p_blk;
#define CPU_GET_P_CNT(sc)	(bus_space_read_4(rman_get_bustag((sc)->cpu_p_blk), 	\
						  rman_get_bushandle((sc)->cpu_p_blk),	\
						  0))
#define CPU_SET_P_CNT(sc, val)	(bus_space_write_4(rman_get_bustag((sc)->cpu_p_blk), 	\
						  rman_get_bushandle((sc)->cpu_p_blk),	\
						  0, (val)))
#define CPU_P_CNT_THT_EN	(1<<4)
};
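
/*
 * P_CNT register layout, as used above: bit 4 is THT_EN, and the CLK_VAL
 * duty-cycle field occupies DutyWidth bits starting at bit DutyOffset (both
 * taken from the FADT).  We only ever touch THT_EN and CLK_VAL; all other
 * bits are read back and preserved.
 */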

/*
 * Speeds are stored in counts, from 1 - CPU_MAX_SPEED, and
 * reported to the user in tenths of a percent.
 */
static u_int32_t	cpu_duty_offset;
static u_int32_t	cpu_duty_width;
#define CPU_MAX_SPEED		(1 << cpu_duty_width)
#define CPU_SPEED_PERCENT(x)	((1000 * (x)) / CPU_MAX_SPEED)
#define CPU_SPEED_PRINTABLE(x)	(CPU_SPEED_PERCENT(x) / 10),(CPU_SPEED_PERCENT(x) % 10)
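
/*
 * For example, a DutyWidth of 3 gives CPU_MAX_SPEED = 8, so a count of 3 is
 * CPU_SPEED_PERCENT(3) = 375 and is printed as "37.5%".
 */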

static u_int32_t	cpu_smi_cmd;	/* should be a generic way to do this */
static u_int8_t		cpu_pstate_cnt;

static u_int32_t	cpu_current_state;
static u_int32_t	cpu_performance_state;
static u_int32_t	cpu_economy_state;
static u_int32_t	cpu_max_state;

static device_t		*cpu_devices;
static int		cpu_ndevices;

static struct sysctl_ctx_list	acpi_cpu_sysctl_ctx;
static struct sysctl_oid	*acpi_cpu_sysctl_tree;

static int	acpi_cpu_probe(device_t dev);
static int	acpi_cpu_attach(device_t dev);
static void	acpi_cpu_init_throttling(void *arg);
static void	acpi_cpu_set_speed(u_int32_t speed);
static void	acpi_cpu_power_profile(void *arg);
static int	acpi_cpu_speed_sysctl(SYSCTL_HANDLER_ARGS);

static device_method_t acpi_cpu_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,	acpi_cpu_probe),
    DEVMETHOD(device_attach,	acpi_cpu_attach),

    {0, 0}
};

static driver_t acpi_cpu_driver = {
    "acpi_cpu",
    acpi_cpu_methods,
    sizeof(struct acpi_cpu_softc),
};

static devclass_t acpi_cpu_devclass;
DRIVER_MODULE(acpi_cpu, acpi, acpi_cpu_driver, acpi_cpu_devclass, 0, 0);

static int
acpi_cpu_probe(device_t dev)
{
    if (!acpi_disabled("cpu") &&
	(acpi_get_type(dev) == ACPI_TYPE_PROCESSOR)) {
	device_set_desc(dev, "CPU");	/* XXX get more verbose description? */
	return(0);
    }
    return(ENXIO);
}

static int
acpi_cpu_attach(device_t dev)
{
    struct acpi_cpu_softc	*sc;
    struct acpi_softc		*acpi_sc;
    ACPI_OBJECT			processor;
    ACPI_BUFFER			buf;
    ACPI_STATUS			status;
    u_int32_t			p_blk;
    u_int32_t			p_blk_length;
    u_int32_t			duty_end;
    int				rid;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    ACPI_ASSERTLOCK;

    sc = device_get_softc(dev);
    sc->cpu_dev = dev;
    sc->cpu_handle = acpi_get_handle(dev);

    /*
     * Get global parameters from the FADT.
     */
    if (device_get_unit(sc->cpu_dev) == 0) {
	cpu_duty_offset = AcpiGbl_FADT->DutyOffset;
	cpu_duty_width = AcpiGbl_FADT->DutyWidth;
	cpu_smi_cmd = AcpiGbl_FADT->SmiCmd;
	cpu_pstate_cnt = AcpiGbl_FADT->PstateCnt;

	/* validate the offset/width */
	if (cpu_duty_width > 0) {
		duty_end = cpu_duty_offset + cpu_duty_width - 1;
		/* check that it fits */
		if (duty_end > 31) {
			printf("acpi_cpu: CLK_VAL field overflows P_CNT register\n");
			cpu_duty_width = 0;
		}
		/* check for overlap with the THT_EN bit */
		if ((cpu_duty_offset <= 4) && (duty_end >= 4)) {
			printf("acpi_cpu: CLK_VAL field overlaps THT_EN bit\n");
			cpu_duty_width = 0;
		}
	}
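
	/*
	 * For example, DutyOffset = 1 and DutyWidth = 3 puts CLK_VAL in
	 * bits 1-3 and passes both checks, while DutyOffset = 3 with the
	 * same width would reach bit 4 (THT_EN) and throttling would be
	 * disabled here.
	 */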

	/*
	 * Start the throttling process once the probe phase completes, if we
	 * think that it's going to be useful.  If the duty width value is
	 * zero, there are no significant bits in the register and thus no
	 * throttled states.
	 */
	if (cpu_duty_width > 0) {
	    AcpiOsQueueForExecution(OSD_PRIORITY_LO, acpi_cpu_init_throttling, NULL);

	    acpi_sc = acpi_device_get_parent_softc(dev);
	    sysctl_ctx_init(&acpi_cpu_sysctl_ctx);
	    acpi_cpu_sysctl_tree = SYSCTL_ADD_NODE(&acpi_cpu_sysctl_ctx,
						  SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree),
						  OID_AUTO, "cpu", CTLFLAG_RD, 0, "");

	    SYSCTL_ADD_INT(&acpi_cpu_sysctl_ctx, SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
			   OID_AUTO, "max_speed", CTLFLAG_RD,
			   &cpu_max_state, 0, "maximum CPU speed");
	    SYSCTL_ADD_INT(&acpi_cpu_sysctl_ctx, SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
			   OID_AUTO, "current_speed", CTLFLAG_RD,
			   &cpu_current_state, 0, "current CPU speed");
	    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx, SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
			    OID_AUTO, "performance_speed", CTLTYPE_INT | CTLFLAG_RW,
			    &cpu_performance_state, 0, acpi_cpu_speed_sysctl, "I", "");
	    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx, SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
			    OID_AUTO, "economy_speed", CTLTYPE_INT | CTLFLAG_RW,
			    &cpu_economy_state, 0, acpi_cpu_speed_sysctl, "I", "");
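
	    /*
	     * These appear under hw.acpi.cpu; for example:
	     *
	     *   sysctl hw.acpi.cpu.max_speed hw.acpi.cpu.current_speed
	     *   sysctl hw.acpi.cpu.economy_speed=2
	     *
	     * The writable values are counts from 1 to max_speed.
	     */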
	}
    }

    /*
     * Get the processor object.
     */
    buf.Pointer = &processor;
    buf.Length = sizeof(processor);
    if (ACPI_FAILURE(status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf))) {
	device_printf(sc->cpu_dev, "couldn't get Processor object - %s\n", AcpiFormatException(status));
	return_VALUE(ENXIO);
    }
    if (processor.Type != ACPI_TYPE_PROCESSOR) {
	device_printf(sc->cpu_dev, "Processor object has bad type %d\n", processor.Type);
	return_VALUE(ENXIO);
    }
    sc->cpu_id = processor.Processor.ProcId;

    /*
     * If it looks like we support throttling, find this CPU's P_BLK.
     *
     * Note that some systems seem to duplicate the P_BLK pointer across
     * multiple CPUs, so not getting the resource is not fatal.
     *
     * XXX should support _PTC here as well, once we work out how to parse it.
     *
     * XXX is it valid to assume that the P_BLK must be 6 bytes long?
     */
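    /*
     * (For reference, ACPI defines the 6-byte processor register block as
     * P_CNT at offset 0, followed by the one-byte P_LVL2 and P_LVL3
     * registers.)
     */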
    if (cpu_duty_width > 0) {
	p_blk = processor.Processor.PblkAddress;
	p_blk_length = processor.Processor.PblkLength;

	/* allocate bus space if possible */
	if ((p_blk > 0) && (p_blk_length == 6)) {
	    rid = 0;
	    bus_set_resource(sc->cpu_dev, SYS_RES_IOPORT, rid, p_blk, p_blk_length);
	    sc->cpu_p_blk = bus_alloc_resource(sc->cpu_dev, SYS_RES_IOPORT, &rid, 0, ~0, 1,
					       RF_ACTIVE);

	    ACPI_DEBUG_PRINT((ACPI_DB_IO, "acpi_cpu%d: throttling with P_BLK at 0x%x/%d%s\n",
			      device_get_unit(sc->cpu_dev), p_blk, p_blk_length,
			      sc->cpu_p_blk ? "" : " (shadowed)"));
	}
    }
    return_VALUE(0);
}

/*
 * Call this *after* all CPUs have been attached.
 *
 * Takes the ACPI lock to avoid fighting anyone over the SMI command
 * port.  Could probably lock less code.
 */
static void
acpi_cpu_init_throttling(void *arg)
{
    int cpu_temp_speed;
    ACPI_LOCK_DECL;

    ACPI_LOCK;

    /* get set of CPU devices */
    devclass_get_devices(acpi_cpu_devclass, &cpu_devices, &cpu_ndevices);

    /* initialise throttling states */
    cpu_max_state = CPU_MAX_SPEED;
    cpu_performance_state = cpu_max_state;
    cpu_economy_state = cpu_performance_state / 2;
    if (cpu_economy_state == 0)		/* 0 is 'reserved' */
	cpu_economy_state++;
    if (TUNABLE_INT_FETCH("hw.acpi.cpu.performance_speed",
	&cpu_temp_speed) && cpu_temp_speed > 0 &&
	cpu_temp_speed <= cpu_max_state)
	cpu_performance_state = cpu_temp_speed;
    if (TUNABLE_INT_FETCH("hw.acpi.cpu.economy_speed",
	&cpu_temp_speed) && cpu_temp_speed > 0 &&
	cpu_temp_speed <= cpu_max_state)
	cpu_economy_state = cpu_temp_speed;

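    /*
     * The tunables fetched above come from the loader and can be set in
     * /boot/loader.conf, e.g.:
     *
     *   hw.acpi.cpu.performance_speed="8"
     *   hw.acpi.cpu.economy_speed="4"
     */
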
    /* register performance profile change handler */
    EVENTHANDLER_REGISTER(power_profile_change, acpi_cpu_power_profile, NULL, 0);

    /* if ACPI 2.0+, signal platform that we are taking over throttling */
    if (cpu_pstate_cnt != 0) {
	/* XXX should be a generic interface for this */
	AcpiOsWritePort(cpu_smi_cmd, cpu_pstate_cnt, 8);
    }

    ACPI_UNLOCK;

    /* set initial speed */
    acpi_cpu_power_profile(NULL);

    printf("acpi_cpu: throttling enabled, %d steps (100%% to %d.%d%%), "
	   "currently %d.%d%%\n", CPU_MAX_SPEED, CPU_SPEED_PRINTABLE(1),
	   CPU_SPEED_PRINTABLE(cpu_current_state));
}

/*
 * Set CPUs to the new state.
 *
 * Must be called with the ACPI lock held.
 */
static void
acpi_cpu_set_speed(u_int32_t speed)
{
    struct acpi_cpu_softc	*sc;
    int				i;
    u_int32_t			p_cnt, clk_val;

    ACPI_ASSERTLOCK;

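    /*
     * For each CPU with a P_BLK we clear THT_EN, then (unless we are
     * running at full speed) program CLK_VAL and set THT_EN again.  For
     * example, with DutyOffset = 1 and DutyWidth = 3, a speed of 4 programs
     * CLK_VAL to 100b, i.e. a 50% duty cycle.
     */
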
    /* iterate over processors */
    for (i = 0; i < cpu_ndevices; i++) {
	sc = device_get_softc(cpu_devices[i]);
	if (sc->cpu_p_blk == NULL)
	    continue;

	/* get the current P_CNT value and disable throttling */
	p_cnt = CPU_GET_P_CNT(sc);
	p_cnt &= ~CPU_P_CNT_THT_EN;
	CPU_SET_P_CNT(sc, p_cnt);

	/* if we're at maximum speed, that's all */
	if (speed < CPU_MAX_SPEED) {

	    /* mask off the entire old CLK_VAL field and or-in the new value */
	    clk_val = (CPU_MAX_SPEED - 1) << cpu_duty_offset;
	    p_cnt &= ~clk_val;
	    p_cnt |= (speed << cpu_duty_offset);

	    /* write the new P_CNT value and then enable throttling */
	    CPU_SET_P_CNT(sc, p_cnt);
	    p_cnt |= CPU_P_CNT_THT_EN;
	    CPU_SET_P_CNT(sc, p_cnt);
	}
	ACPI_VPRINT(sc->cpu_dev, acpi_device_get_parent_softc(sc->cpu_dev),
	    "set speed to %d.%d%%\n", CPU_SPEED_PRINTABLE(speed));
    }
    cpu_current_state = speed;
}

/*
 * Power profile change hook.
 *
 * Uses the ACPI lock to avoid reentrancy.
 */
static void
acpi_cpu_power_profile(void *arg)
{
    int		state;
    u_int32_t	new;
    ACPI_LOCK_DECL;

    state = power_profile_get_state();
    if (state != POWER_PROFILE_PERFORMANCE &&
        state != POWER_PROFILE_ECONOMY) {
	return;
    }

    ACPI_LOCK;

    switch (state) {
    case POWER_PROFILE_PERFORMANCE:
	new = cpu_performance_state;
	break;
    case POWER_PROFILE_ECONOMY:
	new = cpu_economy_state;
	break;
    default:
	new = cpu_current_state;
	break;
    }

    if (cpu_current_state != new)
	acpi_cpu_set_speed(new);

    ACPI_UNLOCK;
}

/*
 * Handle changes in the performance/economy CPU settings.
 *
 * Does not need the ACPI lock (although setting *argp should
 * probably be atomic).
 */
static int
acpi_cpu_speed_sysctl(SYSCTL_HANDLER_ARGS)
{
    u_int32_t	*argp;
    u_int32_t	arg;
    int		error;

    argp = (u_int32_t *)oidp->oid_arg1;
    arg = *argp;
    error = sysctl_handle_int(oidp, &arg, 0, req);

    /* error or no new value */
    if ((error != 0) || (req->newptr == NULL))
	return(error);

    /* range check */
    if ((arg < 1) || (arg > cpu_max_state))
	return(EINVAL);

    /* set new value and possibly switch */
    *argp = arg;
    acpi_cpu_power_profile(NULL);

    return(0);
}