/* acpi_cpu.c revision 170214 */
118334Speter/*- 218334Speter * Copyright (c) 2003-2005 Nate Lawson (SDG) 318334Speter * Copyright (c) 2001 Michael Smith 418334Speter * All rights reserved. 518334Speter * 618334Speter * Redistribution and use in source and binary forms, with or without 718334Speter * modification, are permitted provided that the following conditions 818334Speter * are met: 918334Speter * 1. Redistributions of source code must retain the above copyright 1018334Speter * notice, this list of conditions and the following disclaimer. 1118334Speter * 2. Redistributions in binary form must reproduce the above copyright 1218334Speter * notice, this list of conditions and the following disclaimer in the 1318334Speter * documentation and/or other materials provided with the distribution. 1418334Speter * 1518334Speter * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 1618334Speter * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 1718334Speter * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 1818334Speter * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 1918334Speter * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 2018334Speter * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 2118334Speter * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 2218334Speter * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 2318334Speter * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 2418334Speter * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 2518334Speter * SUCH DAMAGE. 
2618334Speter */ 2718334Speter 2818334Speter#include <sys/cdefs.h> 2918334Speter__FBSDID("$FreeBSD: head/sys/dev/acpica/acpi_cpu.c 170214 2007-06-02 20:01:40Z njl $"); 3018334Speter 3118334Speter#include "opt_acpi.h" 3218334Speter#include <sys/param.h> 3318334Speter#include <sys/bus.h> 3418334Speter#include <sys/cpu.h> 3518334Speter#include <sys/kernel.h> 3618334Speter#include <sys/malloc.h> 3718334Speter#include <sys/module.h> 3818334Speter#include <sys/pcpu.h> 3918334Speter#include <sys/power.h> 4018334Speter#include <sys/proc.h> 4118334Speter#include <sys/sbuf.h> 4218334Speter#include <sys/smp.h> 4318334Speter 4418334Speter#include <dev/pci/pcivar.h> 4518334Speter#include <machine/atomic.h> 4618334Speter#include <machine/bus.h> 4718334Speter#include <sys/rman.h> 4818334Speter 4918334Speter#include <contrib/dev/acpica/acpi.h> 5018334Speter#include <dev/acpica/acpivar.h> 5118334Speter 5218334Speter/* 5318334Speter * Support for ACPI Processor devices, including C[1-3] sleep states. 5418334Speter */ 5518334Speter 5618334Speter/* Hooks for the ACPI CA debugging infrastructure */ 5718334Speter#define _COMPONENT ACPI_PROCESSOR 5818334SpeterACPI_MODULE_NAME("PROCESSOR") 5918334Speter 6018334Speterstruct acpi_cx { 6118334Speter struct resource *p_lvlx; /* Register to read to enter state. */ 6218334Speter uint32_t type; /* C1-3 (C4 and up treated as C3). */ 6318334Speter uint32_t trans_lat; /* Transition latency (usec). */ 6418334Speter uint32_t power; /* Power consumed (mW). */ 6518334Speter int res_type; /* Resource type for p_lvlx. */ 6618334Speter}; 6718334Speter#define MAX_CX_STATES 8 6818334Speter 6918334Speterstruct acpi_cpu_softc { 7018334Speter device_t cpu_dev; 7118334Speter ACPI_HANDLE cpu_handle; 7218334Speter struct pcpu *cpu_pcpu; 7318334Speter uint32_t cpu_acpi_id; /* ACPI processor id */ 7418334Speter uint32_t cpu_p_blk; /* ACPI P_BLK location */ 7518334Speter uint32_t cpu_p_blk_len; /* P_BLK length (must be 6). 
*/ 7618334Speter struct acpi_cx cpu_cx_states[MAX_CX_STATES]; 7718334Speter int cpu_cx_count; /* Number of valid Cx states. */ 7818334Speter int cpu_prev_sleep;/* Last idle sleep duration. */ 7918334Speter int cpu_features; /* Child driver supported features. */ 8018334Speter /* Runtime state. */ 8118334Speter int cpu_non_c3; /* Index of lowest non-C3 state. */ 8218334Speter int cpu_short_slp; /* Count of < 1us sleeps. */ 8318334Speter u_int cpu_cx_stats[MAX_CX_STATES];/* Cx usage history. */ 8418334Speter /* Values for sysctl. */ 8518334Speter struct sysctl_ctx_list cpu_sysctl_ctx; 8618334Speter struct sysctl_oid *cpu_sysctl_tree; 8718334Speter int cpu_cx_lowest; 8818334Speter char cpu_cx_supported[64]; 8918334Speter int cpu_rid; 9018334Speter}; 9118334Speter 9218334Speterstruct acpi_cpu_device { 9318334Speter struct resource_list ad_rl; 9418334Speter}; 9518334Speter 9618334Speter#define CPU_GET_REG(reg, width) \ 9718334Speter (bus_space_read_ ## width(rman_get_bustag((reg)), \ 9818334Speter rman_get_bushandle((reg)), 0)) 9918334Speter#define CPU_SET_REG(reg, width, val) \ 10018334Speter (bus_space_write_ ## width(rman_get_bustag((reg)), \ 10118334Speter rman_get_bushandle((reg)), 0, (val))) 10218334Speter 10318334Speter#define PM_USEC(x) ((x) >> 2) /* ~4 clocks per usec (3.57955 Mhz) */ 10418334Speter 10518334Speter#define ACPI_NOTIFY_CX_STATES 0x81 /* _CST changed. */ 10618334Speter 10718334Speter#define CPU_QUIRK_NO_C3 (1<<0) /* C3-type states are not usable. */ 10818334Speter#define CPU_QUIRK_NO_BM_CTRL (1<<2) /* No bus mastering control. */ 10918334Speter 11018334Speter#define PCI_VENDOR_INTEL 0x8086 11118334Speter#define PCI_DEVICE_82371AB_3 0x7113 /* PIIX4 chipset for quirks. */ 11218334Speter#define PCI_REVISION_A_STEP 0 11318334Speter#define PCI_REVISION_B_STEP 1 11418334Speter#define PCI_REVISION_4E 2 11518334Speter#define PCI_REVISION_4M 3 11618334Speter 11718334Speter/* Platform hardware resource information. 
*/ 11818334Speterstatic uint32_t cpu_smi_cmd; /* Value to write to SMI_CMD. */ 11918334Speterstatic uint8_t cpu_cst_cnt; /* Indicate we are _CST aware. */ 12018334Speterstatic int cpu_quirks; /* Indicate any hardware bugs. */ 12118334Speter 12218334Speter/* Runtime state. */ 12318334Speterstatic int cpu_disable_idle; /* Disable entry to idle function */ 12418334Speterstatic int cpu_cx_count; /* Number of valid Cx states */ 12518334Speter 12618334Speter/* Values for sysctl. */ 12718334Speterstatic struct sysctl_ctx_list cpu_sysctl_ctx; 12818334Speterstatic struct sysctl_oid *cpu_sysctl_tree; 12918334Speterstatic int cpu_cx_generic; 13018334Speterstatic int cpu_cx_lowest; 13118334Speter 13218334Speterstatic device_t *cpu_devices; 13318334Speterstatic int cpu_ndevices; 13418334Speterstatic struct acpi_cpu_softc **cpu_softc; 13518334SpeterACPI_SERIAL_DECL(cpu, "ACPI CPU"); 13618334Speter 13718334Speterstatic int acpi_cpu_probe(device_t dev); 13818334Speterstatic int acpi_cpu_attach(device_t dev); 13918334Speterstatic int acpi_pcpu_get_id(uint32_t idx, uint32_t *acpi_id, 14018334Speter uint32_t *cpu_id); 14118334Speterstatic struct resource_list *acpi_cpu_get_rlist(device_t dev, device_t child); 14218334Speterstatic device_t acpi_cpu_add_child(device_t dev, int order, const char *name, 14318334Speter int unit); 14418334Speterstatic int acpi_cpu_read_ivar(device_t dev, device_t child, int index, 14518334Speter uintptr_t *result); 14618334Speterstatic int acpi_cpu_shutdown(device_t dev); 14718334Speterstatic void acpi_cpu_cx_probe(struct acpi_cpu_softc *sc); 14818334Speterstatic void acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc); 14918334Speterstatic int acpi_cpu_cx_cst(struct acpi_cpu_softc *sc); 15018334Speterstatic void acpi_cpu_startup(void *arg); 15118334Speterstatic void acpi_cpu_startup_cx(struct acpi_cpu_softc *sc); 15218334Speterstatic void acpi_cpu_idle(void); 15318334Speterstatic void acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context); 
15418334Speterstatic int acpi_cpu_quirks(void); 15518334Speterstatic int acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS); 15618334Speterstatic int acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS); 15718334Speterstatic int acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS); 15818334Speter 15918334Speterstatic device_method_t acpi_cpu_methods[] = { 16018334Speter /* Device interface */ 16118334Speter DEVMETHOD(device_probe, acpi_cpu_probe), 16218334Speter DEVMETHOD(device_attach, acpi_cpu_attach), 16318334Speter DEVMETHOD(device_detach, bus_generic_detach), 16418334Speter DEVMETHOD(device_shutdown, acpi_cpu_shutdown), 16518334Speter DEVMETHOD(device_suspend, bus_generic_suspend), 16618334Speter DEVMETHOD(device_resume, bus_generic_resume), 16718334Speter 16818334Speter /* Bus interface */ 16918334Speter DEVMETHOD(bus_add_child, acpi_cpu_add_child), 17018334Speter DEVMETHOD(bus_read_ivar, acpi_cpu_read_ivar), 17118334Speter DEVMETHOD(bus_get_resource_list, acpi_cpu_get_rlist), 17218334Speter DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), 17318334Speter DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), 17418334Speter DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource), 17518334Speter DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource), 17618334Speter DEVMETHOD(bus_driver_added, bus_generic_driver_added), 17718334Speter DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), 17818334Speter DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), 17918334Speter DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), 18018334Speter DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), 18118334Speter 18218334Speter {0, 0} 18318334Speter}; 18418334Speter 18518334Speterstatic driver_t acpi_cpu_driver = { 18618334Speter "cpu", 18718334Speter acpi_cpu_methods, 18818334Speter sizeof(struct acpi_cpu_softc), 18918334Speter}; 19018334Speter 19118334Speterstatic devclass_t acpi_cpu_devclass; 19218334SpeterDRIVER_MODULE(cpu, 
acpi, acpi_cpu_driver, acpi_cpu_devclass, 0, 0); 19318334SpeterMODULE_DEPEND(cpu, acpi, 1, 1, 1); 19418334Speter 19518334Speterstatic int 19618334Speteracpi_cpu_probe(device_t dev) 19718334Speter{ 19818334Speter int acpi_id, cpu_id; 19918334Speter ACPI_BUFFER buf; 20018334Speter ACPI_HANDLE handle; 20118334Speter ACPI_OBJECT *obj; 20218334Speter ACPI_STATUS status; 20318334Speter 20418334Speter if (acpi_disabled("cpu") || acpi_get_type(dev) != ACPI_TYPE_PROCESSOR) 20518334Speter return (ENXIO); 20618334Speter 20718334Speter handle = acpi_get_handle(dev); 20818334Speter if (cpu_softc == NULL) 20918334Speter cpu_softc = malloc(sizeof(struct acpi_cpu_softc *) * 21018334Speter (mp_maxid + 1), M_TEMP /* XXX */, M_WAITOK | M_ZERO); 21118334Speter 21218334Speter /* Get our Processor object. */ 21318334Speter buf.Pointer = NULL; 21418334Speter buf.Length = ACPI_ALLOCATE_BUFFER; 21518334Speter status = AcpiEvaluateObject(handle, NULL, NULL, &buf); 21618334Speter if (ACPI_FAILURE(status)) { 21718334Speter device_printf(dev, "probe failed to get Processor obj - %s\n", 21818334Speter AcpiFormatException(status)); 21918334Speter return (ENXIO); 22018334Speter } 22118334Speter obj = (ACPI_OBJECT *)buf.Pointer; 22218334Speter if (obj->Type != ACPI_TYPE_PROCESSOR) { 22318334Speter device_printf(dev, "Processor object has bad type %d\n", obj->Type); 22418334Speter AcpiOsFree(obj); 22518334Speter return (ENXIO); 22618334Speter } 22718334Speter 22818334Speter /* 22918334Speter * Find the processor associated with our unit. We could use the 23018334Speter * ProcId as a key, however, some boxes do not have the same values 23118334Speter * in their Processor object as the ProcId values in the MADT. 
23218334Speter */ 23318334Speter acpi_id = obj->Processor.ProcId; 23418334Speter AcpiOsFree(obj); 23518334Speter if (acpi_pcpu_get_id(device_get_unit(dev), &acpi_id, &cpu_id) != 0) 23618334Speter return (ENXIO); 23718334Speter 23818334Speter /* 23918334Speter * Check if we already probed this processor. We scan the bus twice 24018334Speter * so it's possible we've already seen this one. 24118334Speter */ 24218334Speter if (cpu_softc[cpu_id] != NULL) 24318334Speter return (ENXIO); 24418334Speter 24518334Speter /* Mark this processor as in-use and save our derived id for attach. */ 24618334Speter cpu_softc[cpu_id] = (void *)1; 24718334Speter acpi_set_magic(dev, cpu_id); 24818334Speter device_set_desc(dev, "ACPI CPU"); 24918334Speter 25018334Speter return (0); 25118334Speter} 25218334Speter 25318334Speterstatic int 25418334Speteracpi_cpu_attach(device_t dev) 25518334Speter{ 25618334Speter ACPI_BUFFER buf; 25718334Speter ACPI_OBJECT arg, *obj; 25818334Speter ACPI_OBJECT_LIST arglist; 25918334Speter struct pcpu *pcpu_data; 26018334Speter struct acpi_cpu_softc *sc; 26118334Speter struct acpi_softc *acpi_sc; 26218334Speter ACPI_STATUS status; 26318334Speter u_int features; 26418334Speter int cpu_id, drv_count, i; 26518334Speter driver_t **drivers; 26618334Speter uint32_t cap_set[3]; 26718334Speter 26818334Speter ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 26918334Speter 27018334Speter sc = device_get_softc(dev); 27118334Speter sc->cpu_dev = dev; 27218334Speter sc->cpu_handle = acpi_get_handle(dev); 27318334Speter cpu_id = acpi_get_magic(dev); 27418334Speter cpu_softc[cpu_id] = sc; 27518334Speter pcpu_data = pcpu_find(cpu_id); 27618334Speter pcpu_data->pc_device = dev; 27718334Speter sc->cpu_pcpu = pcpu_data; 27818334Speter cpu_smi_cmd = AcpiGbl_FADT.SmiCommand; 27918334Speter cpu_cst_cnt = AcpiGbl_FADT.CstControl; 28018334Speter 28118334Speter buf.Pointer = NULL; 28218334Speter buf.Length = ACPI_ALLOCATE_BUFFER; 28318334Speter status = 
AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf); 28418334Speter if (ACPI_FAILURE(status)) { 28518334Speter device_printf(dev, "attach failed to get Processor obj - %s\n", 28618334Speter AcpiFormatException(status)); 28718334Speter return (ENXIO); 28818334Speter } 28918334Speter obj = (ACPI_OBJECT *)buf.Pointer; 29018334Speter sc->cpu_p_blk = obj->Processor.PblkAddress; 29118334Speter sc->cpu_p_blk_len = obj->Processor.PblkLength; 29218334Speter sc->cpu_acpi_id = obj->Processor.ProcId; 29318334Speter AcpiOsFree(obj); 29418334Speter ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n", 29518334Speter device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len)); 29618334Speter 29718334Speter /* 29818334Speter * If this is the first cpu we attach, create and initialize the generic 29918334Speter * resources that will be used by all acpi cpu devices. 30018334Speter */ 30118334Speter if (device_get_unit(dev) == 0) { 30218334Speter /* Assume we won't be using generic Cx mode by default */ 30318334Speter cpu_cx_generic = FALSE; 30418334Speter 30518334Speter /* Install hw.acpi.cpu sysctl tree */ 30618334Speter acpi_sc = acpi_device_get_parent_softc(dev); 30718334Speter sysctl_ctx_init(&cpu_sysctl_ctx); 30818334Speter cpu_sysctl_tree = SYSCTL_ADD_NODE(&cpu_sysctl_ctx, 30918334Speter SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree), OID_AUTO, "cpu", 31018334Speter CTLFLAG_RD, 0, "node for CPU children"); 31118334Speter 31218334Speter /* Queue post cpu-probing task handler */ 31318334Speter AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cpu_startup, NULL); 31418334Speter } 31518334Speter 31618334Speter /* 31718334Speter * Before calling any CPU methods, collect child driver feature hints 31818334Speter * and notify ACPI of them. We support unified SMP power control 31918334Speter * so advertise this ourselves. Note this is not the same as independent 32018334Speter * SMP control where each CPU can have different settings. 
32118334Speter */ 32218334Speter sc->cpu_features = ACPI_CAP_SMP_SAME | ACPI_CAP_SMP_SAME_C3; 32318334Speter if (devclass_get_drivers(acpi_cpu_devclass, &drivers, &drv_count) == 0) { 32418334Speter for (i = 0; i < drv_count; i++) { 32518334Speter if (ACPI_GET_FEATURES(drivers[i], &features) == 0) 32618334Speter sc->cpu_features |= features; 32718334Speter } 32818334Speter free(drivers, M_TEMP); 32918334Speter } 33018334Speter 33118334Speter /* 33218334Speter * CPU capabilities are specified as a buffer of 32-bit integers: 33318334Speter * revision, count, and one or more capabilities. The revision of 33418334Speter * "1" is not specified anywhere but seems to match Linux. We should 33518334Speter * also support _OSC here. 33618334Speter */ 33718334Speter if (sc->cpu_features) { 33818334Speter arglist.Pointer = &arg; 33918334Speter arglist.Count = 1; 34018334Speter arg.Type = ACPI_TYPE_BUFFER; 34118334Speter arg.Buffer.Length = sizeof(cap_set); 34218334Speter arg.Buffer.Pointer = (uint8_t *)cap_set; 34318334Speter cap_set[0] = 1; /* revision */ 34418334Speter cap_set[1] = 1; /* number of capabilities integers */ 34518334Speter cap_set[2] = sc->cpu_features; 34618334Speter AcpiEvaluateObject(sc->cpu_handle, "_PDC", &arglist, NULL); 34718334Speter } 34818334Speter 34918334Speter /* Probe for Cx state support. */ 35018334Speter acpi_cpu_cx_probe(sc); 35118334Speter 35218334Speter /* Finally, call identify and probe/attach for child devices. */ 35318334Speter bus_generic_probe(dev); 35418334Speter bus_generic_attach(dev); 35518334Speter 35618334Speter return (0); 35718334Speter} 35818334Speter 35918334Speter/* 36018334Speter * Find the nth present CPU and return its pc_cpuid as well as set the 36118334Speter * pc_acpi_id from the most reliable source. 
36218334Speter */ 36318334Speterstatic int 36418334Speteracpi_pcpu_get_id(uint32_t idx, uint32_t *acpi_id, uint32_t *cpu_id) 36518334Speter{ 36618334Speter struct pcpu *pcpu_data; 36718334Speter uint32_t i; 36818334Speter 36918334Speter KASSERT(acpi_id != NULL, ("Null acpi_id")); 37018334Speter KASSERT(cpu_id != NULL, ("Null cpu_id")); 37118334Speter for (i = 0; i <= mp_maxid; i++) { 37218334Speter if (CPU_ABSENT(i)) 37318334Speter continue; 37418334Speter pcpu_data = pcpu_find(i); 37518334Speter KASSERT(pcpu_data != NULL, ("no pcpu data for %d", i)); 37618334Speter if (idx-- == 0) { 37718334Speter /* 37818334Speter * If pc_acpi_id was not initialized (e.g., a non-APIC UP box) 37918334Speter * override it with the value from the ASL. Otherwise, if the 38018334Speter * two don't match, prefer the MADT-derived value. Finally, 38118334Speter * return the pc_cpuid to reference this processor. 38218334Speter */ 38318334Speter if (pcpu_data->pc_acpi_id == 0xffffffff) 38418334Speter pcpu_data->pc_acpi_id = *acpi_id; 38518334Speter else if (pcpu_data->pc_acpi_id != *acpi_id) 38618334Speter *acpi_id = pcpu_data->pc_acpi_id; 38718334Speter *cpu_id = pcpu_data->pc_cpuid; 38818334Speter return (0); 38918334Speter } 39018334Speter } 39118334Speter 39218334Speter return (ESRCH); 39318334Speter} 39418334Speter 39518334Speterstatic struct resource_list * 39618334Speteracpi_cpu_get_rlist(device_t dev, device_t child) 39718334Speter{ 39818334Speter struct acpi_cpu_device *ad; 39918334Speter 40018334Speter ad = device_get_ivars(child); 40118334Speter if (ad == NULL) 40218334Speter return (NULL); 40318334Speter return (&ad->ad_rl); 40418334Speter} 40518334Speter 40618334Speterstatic device_t 40718334Speteracpi_cpu_add_child(device_t dev, int order, const char *name, int unit) 40818334Speter{ 40918334Speter struct acpi_cpu_device *ad; 41018334Speter device_t child; 41118334Speter 41218334Speter if ((ad = malloc(sizeof(*ad), M_TEMP, M_NOWAIT | M_ZERO)) == NULL) 41318334Speter return 
(NULL); 41418334Speter 41518334Speter resource_list_init(&ad->ad_rl); 41618334Speter 41718334Speter child = device_add_child_ordered(dev, order, name, unit); 41818334Speter if (child != NULL) 41918334Speter device_set_ivars(child, ad); 42018334Speter else 42118334Speter free(ad, M_TEMP); 42218334Speter return (child); 42318334Speter} 42418334Speter 42518334Speterstatic int 42618334Speteracpi_cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) 42718334Speter{ 42818334Speter struct acpi_cpu_softc *sc; 42918334Speter 43018334Speter sc = device_get_softc(dev); 43118334Speter switch (index) { 43218334Speter case ACPI_IVAR_HANDLE: 43318334Speter *result = (uintptr_t)sc->cpu_handle; 43418334Speter break; 43518334Speter case CPU_IVAR_PCPU: 43618334Speter *result = (uintptr_t)sc->cpu_pcpu; 43718334Speter break; 43818334Speter default: 43918334Speter return (ENOENT); 44018334Speter } 44118334Speter return (0); 44218334Speter} 44318334Speter 44418334Speterstatic int 44518334Speteracpi_cpu_shutdown(device_t dev) 44618334Speter{ 44718334Speter ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 44818334Speter 44918334Speter /* Allow children to shutdown first. */ 45018334Speter bus_generic_shutdown(dev); 45118334Speter 45218334Speter /* Disable any entry to the idle function. */ 45318334Speter cpu_disable_idle = TRUE; 45418334Speter 45518334Speter /* Signal and wait for all processors to exit acpi_cpu_idle(). */ 45618334Speter smp_rendezvous(NULL, NULL, NULL, NULL); 45718334Speter 45818334Speter return_VALUE (0); 45918334Speter} 46018334Speter 46118334Speterstatic void 46218334Speteracpi_cpu_cx_probe(struct acpi_cpu_softc *sc) 46318334Speter{ 46418334Speter ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 46518334Speter 46618334Speter /* Use initial sleep value of 1 sec. to start with lowest idle state. 
*/ 46718334Speter sc->cpu_prev_sleep = 1000000; 46818334Speter sc->cpu_cx_lowest = 0; 46918334Speter 47018334Speter /* 47118334Speter * Check for the ACPI 2.0 _CST sleep states object. If we can't find 47218334Speter * any, we'll revert to generic FADT/P_BLK Cx control method which will 47318334Speter * be handled by acpi_cpu_startup. We need to defer to after having 47418334Speter * probed all the cpus in the system before probing for generic Cx 47518334Speter * states as we may already have found cpus with valid _CST packages 47618334Speter */ 47718334Speter if (!cpu_cx_generic && acpi_cpu_cx_cst(sc) != 0) { 47818334Speter /* 47918334Speter * We were unable to find a _CST package for this cpu or there 48018334Speter * was an error parsing it. Switch back to generic mode. 48118334Speter */ 48218334Speter cpu_cx_generic = TRUE; 48318334Speter if (bootverbose) 48418334Speter device_printf(sc->cpu_dev, "switching to generic Cx mode\n"); 48518334Speter } 48618334Speter 48718334Speter /* 48818334Speter * TODO: _CSD Package should be checked here. 48918334Speter */ 49018334Speter} 49118334Speter 49218334Speterstatic void 49318334Speteracpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc) 49418334Speter{ 49518334Speter ACPI_GENERIC_ADDRESS gas; 49618334Speter struct acpi_cx *cx_ptr; 49718334Speter 49818334Speter sc->cpu_cx_count = 0; 49918334Speter cx_ptr = sc->cpu_cx_states; 50018334Speter 50118334Speter /* Use initial sleep value of 1 sec. to start with lowest idle state. */ 50218334Speter sc->cpu_prev_sleep = 1000000; 50318334Speter 50418334Speter /* C1 has been required since just after ACPI 1.0 */ 50518334Speter cx_ptr->type = ACPI_STATE_C1; 50618334Speter cx_ptr->trans_lat = 0; 50718334Speter cx_ptr++; 50818334Speter sc->cpu_cx_count++; 50918334Speter 51018334Speter /* 51118334Speter * The spec says P_BLK must be 6 bytes long. However, some systems 51218334Speter * use it to indicate a fractional set of features present so we 51318334Speter * take 5 as C2. 
Some may also have a value of 7 to indicate 51418334Speter * another C3 but most use _CST for this (as required) and having 51518334Speter * "only" C1-C3 is not a hardship. 51618334Speter */ 51718334Speter if (sc->cpu_p_blk_len < 5) 51818334Speter return; 51918334Speter 52018334Speter /* Validate and allocate resources for C2 (P_LVL2). */ 52118334Speter gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO; 52218334Speter gas.BitWidth = 8; 52318334Speter if (AcpiGbl_FADT.C2Latency <= 100) { 52418334Speter gas.Address = sc->cpu_p_blk + 4; 52518334Speter acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &sc->cpu_rid, 52618334Speter &gas, &cx_ptr->p_lvlx, RF_SHAREABLE); 52718334Speter if (cx_ptr->p_lvlx != NULL) { 52818334Speter sc->cpu_rid++; 52918334Speter cx_ptr->type = ACPI_STATE_C2; 53018334Speter cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency; 53118334Speter cx_ptr++; 53218334Speter sc->cpu_cx_count++; 53318334Speter } 53418334Speter } 53518334Speter if (sc->cpu_p_blk_len < 6) 53618334Speter return; 53718334Speter 53818334Speter /* Validate and allocate resources for C3 (P_LVL3). */ 53918334Speter if (AcpiGbl_FADT.C3Latency <= 1000) { 54018334Speter gas.Address = sc->cpu_p_blk + 5; 54118334Speter acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &sc->cpu_rid, &gas, 54218334Speter &cx_ptr->p_lvlx, RF_SHAREABLE); 54318334Speter if (cx_ptr->p_lvlx != NULL) { 54418334Speter sc->cpu_rid++; 54518334Speter cx_ptr->type = ACPI_STATE_C3; 54618334Speter cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency; 54718334Speter cx_ptr++; 54818334Speter sc->cpu_cx_count++; 54918334Speter } 55018334Speter } 55118334Speter 55218334Speter /* Update the largest cx_count seen so far */ 55318334Speter if (sc->cpu_cx_count > cpu_cx_count) 55418334Speter cpu_cx_count = sc->cpu_cx_count; 55518334Speter} 55618334Speter 55718334Speter/* 55818334Speter * Parse a _CST package and set up its Cx states. 
Since the _CST object 55918334Speter * can change dynamically, our notify handler may call this function 56018334Speter * to clean up and probe the new _CST package. 56118334Speter */ 56218334Speterstatic int 56318334Speteracpi_cpu_cx_cst(struct acpi_cpu_softc *sc) 56418334Speter{ 56518334Speter struct acpi_cx *cx_ptr; 56618334Speter ACPI_STATUS status; 56718334Speter ACPI_BUFFER buf; 56818334Speter ACPI_OBJECT *top; 56918334Speter ACPI_OBJECT *pkg; 57018334Speter uint32_t count; 57118334Speter int i; 57218334Speter 57318334Speter ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 57418334Speter 57518334Speter buf.Pointer = NULL; 57618334Speter buf.Length = ACPI_ALLOCATE_BUFFER; 57718334Speter status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf); 57818334Speter if (ACPI_FAILURE(status)) 57918334Speter return (ENXIO); 58018334Speter 58118334Speter /* _CST is a package with a count and at least one Cx package. */ 58218334Speter top = (ACPI_OBJECT *)buf.Pointer; 58318334Speter if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) { 58418334Speter device_printf(sc->cpu_dev, "invalid _CST package\n"); 58518334Speter AcpiOsFree(buf.Pointer); 58618334Speter return (ENXIO); 58718334Speter } 58818334Speter if (count != top->Package.Count - 1) { 58918334Speter device_printf(sc->cpu_dev, "invalid _CST state count (%d != %d)\n", 59018334Speter count, top->Package.Count - 1); 59118334Speter count = top->Package.Count - 1; 59218334Speter } 59318334Speter if (count > MAX_CX_STATES) { 59418334Speter device_printf(sc->cpu_dev, "_CST has too many states (%d)\n", count); 59518334Speter count = MAX_CX_STATES; 59618334Speter } 59718334Speter 59818334Speter /* Set up all valid states. 
*/ 59918334Speter sc->cpu_cx_count = 0; 60018334Speter cx_ptr = sc->cpu_cx_states; 60118334Speter for (i = 0; i < count; i++) { 60218334Speter pkg = &top->Package.Elements[i + 1]; 60318334Speter if (!ACPI_PKG_VALID(pkg, 4) || 60418334Speter acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 || 60518334Speter acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 || 60618334Speter acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) { 60718334Speter 60818334Speter device_printf(sc->cpu_dev, "skipping invalid Cx state package\n"); 60918334Speter continue; 61018334Speter } 61118334Speter 61218334Speter /* Validate the state to see if we should use it. */ 61318334Speter switch (cx_ptr->type) { 61418334Speter case ACPI_STATE_C1: 61518334Speter sc->cpu_non_c3 = i; 61618334Speter cx_ptr++; 61718334Speter sc->cpu_cx_count++; 61818334Speter continue; 61918334Speter case ACPI_STATE_C2: 62018334Speter if (cx_ptr->trans_lat > 100) { 62118334Speter ACPI_DEBUG_PRINT((ACPI_DB_INFO, 62218334Speter "acpi_cpu%d: C2[%d] not available.\n", 62318334Speter device_get_unit(sc->cpu_dev), i)); 62418334Speter continue; 62518334Speter } 62618334Speter sc->cpu_non_c3 = i; 62718334Speter break; 62818334Speter case ACPI_STATE_C3: 62918334Speter default: 63018334Speter if (cx_ptr->trans_lat > 1000 || 63118334Speter (cpu_quirks & CPU_QUIRK_NO_C3) != 0) { 63218334Speter 63318334Speter ACPI_DEBUG_PRINT((ACPI_DB_INFO, 63418334Speter "acpi_cpu%d: C3[%d] not available.\n", 63518334Speter device_get_unit(sc->cpu_dev), i)); 63618334Speter continue; 63718334Speter } 63818334Speter break; 63918334Speter } 64018334Speter 64118334Speter#ifdef notyet 64218334Speter /* Free up any previous register. */ 64318334Speter if (cx_ptr->p_lvlx != NULL) { 64418334Speter bus_release_resource(sc->cpu_dev, 0, 0, cx_ptr->p_lvlx); 64518334Speter cx_ptr->p_lvlx = NULL; 64618334Speter } 64718334Speter#endif 64818334Speter 64918334Speter /* Allocate the control register for C2 or C3. 
*/ 65018334Speter acpi_PkgGas(sc->cpu_dev, pkg, 0, &cx_ptr->res_type, &sc->cpu_rid, 65118334Speter &cx_ptr->p_lvlx, RF_SHAREABLE); 65218334Speter if (cx_ptr->p_lvlx) { 65318334Speter sc->cpu_rid++; 65418334Speter ACPI_DEBUG_PRINT((ACPI_DB_INFO, 65518334Speter "acpi_cpu%d: Got C%d - %d latency\n", 65618334Speter device_get_unit(sc->cpu_dev), cx_ptr->type, 65718334Speter cx_ptr->trans_lat)); 65818334Speter cx_ptr++; 65918334Speter sc->cpu_cx_count++; 66018334Speter } 66118334Speter } 66218334Speter AcpiOsFree(buf.Pointer); 66318334Speter 66418334Speter return (0); 66518334Speter} 66618334Speter 66718334Speter/* 66818334Speter * Call this *after* all CPUs have been attached. 66918334Speter */ 67018334Speterstatic void 67118334Speteracpi_cpu_startup(void *arg) 67218334Speter{ 67318334Speter struct acpi_cpu_softc *sc; 67418334Speter int i; 67518334Speter 67618334Speter /* Get set of CPU devices */ 67718334Speter devclass_get_devices(acpi_cpu_devclass, &cpu_devices, &cpu_ndevices); 67818334Speter 67918334Speter /* 68018334Speter * Setup any quirks that might necessary now that we have probed 68118334Speter * all the CPUs 68218334Speter */ 68318334Speter acpi_cpu_quirks(); 68418334Speter 68518334Speter cpu_cx_count = 0; 68618334Speter if (cpu_cx_generic) { 68718334Speter /* 68818334Speter * We are using generic Cx mode, probe for available Cx states 68918334Speter * for all processors. 69018334Speter */ 69118334Speter for (i = 0; i < cpu_ndevices; i++) { 69218334Speter sc = device_get_softc(cpu_devices[i]); 69318334Speter acpi_cpu_generic_cx_probe(sc); 69418334Speter } 69518334Speter 69618334Speter /* 69718334Speter * Find the highest Cx state common to all CPUs 69818334Speter * in the system, taking quirks into account. 
69918334Speter */ 70018334Speter for (i = 0; i < cpu_ndevices; i++) { 70118334Speter sc = device_get_softc(cpu_devices[i]); 70218334Speter if (sc->cpu_cx_count < cpu_cx_count) 70318334Speter cpu_cx_count = sc->cpu_cx_count; 70418334Speter } 70518334Speter } else { 70618334Speter /* 70718334Speter * We are using _CST mode, remove C3 state if necessary. 70818334Speter * Update the largest Cx state supported in the global cpu_cx_count. 70918334Speter * It will be used in the global Cx sysctl handler. 71018334Speter * As we now know for sure that we will be using _CST mode 71118334Speter * install our notify handler. 71218334Speter */ 71318334Speter for (i = 0; i < cpu_ndevices; i++) { 71418334Speter sc = device_get_softc(cpu_devices[i]); 71518334Speter if (cpu_quirks && CPU_QUIRK_NO_C3) { 71618334Speter sc->cpu_cx_count = sc->cpu_non_c3 + 1; 71718334Speter } 71818334Speter if (sc->cpu_cx_count > cpu_cx_count) 71918334Speter cpu_cx_count = sc->cpu_cx_count; 72018334Speter AcpiInstallNotifyHandler(sc->cpu_handle, ACPI_DEVICE_NOTIFY, 72118334Speter acpi_cpu_notify, sc); 72218334Speter } 72318334Speter } 72418334Speter 72518334Speter /* Perform Cx final initialization. */ 72618334Speter for (i = 0; i < cpu_ndevices; i++) { 72718334Speter sc = device_get_softc(cpu_devices[i]); 72818334Speter acpi_cpu_startup_cx(sc); 72918334Speter } 73018334Speter 73118334Speter /* Add a sysctl handler to handle global Cx lowest setting */ 73218334Speter SYSCTL_ADD_PROC(&cpu_sysctl_ctx, SYSCTL_CHILDREN(cpu_sysctl_tree), 73318334Speter OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW, 73418334Speter NULL, 0, acpi_cpu_global_cx_lowest_sysctl, "A", 73518334Speter "Global lowest Cx sleep state to use"); 73618334Speter 73718334Speter /* Take over idling from cpu_idle_default(). 
*/ 73818334Speter cpu_cx_lowest = 0; 73918334Speter cpu_disable_idle = FALSE; 74018334Speter cpu_idle_hook = acpi_cpu_idle; 74118334Speter} 74218334Speter 74318334Speterstatic void 74418334Speteracpi_cpu_startup_cx(struct acpi_cpu_softc *sc) 74518334Speter{ 74618334Speter struct sbuf sb; 74718334Speter int i; 74818334Speter 74918334Speter /* 75018334Speter * Set up the list of Cx states 75118334Speter */ 75218334Speter sc->cpu_non_c3 = 0; 75318334Speter sbuf_new(&sb, sc->cpu_cx_supported, sizeof(sc->cpu_cx_supported), 75418334Speter SBUF_FIXEDLEN); 75518334Speter for (i = 0; i < sc->cpu_cx_count; i++) { 75618334Speter sbuf_printf(&sb, "C%d/%d ", i + 1, sc->cpu_cx_states[i].trans_lat); 75718334Speter if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) 75818334Speter sc->cpu_non_c3 = i; 75918334Speter } 76018334Speter sbuf_trim(&sb); 76118334Speter sbuf_finish(&sb); 76218334Speter 76318334Speter SYSCTL_ADD_STRING(&sc->cpu_sysctl_ctx, 76418334Speter SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), 76518334Speter OID_AUTO, "cx_supported", CTLFLAG_RD, 76618334Speter sc->cpu_cx_supported, 0, 76718334Speter "Cx/microsecond values for supported Cx states"); 76818334Speter SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx, 76918334Speter SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), 77018334Speter OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW, 77118334Speter (void *)sc, 0, acpi_cpu_cx_lowest_sysctl, "A", 77218334Speter "lowest Cx sleep state to use"); 77318334Speter SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx, 77418334Speter SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), 77518334Speter OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD, 77618334Speter (void *)sc, 0, acpi_cpu_usage_sysctl, "A", 77718334Speter "percent usage for each Cx state"); 77818334Speter 77918334Speter#ifdef notyet 78018334Speter /* Signal platform that we can handle _CST notification. 
*/ 78118334Speter if (!cpu_cx_generic && cpu_cst_cnt != 0) { 78218334Speter ACPI_LOCK(acpi); 78318334Speter AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8); 78418334Speter ACPI_UNLOCK(acpi); 78518334Speter } 78618334Speter#endif 78718334Speter} 78818334Speter 78918334Speter/* 79018334Speter * Idle the CPU in the lowest state possible. This function is called with 79118334Speter * interrupts disabled. Note that once it re-enables interrupts, a task 79218334Speter * switch can occur so do not access shared data (i.e. the softc) after 79318334Speter * interrupts are re-enabled. 79418334Speter */ 79518334Speterstatic void 79618334Speteracpi_cpu_idle() 79718334Speter{ 79818334Speter struct acpi_cpu_softc *sc; 79918334Speter struct acpi_cx *cx_next; 80018334Speter uint32_t start_time, end_time; 80118334Speter int bm_active, cx_next_idx, i; 80218334Speter 80318334Speter /* If disabled, return immediately. */ 80418334Speter if (cpu_disable_idle) { 80518334Speter ACPI_ENABLE_IRQS(); 80618334Speter return; 80718334Speter } 80818334Speter 80918334Speter /* 81018334Speter * Look up our CPU id to get our softc. If it's NULL, we'll use C1 81118334Speter * since there is no ACPI processor object for this CPU. This occurs 81218334Speter * for logical CPUs in the HTT case. 81318334Speter */ 81418334Speter sc = cpu_softc[PCPU_GET(cpuid)]; 81518334Speter if (sc == NULL) { 81618334Speter acpi_cpu_c1(); 81718334Speter return; 81818334Speter } 81918334Speter 82018334Speter /* 82118334Speter * If we slept 100 us or more, use the lowest Cx state. Otherwise, 82218334Speter * find the lowest state that has a latency less than or equal to 82318334Speter * the length of our last sleep. 82418334Speter */ 82518334Speter cx_next_idx = sc->cpu_cx_lowest; 82618334Speter if (sc->cpu_prev_sleep < 100) { 82718334Speter /* 82818334Speter * If we sleep too short all the time, this system may not implement 82918334Speter * C2/3 correctly (i.e. reads return immediately). 
In this case, 83018334Speter * back off and use the next higher level. 83118334Speter * It seems that when you have a dual core cpu (like the Intel Core Duo) 83218334Speter * that both cores will get out of C3 state as soon as one of them 83318334Speter * requires it. This breaks the sleep detection logic as the sleep 83418334Speter * counter is local to each cpu. Disable the sleep logic for now as a 83518334Speter * workaround if there's more than one CPU. The right fix would probably 83618334Speter * be to add quirks for system that don't really support C3 state. 83718334Speter */ 83818334Speter if (mp_ncpus < 2 && sc->cpu_prev_sleep <= 1) { 83918334Speter sc->cpu_short_slp++; 84018334Speter if (sc->cpu_short_slp == 1000 && sc->cpu_cx_lowest != 0) { 84118334Speter if (sc->cpu_non_c3 == sc->cpu_cx_lowest && sc->cpu_non_c3 != 0) 84218334Speter sc->cpu_non_c3--; 84318334Speter sc->cpu_cx_lowest--; 84418334Speter sc->cpu_short_slp = 0; 84518334Speter device_printf(sc->cpu_dev, 84618334Speter "too many short sleeps, backing off to C%d\n", 84718334Speter sc->cpu_cx_lowest + 1); 84818334Speter } 84918334Speter } else 85018334Speter sc->cpu_short_slp = 0; 85118334Speter 85218334Speter for (i = sc->cpu_cx_lowest; i >= 0; i--) 85318334Speter if (sc->cpu_cx_states[i].trans_lat <= sc->cpu_prev_sleep) { 85418334Speter cx_next_idx = i; 85518334Speter break; 85618334Speter } 85718334Speter } 85818334Speter 85918334Speter /* 86018334Speter * Check for bus master activity. If there was activity, clear 86118334Speter * the bit and use the lowest non-C3 state. Note that the USB 86218334Speter * driver polling for new devices keeps this bit set all the 86318334Speter * time if USB is loaded. 
86418334Speter */ 86518334Speter if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) { 86618334Speter AcpiGetRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active); 86718334Speter if (bm_active != 0) { 86818334Speter AcpiSetRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1); 86918334Speter cx_next_idx = min(cx_next_idx, sc->cpu_non_c3); 87018334Speter } 87118334Speter } 87218334Speter 87318334Speter /* Select the next state and update statistics. */ 87418334Speter cx_next = &sc->cpu_cx_states[cx_next_idx]; 87518334Speter sc->cpu_cx_stats[cx_next_idx]++; 87618334Speter KASSERT(cx_next->type != ACPI_STATE_C0, ("acpi_cpu_idle: C0 sleep")); 87718334Speter 87818334Speter /* 87918334Speter * Execute HLT (or equivalent) and wait for an interrupt. We can't 88018334Speter * calculate the time spent in C1 since the place we wake up is an 88118334Speter * ISR. Assume we slept one quantum and return. 88218334Speter */ 88318334Speter if (cx_next->type == ACPI_STATE_C1) { 88418334Speter sc->cpu_prev_sleep = 1000000 / hz; 88518334Speter acpi_cpu_c1(); 88618334Speter return; 88718334Speter } 88818334Speter 88918334Speter /* 89018334Speter * For C3, disable bus master arbitration and enable bus master wake 89118334Speter * if BM control is available, otherwise flush the CPU cache. 89218334Speter */ 89318334Speter if (cx_next->type == ACPI_STATE_C3) { 89418334Speter if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) { 89518334Speter AcpiSetRegister(ACPI_BITREG_ARB_DISABLE, 1); 89618334Speter AcpiSetRegister(ACPI_BITREG_BUS_MASTER_RLD, 1); 89718334Speter } else 89818334Speter ACPI_FLUSH_CPU_CACHE(); 89918334Speter } 90018334Speter 90118334Speter /* 90218334Speter * Read from P_LVLx to enter C2(+), checking time spent asleep. 90318334Speter * Use the ACPI timer for measuring sleep time. Since we need to 90418334Speter * get the time very close to the CPU start/stop clock logic, this 90518334Speter * is the only reliable time source. 
90618334Speter */ 90718334Speter AcpiHwLowLevelRead(32, &start_time, &AcpiGbl_FADT.XPmTimerBlock); 90818334Speter CPU_GET_REG(cx_next->p_lvlx, 1); 90918334Speter 91018334Speter /* 91118334Speter * Read the end time twice. Since it may take an arbitrary time 91218334Speter * to enter the idle state, the first read may be executed before 91318334Speter * the processor has stopped. Doing it again provides enough 91418334Speter * margin that we are certain to have a correct value. 91518334Speter */ 91618334Speter AcpiHwLowLevelRead(32, &end_time, &AcpiGbl_FADT.XPmTimerBlock); 91718334Speter AcpiHwLowLevelRead(32, &end_time, &AcpiGbl_FADT.XPmTimerBlock); 91818334Speter 91918334Speter /* Enable bus master arbitration and disable bus master wakeup. */ 92018334Speter if (cx_next->type == ACPI_STATE_C3 && 92118334Speter (cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) { 92218334Speter AcpiSetRegister(ACPI_BITREG_ARB_DISABLE, 0); 92318334Speter AcpiSetRegister(ACPI_BITREG_BUS_MASTER_RLD, 0); 92418334Speter } 92518334Speter ACPI_ENABLE_IRQS(); 92618334Speter 92718334Speter /* Find the actual time asleep in microseconds, minus overhead. */ 92818334Speter end_time = acpi_TimerDelta(end_time, start_time); 92918334Speter sc->cpu_prev_sleep = PM_USEC(end_time) - cx_next->trans_lat; 93018334Speter} 93118334Speter 93218334Speter/* 93318334Speter * Re-evaluate the _CST object when we are notified that it changed. 93418334Speter * 93518334Speter * XXX Re-evaluation disabled until locking is done. 
93618334Speter */ 93718334Speterstatic void 93818334Speteracpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context) 93918334Speter{ 94018334Speter struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)context; 94118334Speter 94218334Speter if (notify != ACPI_NOTIFY_CX_STATES) 94318334Speter return; 94418334Speter 94518334Speter device_printf(sc->cpu_dev, "Cx states changed\n"); 94618334Speter /* acpi_cpu_cx_cst(sc); */ 94718334Speter} 94818334Speter 94918334Speterstatic int 95018334Speteracpi_cpu_quirks(void) 95118334Speter{ 95218334Speter device_t acpi_dev; 95318334Speter 95418334Speter ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 95518334Speter 95618334Speter /* 95718334Speter * Bus mastering arbitration control is needed to keep caches coherent 95818334Speter * while sleeping in C3. If it's not present but a working flush cache 95918334Speter * instruction is present, flush the caches before entering C3 instead. 96018334Speter * Otherwise, just disable C3 completely. 96118334Speter */ 96218334Speter if (AcpiGbl_FADT.Pm2ControlBlock == 0 || 96318334Speter AcpiGbl_FADT.Pm2ControlLength == 0) { 96418334Speter if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) && 96518334Speter (AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0) { 96618334Speter cpu_quirks |= CPU_QUIRK_NO_BM_CTRL; 96718334Speter ACPI_DEBUG_PRINT((ACPI_DB_INFO, 96818334Speter "acpi_cpu: no BM control, using flush cache method\n")); 96918334Speter } else { 97018334Speter cpu_quirks |= CPU_QUIRK_NO_C3; 97118334Speter ACPI_DEBUG_PRINT((ACPI_DB_INFO, 97218334Speter "acpi_cpu: no BM control, C3 not available\n")); 97318334Speter } 97418334Speter } 97518334Speter 97618334Speter /* 97718334Speter * If we are using generic Cx mode, C3 on multiple CPUs requires using 97818334Speter * the expensive flush cache instruction. 
97918334Speter */ 98018334Speter if (cpu_cx_generic && mp_ncpus > 1) { 98118334Speter cpu_quirks |= CPU_QUIRK_NO_BM_CTRL; 98218334Speter ACPI_DEBUG_PRINT((ACPI_DB_INFO, 98318334Speter "acpi_cpu: SMP, using flush cache mode for C3\n")); 98418334Speter } 98518334Speter 98618334Speter /* Look for various quirks of the PIIX4 part. */ 98718334Speter acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3); 98818334Speter if (acpi_dev != NULL) { 98918334Speter switch (pci_get_revid(acpi_dev)) { 99018334Speter /* 99118334Speter * Disable C3 support for all PIIX4 chipsets. Some of these parts 99218334Speter * do not report the BMIDE status to the BM status register and 99318334Speter * others have a livelock bug if Type-F DMA is enabled. Linux 99418334Speter * works around the BMIDE bug by reading the BM status directly 99518334Speter * but we take the simpler approach of disabling C3 for these 99618334Speter * parts. 99718334Speter * 99818334Speter * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA 99918334Speter * Livelock") from the January 2002 PIIX4 specification update. 100018334Speter * Applies to all PIIX4 models. 
100118334Speter */ 100218334Speter case PCI_REVISION_4E: 100318334Speter case PCI_REVISION_4M: 100418334Speter cpu_quirks |= CPU_QUIRK_NO_C3; 100518334Speter ACPI_DEBUG_PRINT((ACPI_DB_INFO, 100618334Speter "acpi_cpu: working around PIIX4 bug, disabling C3\n")); 100718334Speter break; 100818334Speter default: 100918334Speter break; 101018334Speter } 101118334Speter } 101218334Speter 101318334Speter return (0); 101418334Speter} 101518334Speter 101618334Speterstatic int 101718334Speteracpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS) 101818334Speter{ 101918334Speter struct acpi_cpu_softc *sc; 102018334Speter struct sbuf sb; 102118334Speter char buf[128]; 102218334Speter int i; 102318334Speter uintmax_t fract, sum, whole; 102418334Speter 102518334Speter sc = (struct acpi_cpu_softc *) arg1; 102618334Speter sum = 0; 102718334Speter for (i = 0; i < sc->cpu_cx_count; i++) 102818334Speter sum += sc->cpu_cx_stats[i]; 102918334Speter sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN); 103018334Speter for (i = 0; i < sc->cpu_cx_count; i++) { 103118334Speter if (sum > 0) { 103218334Speter whole = (uintmax_t)sc->cpu_cx_stats[i] * 100; 103318334Speter fract = (whole % sum) * 100; 103418334Speter sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum), 103518334Speter (u_int)(fract / sum)); 103618334Speter } else 103718334Speter sbuf_printf(&sb, "0%% "); 103818334Speter } 103918334Speter sbuf_trim(&sb); 104018334Speter sbuf_finish(&sb); 104118334Speter sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 104218334Speter sbuf_delete(&sb); 104318334Speter 104418334Speter return (0); 104518334Speter} 104618334Speter 104718334Speterstatic int 104818334Speteracpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc, int val) 104918334Speter{ 105018334Speter int i; 105118334Speter 105218334Speter ACPI_SERIAL_ASSERT(cpu); 105318334Speter sc->cpu_cx_lowest = val; 105418334Speter 105518334Speter /* If not disabling, cache the new lowest non-C3 state. 
*/ 105618334Speter sc->cpu_non_c3 = 0; 105718334Speter for (i = sc->cpu_cx_lowest; i >= 0; i--) { 105818334Speter if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) { 105918334Speter sc->cpu_non_c3 = i; 106018334Speter break; 106118334Speter } 106218334Speter } 106318334Speter 106418334Speter /* Reset the statistics counters. */ 106518334Speter bzero(sc->cpu_cx_stats, sizeof(sc->cpu_cx_stats)); 106618334Speter return (0); 106718334Speter} 106818334Speter 106918334Speterstatic int 107018334Speteracpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS) 107118334Speter{ 107218334Speter struct acpi_cpu_softc *sc; 107318334Speter char state[8]; 107418334Speter int val, error; 107518334Speter 107618334Speter sc = (struct acpi_cpu_softc *) arg1; 107718334Speter snprintf(state, sizeof(state), "C%d", sc->cpu_cx_lowest + 1); 107818334Speter error = sysctl_handle_string(oidp, state, sizeof(state), req); 107918334Speter if (error != 0 || req->newptr == NULL) 108018334Speter return (error); 108118334Speter if (strlen(state) < 2 || toupper(state[0]) != 'C') 108218334Speter return (EINVAL); 108318334Speter val = (int) strtol(state + 1, NULL, 10) - 1; 108418334Speter if (val < 0 || val > sc->cpu_cx_count - 1) 108518334Speter return (EINVAL); 108618334Speter 108718334Speter ACPI_SERIAL_BEGIN(cpu); 108818334Speter acpi_cpu_set_cx_lowest(sc, val); 108918334Speter ACPI_SERIAL_END(cpu); 109018334Speter 109118334Speter return (0); 109218334Speter} 109318334Speter 109418334Speterstatic int 109518334Speteracpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS) 109618334Speter{ 109718334Speter struct acpi_cpu_softc *sc; 109818334Speter char state[8]; 109918334Speter int val, error, i; 110018334Speter 110118334Speter snprintf(state, sizeof(state), "C%d", cpu_cx_lowest + 1); 110218334Speter error = sysctl_handle_string(oidp, state, sizeof(state), req); 110318334Speter if (error != 0 || req->newptr == NULL) 110418334Speter return (error); 110518334Speter if (strlen(state) < 2 || toupper(state[0]) != 'C') 
110618334Speter return (EINVAL); 110718334Speter val = (int) strtol(state + 1, NULL, 10) - 1; 110818334Speter if (val < 0 || val > cpu_cx_count - 1) 110918334Speter return (EINVAL); 111018334Speter cpu_cx_lowest = val; 111118334Speter 111218334Speter /* Update the new lowest useable Cx state for all CPUs. */ 111318334Speter ACPI_SERIAL_BEGIN(cpu); 111418334Speter for (i = 0; i < cpu_ndevices; i++) { 111518334Speter sc = device_get_softc(cpu_devices[i]); 111618334Speter acpi_cpu_set_cx_lowest(sc, val); 111718334Speter } 111818334Speter ACPI_SERIAL_END(cpu); 111918334Speter 112018334Speter return (0); 112118334Speter} 112218334Speter