/*-
 * Copyright (c) 2003-2005 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/acpica/acpi_cpu.c 165875 2007-01-07 21:53:42Z njl $");

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/cpu.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/pcpu.h>
#include <sys/power.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>

#include <dev/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#include <sys/rman.h>

#include <contrib/dev/acpica/acpi.h>
#include <dev/acpica/acpivar.h>

/*
 * Support for ACPI Processor devices, including C[1-3] sleep states.
 */

/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT ACPI_PROCESSOR
ACPI_MODULE_NAME("PROCESSOR")

struct acpi_cx {
    struct resource *p_lvlx;    /* Register to read to enter state. */
    uint32_t type;              /* C1-3 (C4 and up treated as C3). */
    uint32_t trans_lat;         /* Transition latency (usec). */
    uint32_t power;             /* Power consumed (mW). */
    int res_type;               /* Resource type for p_lvlx. */
};
#define MAX_CX_STATES 8

struct acpi_cpu_softc {
    device_t cpu_dev;
    ACPI_HANDLE cpu_handle;
    struct pcpu *cpu_pcpu;
    uint32_t cpu_acpi_id;       /* ACPI processor id */
    uint32_t cpu_p_blk;         /* ACPI P_BLK location */
    uint32_t cpu_p_blk_len;     /* P_BLK length (must be 6). */
    struct acpi_cx cpu_cx_states[MAX_CX_STATES];
    int cpu_cx_count;           /* Number of valid Cx states. */
    int cpu_prev_sleep;         /* Last idle sleep duration. */
    int cpu_features;           /* Child driver supported features. */
    /* Runtime state. */
    int cpu_non_c3;             /* Index of lowest non-C3 state. */
    int cpu_short_slp;          /* Count of < 1us sleeps. */
    u_int cpu_cx_stats[MAX_CX_STATES];  /* Cx usage history. */
    /* Values for sysctl. */
    struct sysctl_ctx_list cpu_sysctl_ctx;
    struct sysctl_oid *cpu_sysctl_tree;
    int cpu_cx_lowest;
    char cpu_cx_supported[64];
    int cpu_rid;
};

struct acpi_cpu_device {
    struct resource_list ad_rl;
};

#define CPU_GET_REG(reg, width) \
    (bus_space_read_ ## width(rman_get_bustag((reg)), \
                              rman_get_bushandle((reg)), 0))
#define CPU_SET_REG(reg, width, val) \
    (bus_space_write_ ## width(rman_get_bustag((reg)), \
                               rman_get_bushandle((reg)), 0, (val)))

#define PM_USEC(x) ((x) >> 2)           /* ~4 clocks per usec (3.57955 MHz) */

#define ACPI_NOTIFY_CX_STATES   0x81    /* _CST changed. */

#define CPU_QUIRK_NO_C3         (1<<0)  /* C3-type states are not usable. */
#define CPU_QUIRK_NO_BM_CTRL    (1<<2)  /* No bus mastering control. */

#define PCI_VENDOR_INTEL        0x8086
#define PCI_DEVICE_82371AB_3    0x7113  /* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP     0
#define PCI_REVISION_B_STEP     1
#define PCI_REVISION_4E         2
#define PCI_REVISION_4M         3

/* Platform hardware resource information. */
static uint32_t cpu_smi_cmd;    /* Value to write to SMI_CMD. */
static uint8_t cpu_cst_cnt;     /* Indicate we are _CST aware. */
static int cpu_quirks;          /* Indicate any hardware bugs. */

/* Runtime state. */
static int cpu_disable_idle;    /* Disable entry to idle function */
static int cpu_cx_count;        /* Number of valid Cx states */

/* Values for sysctl. */
static struct sysctl_ctx_list cpu_sysctl_ctx;
static struct sysctl_oid *cpu_sysctl_tree;
static int cpu_cx_generic;
static int cpu_cx_lowest;

static device_t *cpu_devices;
static int cpu_ndevices;
static struct acpi_cpu_softc **cpu_softc;
ACPI_SERIAL_DECL(cpu, "ACPI CPU");

static int acpi_cpu_probe(device_t dev);
static int acpi_cpu_attach(device_t dev);
static int acpi_pcpu_get_id(uint32_t idx, uint32_t *acpi_id,
                uint32_t *cpu_id);
static struct resource_list *acpi_cpu_get_rlist(device_t dev, device_t child);
static device_t acpi_cpu_add_child(device_t dev, int order, const char *name,
                int unit);
static int acpi_cpu_read_ivar(device_t dev, device_t child, int index,
                uintptr_t *result);
static int acpi_cpu_shutdown(device_t dev);
static void acpi_cpu_cx_probe(struct acpi_cpu_softc *sc);
static void acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc);
static int acpi_cpu_cx_cst(struct acpi_cpu_softc *sc);
static void acpi_cpu_startup(void *arg);
static void acpi_cpu_startup_cx(struct acpi_cpu_softc *sc);
static void acpi_cpu_idle(void);
static void acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context);
static int acpi_cpu_quirks(void);
static int acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS);
static int acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);
static int acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);

static device_method_t acpi_cpu_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe, acpi_cpu_probe),
    DEVMETHOD(device_attach, acpi_cpu_attach),
    DEVMETHOD(device_detach, bus_generic_detach),
    DEVMETHOD(device_shutdown, acpi_cpu_shutdown),
    DEVMETHOD(device_suspend, bus_generic_suspend),
    DEVMETHOD(device_resume, bus_generic_resume),

    /* Bus interface */
    DEVMETHOD(bus_add_child, acpi_cpu_add_child),
    DEVMETHOD(bus_read_ivar, acpi_cpu_read_ivar),
    DEVMETHOD(bus_get_resource_list, acpi_cpu_get_rlist),
    DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
    DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
    DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource),
    DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource),
    DEVMETHOD(bus_driver_added, bus_generic_driver_added),
    DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
    DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
    DEVMETHOD(bus_setup_intr, bus_generic_setup_intr),
    DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr),

    {0, 0}
};

static driver_t acpi_cpu_driver = {
    "cpu",
    acpi_cpu_methods,
    sizeof(struct acpi_cpu_softc),
};

static devclass_t acpi_cpu_devclass;
DRIVER_MODULE(cpu, acpi, acpi_cpu_driver, acpi_cpu_devclass, 0, 0);
MODULE_DEPEND(cpu, acpi, 1, 1, 1);

static int
acpi_cpu_probe(device_t dev)
{
    int acpi_id, cpu_id;
    ACPI_BUFFER buf;
    ACPI_HANDLE handle;
    ACPI_OBJECT *obj;
    ACPI_STATUS status;

    if (acpi_disabled("cpu") || acpi_get_type(dev) != ACPI_TYPE_PROCESSOR)
        return (ENXIO);

    handle = acpi_get_handle(dev);
    if (cpu_softc == NULL)
        cpu_softc = malloc(sizeof(struct acpi_cpu_softc *) *
            (mp_maxid + 1), M_TEMP /* XXX */, M_WAITOK | M_ZERO);

    /* Get our Processor object. */
    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
        device_printf(dev, "probe failed to get Processor obj - %s\n",
            AcpiFormatException(status));
        return (ENXIO);
    }
    obj = (ACPI_OBJECT *)buf.Pointer;
    if (obj->Type != ACPI_TYPE_PROCESSOR) {
        device_printf(dev, "Processor object has bad type %d\n", obj->Type);
        AcpiOsFree(obj);
        return (ENXIO);
    }

    /*
     * Find the processor associated with our unit.  We could use the
     * ProcId as a key, however, some boxes do not have the same values
     * in their Processor object as the ProcId values in the MADT.
     */
    acpi_id = obj->Processor.ProcId;
    AcpiOsFree(obj);
    if (acpi_pcpu_get_id(device_get_unit(dev), &acpi_id, &cpu_id) != 0)
        return (ENXIO);

    /*
     * Check if we already probed this processor.  We scan the bus twice
     * so it's possible we've already seen this one.
     */
    if (cpu_softc[cpu_id] != NULL)
        return (ENXIO);

    /* Mark this processor as in-use and save our derived id for attach. */
    cpu_softc[cpu_id] = (void *)1;
    acpi_set_magic(dev, cpu_id);
    device_set_desc(dev, "ACPI CPU");

    return (0);
}

static int
acpi_cpu_attach(device_t dev)
{
    ACPI_BUFFER buf;
    ACPI_OBJECT arg, *obj;
    ACPI_OBJECT_LIST arglist;
    struct pcpu *pcpu_data;
    struct acpi_cpu_softc *sc;
    struct acpi_softc *acpi_sc;
    ACPI_STATUS status;
    u_int features;
    int cpu_id, drv_count, i;
    driver_t **drivers;
    uint32_t cap_set[3];

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    sc = device_get_softc(dev);
    sc->cpu_dev = dev;
    sc->cpu_handle = acpi_get_handle(dev);
    cpu_id = acpi_get_magic(dev);
    cpu_softc[cpu_id] = sc;
    pcpu_data = pcpu_find(cpu_id);
    pcpu_data->pc_device = dev;
    sc->cpu_pcpu = pcpu_data;
    cpu_smi_cmd = AcpiGbl_FADT->SmiCmd;
    cpu_cst_cnt = AcpiGbl_FADT->CstCnt;

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
        device_printf(dev, "attach failed to get Processor obj - %s\n",
            AcpiFormatException(status));
        return (ENXIO);
    }
    obj = (ACPI_OBJECT *)buf.Pointer;
    sc->cpu_p_blk = obj->Processor.PblkAddress;
    sc->cpu_p_blk_len = obj->Processor.PblkLength;
    sc->cpu_acpi_id = obj->Processor.ProcId;
    AcpiOsFree(obj);
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n",
        device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len));

    /*
     * If this is the first cpu we attach, create and initialize the generic
     * resources that will be used by all acpi cpu devices.
     */
    if (device_get_unit(dev) == 0) {
        /* Assume we won't be using generic Cx mode by default */
        cpu_cx_generic = FALSE;

        /* Install hw.acpi.cpu sysctl tree */
        acpi_sc = acpi_device_get_parent_softc(dev);
        sysctl_ctx_init(&cpu_sysctl_ctx);
        cpu_sysctl_tree = SYSCTL_ADD_NODE(&cpu_sysctl_ctx,
            SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree), OID_AUTO, "cpu",
            CTLFLAG_RD, 0, "node for CPU children");

        /* Queue post cpu-probing task handler */
        AcpiOsQueueForExecution(OSD_PRIORITY_LO, acpi_cpu_startup, NULL);
    }

    /*
     * Before calling any CPU methods, collect child driver feature hints
     * and notify ACPI of them.  We support unified SMP power control
     * so advertise this ourselves.  Note this is not the same as independent
     * SMP control where each CPU can have different settings.
     */
    sc->cpu_features = ACPI_CAP_SMP_SAME | ACPI_CAP_SMP_SAME_C3;
    if (devclass_get_drivers(acpi_cpu_devclass, &drivers, &drv_count) == 0) {
        for (i = 0; i < drv_count; i++) {
            if (ACPI_GET_FEATURES(drivers[i], &features) == 0)
                sc->cpu_features |= features;
        }
        free(drivers, M_TEMP);
    }

    /*
     * CPU capabilities are specified as a buffer of 32-bit integers:
     * revision, count, and one or more capabilities.  The revision of
     * "1" is not specified anywhere but seems to match Linux.  We should
     * also support _OSC here.
     */
    if (sc->cpu_features) {
        arglist.Pointer = &arg;
        arglist.Count = 1;
        arg.Type = ACPI_TYPE_BUFFER;
        arg.Buffer.Length = sizeof(cap_set);
        arg.Buffer.Pointer = (uint8_t *)cap_set;
        cap_set[0] = 1; /* revision */
        cap_set[1] = 1; /* number of capabilities integers */
        cap_set[2] = sc->cpu_features;
        AcpiEvaluateObject(sc->cpu_handle, "_PDC", &arglist, NULL);
    }

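    /*
     * Illustrative only (not taken from any particular firmware): with just
     * the capabilities advertised above, the buffer handed to _PDC holds
     * three 32-bit words { 1 (revision), 1 (count), ACPI_CAP_SMP_SAME |
     * ACPI_CAP_SMP_SAME_C3 }, plus whatever feature bits child drivers
     * reported through ACPI_GET_FEATURES.
     */
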
    /* Probe for Cx state support. */
    acpi_cpu_cx_probe(sc);

    /* Finally, call identify and probe/attach for child devices. */
    bus_generic_probe(dev);
    bus_generic_attach(dev);

    return (0);
}

/*
 * Find the nth present CPU and return its pc_cpuid as well as set the
 * pc_acpi_id from the most reliable source.
 */
static int
acpi_pcpu_get_id(uint32_t idx, uint32_t *acpi_id, uint32_t *cpu_id)
{
    struct pcpu *pcpu_data;
    uint32_t i;

    KASSERT(acpi_id != NULL, ("Null acpi_id"));
    KASSERT(cpu_id != NULL, ("Null cpu_id"));
    for (i = 0; i <= mp_maxid; i++) {
        if (CPU_ABSENT(i))
            continue;
        pcpu_data = pcpu_find(i);
        KASSERT(pcpu_data != NULL, ("no pcpu data for %d", i));
        if (idx-- == 0) {
            /*
             * If pc_acpi_id was not initialized (e.g., a non-APIC UP box)
             * override it with the value from the ASL.  Otherwise, if the
             * two don't match, prefer the MADT-derived value.  Finally,
             * return the pc_cpuid to reference this processor.
             */
            if (pcpu_data->pc_acpi_id == 0xffffffff)
                pcpu_data->pc_acpi_id = *acpi_id;
            else if (pcpu_data->pc_acpi_id != *acpi_id)
                *acpi_id = pcpu_data->pc_acpi_id;
            *cpu_id = pcpu_data->pc_cpuid;
            return (0);
        }
    }

    return (ESRCH);
}

static struct resource_list *
acpi_cpu_get_rlist(device_t dev, device_t child)
{
    struct acpi_cpu_device *ad;

    ad = device_get_ivars(child);
    if (ad == NULL)
        return (NULL);
    return (&ad->ad_rl);
}

static device_t
acpi_cpu_add_child(device_t dev, int order, const char *name, int unit)
{
    struct acpi_cpu_device *ad;
    device_t child;

    if ((ad = malloc(sizeof(*ad), M_TEMP, M_NOWAIT | M_ZERO)) == NULL)
        return (NULL);

    resource_list_init(&ad->ad_rl);

    child = device_add_child_ordered(dev, order, name, unit);
    if (child != NULL)
        device_set_ivars(child, ad);
    else
        free(ad, M_TEMP);
    return (child);
}

static int
acpi_cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
    struct acpi_cpu_softc *sc;

    sc = device_get_softc(dev);
    switch (index) {
    case ACPI_IVAR_HANDLE:
        *result = (uintptr_t)sc->cpu_handle;
        break;
    case CPU_IVAR_PCPU:
        *result = (uintptr_t)sc->cpu_pcpu;
        break;
    default:
        return (ENOENT);
    }
    return (0);
}

static int
acpi_cpu_shutdown(device_t dev)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Allow children to shutdown first. */
    bus_generic_shutdown(dev);

    /* Disable any entry to the idle function. */
    cpu_disable_idle = TRUE;

    /* Signal and wait for all processors to exit acpi_cpu_idle(). */
    smp_rendezvous(NULL, NULL, NULL, NULL);

    return_VALUE (0);
}

static void
acpi_cpu_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;
    sc->cpu_cx_lowest = 0;

    /*
     * Check for the ACPI 2.0 _CST sleep states object.  If we can't find
     * any, we'll revert to generic FADT/P_BLK Cx control method which will
     * be handled by acpi_cpu_startup.  We need to defer to after having
     * probed all the cpus in the system before probing for generic Cx
     * states as we may already have found cpus with valid _CST packages.
     */
    if (!cpu_cx_generic && acpi_cpu_cx_cst(sc) != 0) {
        /*
         * We were unable to find a _CST package for this cpu or there
         * was an error parsing it.  Switch back to generic mode.
         */
        cpu_cx_generic = TRUE;
        device_printf(sc->cpu_dev, "Switching to generic Cx mode\n");
    }

    /*
     * TODO: _CSD Package should be checked here.
     */
}

static void
acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_GENERIC_ADDRESS gas;
    struct acpi_cx *cx_ptr;

    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;

    /* Use initial sleep value of 1 sec. to start with lowest idle state. */
    sc->cpu_prev_sleep = 1000000;

    /* C1 has been required since just after ACPI 1.0 */
    cx_ptr->type = ACPI_STATE_C1;
    cx_ptr->trans_lat = 0;
    cx_ptr++;
    sc->cpu_cx_count++;

    /*
     * The spec says P_BLK must be 6 bytes long.  However, some systems
     * use it to indicate a fractional set of features present so we
     * take 5 as C2.  Some may also have a value of 7 to indicate
     * another C3 but most use _CST for this (as required) and having
     * "only" C1-C3 is not a hardship.
     */
    if (sc->cpu_p_blk_len < 5)
        return;

    /* Validate and allocate resources for C2 (P_LVL2). */
    gas.AddressSpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
    gas.RegisterBitWidth = 8;
    if (AcpiGbl_FADT->Plvl2Lat <= 100) {
        gas.Address = sc->cpu_p_blk + 4;
        acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &sc->cpu_rid,
            &gas, &cx_ptr->p_lvlx, RF_SHAREABLE);
        if (cx_ptr->p_lvlx != NULL) {
            sc->cpu_rid++;
            cx_ptr->type = ACPI_STATE_C2;
            cx_ptr->trans_lat = AcpiGbl_FADT->Plvl2Lat;
            cx_ptr++;
            sc->cpu_cx_count++;
        }
    }
    if (sc->cpu_p_blk_len < 6)
        return;

    /* Validate and allocate resources for C3 (P_LVL3). */
    if (AcpiGbl_FADT->Plvl3Lat <= 1000) {
        gas.Address = sc->cpu_p_blk + 5;
        acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &sc->cpu_rid, &gas,
            &cx_ptr->p_lvlx, RF_SHAREABLE);
        if (cx_ptr->p_lvlx != NULL) {
            sc->cpu_rid++;
            cx_ptr->type = ACPI_STATE_C3;
            cx_ptr->trans_lat = AcpiGbl_FADT->Plvl3Lat;
            cx_ptr++;
            sc->cpu_cx_count++;
        }
    }

    /* Update the largest cx_count seen so far */
    if (sc->cpu_cx_count > cpu_cx_count)
        cpu_cx_count = sc->cpu_cx_count;
}

/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
static int
acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
{
    struct acpi_cx *cx_ptr;
    ACPI_STATUS status;
    ACPI_BUFFER buf;
    ACPI_OBJECT *top;
    ACPI_OBJECT *pkg;
    uint32_t count;
    int i;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status)) {
        device_printf(sc->cpu_dev, "Unable to find _CST method\n");
        return (ENXIO);
    }

    /* _CST is a package with a count and at least one Cx package. */
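    /*
     * For reference, a typical _CST object looks roughly like the ASL
     * sketch below (illustrative only; register types, latencies and power
     * numbers vary by platform):
     *
     *     Name (_CST, Package () {
     *         2,                                // number of Cx packages
     *         Package () {                      // C1
     *             ResourceTemplate () { Register (FFixedHW, 0, 0, 0) },
     *             1, 1, 1000 },                 // type, latency (us), power (mW)
     *         Package () {                      // C2
     *             ResourceTemplate () { Register (SystemIO, 8, 0, 0x1014) },
     *             2, 50, 500 }
     *     })
     */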
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) {
        device_printf(sc->cpu_dev, "Invalid _CST package\n");
        AcpiOsFree(buf.Pointer);
        return (ENXIO);
    }
    if (count != top->Package.Count - 1) {
        device_printf(sc->cpu_dev, "Invalid _CST state count (%d != %d)\n",
            count, top->Package.Count - 1);
        count = top->Package.Count - 1;
    }
    if (count > MAX_CX_STATES) {
        device_printf(sc->cpu_dev, "_CST has too many states (%d)\n", count);
        count = MAX_CX_STATES;
    }

    /* Set up all valid states. */
    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;
    for (i = 0; i < count; i++) {
        pkg = &top->Package.Elements[i + 1];
        if (!ACPI_PKG_VALID(pkg, 4) ||
            acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 ||
            acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 ||
            acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) {

            device_printf(sc->cpu_dev, "skipping invalid Cx state package\n");
            continue;
        }

        /* Validate the state to see if we should use it. */
        switch (cx_ptr->type) {
        case ACPI_STATE_C1:
            sc->cpu_non_c3 = i;
            cx_ptr++;
            sc->cpu_cx_count++;
            continue;
        case ACPI_STATE_C2:
            if (cx_ptr->trans_lat > 100) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu%d: C2[%d] not available.\n",
                    device_get_unit(sc->cpu_dev), i));
                continue;
            }
            sc->cpu_non_c3 = i;
            break;
        case ACPI_STATE_C3:
        default:
            if (cx_ptr->trans_lat > 1000 ||
                (cpu_quirks & CPU_QUIRK_NO_C3) != 0) {

                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                    "acpi_cpu%d: C3[%d] not available.\n",
                    device_get_unit(sc->cpu_dev), i));
                continue;
            }
            break;
        }

#ifdef notyet
        /* Free up any previous register. */
        if (cx_ptr->p_lvlx != NULL) {
            bus_release_resource(sc->cpu_dev, 0, 0, cx_ptr->p_lvlx);
            cx_ptr->p_lvlx = NULL;
        }
#endif

        /* Allocate the control register for C2 or C3. */
        acpi_PkgGas(sc->cpu_dev, pkg, 0, &cx_ptr->res_type, &sc->cpu_rid,
            &cx_ptr->p_lvlx, RF_SHAREABLE);
        if (cx_ptr->p_lvlx) {
            sc->cpu_rid++;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu%d: Got C%d - %d latency\n",
                device_get_unit(sc->cpu_dev), cx_ptr->type,
                cx_ptr->trans_lat));
            cx_ptr++;
            sc->cpu_cx_count++;
        }
    }
    AcpiOsFree(buf.Pointer);

    return (0);
}

/*
 * Call this *after* all CPUs have been attached.
 */
static void
acpi_cpu_startup(void *arg)
{
    struct acpi_cpu_softc *sc;
    int i;

    /* Get set of CPU devices */
    devclass_get_devices(acpi_cpu_devclass, &cpu_devices, &cpu_ndevices);

    /*
     * Set up any quirks that might be necessary now that we have probed
     * all the CPUs.
     */
    acpi_cpu_quirks();

    cpu_cx_count = 0;
    if (cpu_cx_generic) {
        /*
         * We are using generic Cx mode, probe for available Cx states
         * for all processors.
         */
        for (i = 0; i < cpu_ndevices; i++) {
            sc = device_get_softc(cpu_devices[i]);
            acpi_cpu_generic_cx_probe(sc);
        }

        /*
         * Find the highest Cx state common to all CPUs
         * in the system, taking quirks into account.
         */
        for (i = 0; i < cpu_ndevices; i++) {
            sc = device_get_softc(cpu_devices[i]);
            if (sc->cpu_cx_count < cpu_cx_count)
                cpu_cx_count = sc->cpu_cx_count;
        }
    } else {
        /*
         * We are using _CST mode, remove C3 state if necessary.
         * Update the largest Cx state supported in the global cpu_cx_count.
         * It will be used in the global Cx sysctl handler.
         * As we now know for sure that we will be using _CST mode,
         * install our notify handler.
         */
        for (i = 0; i < cpu_ndevices; i++) {
            sc = device_get_softc(cpu_devices[i]);
            if ((cpu_quirks & CPU_QUIRK_NO_C3) != 0) {
                sc->cpu_cx_count = sc->cpu_non_c3 + 1;
            }
            if (sc->cpu_cx_count > cpu_cx_count)
                cpu_cx_count = sc->cpu_cx_count;
            AcpiInstallNotifyHandler(sc->cpu_handle, ACPI_DEVICE_NOTIFY,
                acpi_cpu_notify, sc);
        }
    }

    /* Perform Cx final initialization. */
    for (i = 0; i < cpu_ndevices; i++) {
        sc = device_get_softc(cpu_devices[i]);
        acpi_cpu_startup_cx(sc);
    }

    /* Add a sysctl handler to handle global Cx lowest setting */
    SYSCTL_ADD_PROC(&cpu_sysctl_ctx, SYSCTL_CHILDREN(cpu_sysctl_tree),
        OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
        NULL, 0, acpi_cpu_global_cx_lowest_sysctl, "A",
        "Global lowest Cx sleep state to use");

    /* Take over idling from cpu_idle_default(). */
    cpu_cx_lowest = 0;
    cpu_disable_idle = FALSE;
    cpu_idle_hook = acpi_cpu_idle;
}

static void
acpi_cpu_startup_cx(struct acpi_cpu_softc *sc)
{
    struct sbuf sb;
    int i;

    /*
     * Set up the list of Cx states
     */
    sc->cpu_non_c3 = 0;
    sbuf_new(&sb, sc->cpu_cx_supported, sizeof(sc->cpu_cx_supported),
        SBUF_FIXEDLEN);
    for (i = 0; i < sc->cpu_cx_count; i++) {
        sbuf_printf(&sb, "C%d/%d ", i + 1, sc->cpu_cx_states[i].trans_lat);
        if (sc->cpu_cx_states[i].type < ACPI_STATE_C3)
            sc->cpu_non_c3 = i;
    }
    sbuf_trim(&sb);
    sbuf_finish(&sb);

    SYSCTL_ADD_STRING(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
        OID_AUTO, "cx_supported", CTLFLAG_RD,
        sc->cpu_cx_supported, 0,
        "Cx/microsecond values for supported Cx states");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
        OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW,
        (void *)sc, 0, acpi_cpu_cx_lowest_sysctl, "A",
        "lowest Cx sleep state to use");
    SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx,
        SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)),
        OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD,
        (void *)sc, 0, acpi_cpu_usage_sysctl, "A",
        "percent usage for each Cx state");

#ifdef notyet
    /* Signal platform that we can handle _CST notification. */
    if (!cpu_cx_generic && cpu_cst_cnt != 0) {
        ACPI_LOCK(acpi);
        AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8);
        ACPI_UNLOCK(acpi);
    }
#endif
}

/*
 * Idle the CPU in the lowest state possible.  This function is called with
 * interrupts disabled.  Note that once it re-enables interrupts, a task
 * switch can occur so do not access shared data (i.e. the softc) after
 * interrupts are re-enabled.
 */
static void
acpi_cpu_idle(void)
{
    struct acpi_cpu_softc *sc;
    struct acpi_cx *cx_next;
    uint32_t start_time, end_time;
    int bm_active, cx_next_idx, i;

    /* If disabled, return immediately. */
    if (cpu_disable_idle) {
        ACPI_ENABLE_IRQS();
        return;
    }

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no ACPI processor object for this CPU.  This occurs
     * for logical CPUs in the HTT case.
     */
    sc = cpu_softc[PCPU_GET(cpuid)];
    if (sc == NULL) {
        acpi_cpu_c1();
        return;
    }

    /*
     * If we slept 100 us or more, use the lowest Cx state.  Otherwise,
     * find the lowest state that has a latency less than or equal to
     * the length of our last sleep.
     */
    cx_next_idx = sc->cpu_cx_lowest;
    if (sc->cpu_prev_sleep < 100) {
        /*
         * If we sleep too short all the time, this system may not implement
         * C2/3 correctly (i.e. reads return immediately).  In this case,
         * back off and use the next higher level.
         * It seems that when you have a dual core cpu (like the Intel Core
         * Duo) both cores will get out of C3 state as soon as one of them
         * requires it.  This breaks the sleep detection logic as the sleep
         * counter is local to each cpu.  Disable the sleep logic for now as
         * a workaround if there's more than one CPU.  The right fix would
         * probably be to add quirks for systems that don't really support
         * C3 state.
         */
        if (mp_ncpus < 2 && sc->cpu_prev_sleep <= 1) {
            sc->cpu_short_slp++;
            if (sc->cpu_short_slp == 1000 && sc->cpu_cx_lowest != 0) {
                if (sc->cpu_non_c3 == sc->cpu_cx_lowest && sc->cpu_non_c3 != 0)
                    sc->cpu_non_c3--;
                sc->cpu_cx_lowest--;
                sc->cpu_short_slp = 0;
                device_printf(sc->cpu_dev,
                    "too many short sleeps, backing off to C%d\n",
                    sc->cpu_cx_lowest + 1);
            }
        } else
            sc->cpu_short_slp = 0;

        for (i = sc->cpu_cx_lowest; i >= 0; i--)
            if (sc->cpu_cx_states[i].trans_lat <= sc->cpu_prev_sleep) {
                cx_next_idx = i;
                break;
            }
    }

    /*
     * Check for bus master activity.  If there was activity, clear
     * the bit and use the lowest non-C3 state.  Note that the USB
     * driver polling for new devices keeps this bit set all the
     * time if USB is loaded.
     */
    if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
        AcpiGetRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active,
            ACPI_MTX_DO_NOT_LOCK);
        if (bm_active != 0) {
            AcpiSetRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1,
                ACPI_MTX_DO_NOT_LOCK);
            cx_next_idx = min(cx_next_idx, sc->cpu_non_c3);
        }
    }

    /* Select the next state and update statistics. */
    cx_next = &sc->cpu_cx_states[cx_next_idx];
    sc->cpu_cx_stats[cx_next_idx]++;
    KASSERT(cx_next->type != ACPI_STATE_C0, ("acpi_cpu_idle: C0 sleep"));

    /*
     * Execute HLT (or equivalent) and wait for an interrupt.  We can't
     * calculate the time spent in C1 since the place we wake up is an
     * ISR.  Assume we slept one quantum and return.
     */
    if (cx_next->type == ACPI_STATE_C1) {
        sc->cpu_prev_sleep = 1000000 / hz;
        acpi_cpu_c1();
        return;
    }

    /*
     * For C3, disable bus master arbitration and enable bus master wake
     * if BM control is available, otherwise flush the CPU cache.
     */
    if (cx_next->type == ACPI_STATE_C3) {
        if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
            AcpiSetRegister(ACPI_BITREG_ARB_DISABLE, 1, ACPI_MTX_DO_NOT_LOCK);
            AcpiSetRegister(ACPI_BITREG_BUS_MASTER_RLD, 1,
                ACPI_MTX_DO_NOT_LOCK);
        } else
            ACPI_FLUSH_CPU_CACHE();
    }

    /*
     * Read from P_LVLx to enter C2(+), checking time spent asleep.
     * Use the ACPI timer for measuring sleep time.  Since we need to
     * get the time very close to the CPU start/stop clock logic, this
     * is the only reliable time source.
     */
    AcpiHwLowLevelRead(32, &start_time, &AcpiGbl_FADT->XPmTmrBlk);
    CPU_GET_REG(cx_next->p_lvlx, 1);

    /*
     * Read the end time twice.  Since it may take an arbitrary time
     * to enter the idle state, the first read may be executed before
     * the processor has stopped.  Doing it again provides enough
     * margin that we are certain to have a correct value.
     */
    AcpiHwLowLevelRead(32, &end_time, &AcpiGbl_FADT->XPmTmrBlk);
    AcpiHwLowLevelRead(32, &end_time, &AcpiGbl_FADT->XPmTmrBlk);

    /* Enable bus master arbitration and disable bus master wakeup. */
    if (cx_next->type == ACPI_STATE_C3 &&
        (cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) {
        AcpiSetRegister(ACPI_BITREG_ARB_DISABLE, 0, ACPI_MTX_DO_NOT_LOCK);
        AcpiSetRegister(ACPI_BITREG_BUS_MASTER_RLD, 0, ACPI_MTX_DO_NOT_LOCK);
    }
    ACPI_ENABLE_IRQS();

    /* Find the actual time asleep in microseconds, minus overhead. */
    end_time = acpi_TimerDelta(end_time, start_time);
    sc->cpu_prev_sleep = PM_USEC(end_time) - cx_next->trans_lat;
}

/*
 * Re-evaluate the _CST object when we are notified that it changed.
 *
 * XXX Re-evaluation disabled until locking is done.
 */
static void
acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context)
{
    struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)context;

    if (notify != ACPI_NOTIFY_CX_STATES)
        return;

    device_printf(sc->cpu_dev, "Cx states changed\n");
    /* acpi_cpu_cx_cst(sc); */
}

static int
acpi_cpu_quirks(void)
{
    device_t acpi_dev;

    /*
     * Bus mastering arbitration control is needed to keep caches coherent
     * while sleeping in C3.  If it's not present but a working flush cache
     * instruction is present, flush the caches before entering C3 instead.
     * Otherwise, just disable C3 completely.
     */
    if (AcpiGbl_FADT->V1_Pm2CntBlk == 0 || AcpiGbl_FADT->Pm2CntLen == 0) {
        if (AcpiGbl_FADT->WbInvd && AcpiGbl_FADT->WbInvdFlush == 0) {
            cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu: no BM control, using flush cache method\n"));
        } else {
            cpu_quirks |= CPU_QUIRK_NO_C3;
            ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                "acpi_cpu: no BM control, C3 not available\n"));
        }
    }

    /*
     * If we are using generic Cx mode, C3 on multiple CPUs requires using
     * the expensive flush cache instruction.
     */
    if (cpu_cx_generic && mp_ncpus > 1)
        cpu_quirks |= CPU_QUIRK_NO_BM_CTRL;

    /* Look for various quirks of the PIIX4 part. */
    acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
    if (acpi_dev != NULL) {
        switch (pci_get_revid(acpi_dev)) {
        /*
         * Disable C3 support for all PIIX4 chipsets.  Some of these parts
         * do not report the BMIDE status to the BM status register and
         * others have a livelock bug if Type-F DMA is enabled.  Linux
         * works around the BMIDE bug by reading the BM status directly
         * but we take the simpler approach of disabling C3 for these
         * parts.
         *
         * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
         * Livelock") from the January 2002 PIIX4 specification update.
         * Applies to all PIIX4 models.
         */
        case PCI_REVISION_4E:
        case PCI_REVISION_4M:
            cpu_quirks |= CPU_QUIRK_NO_C3;
            break;
        default:
            break;
        }
    }

    return (0);
}

static int
acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    struct sbuf sb;
    char buf[128];
    int i;
    uintmax_t fract, sum, whole;

    sc = (struct acpi_cpu_softc *) arg1;
    sum = 0;
    for (i = 0; i < sc->cpu_cx_count; i++)
        sum += sc->cpu_cx_stats[i];
    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
    for (i = 0; i < sc->cpu_cx_count; i++) {
        if (sum > 0) {
            whole = (uintmax_t)sc->cpu_cx_stats[i] * 100;
            fract = (whole % sum) * 100;
            sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum),
                (u_int)(fract / sum));
        } else
            sbuf_printf(&sb, "0%% ");
    }
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req);
    sbuf_delete(&sb);

    return (0);
}

static int
acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    char state[8];
    int val, error, i;

    sc = (struct acpi_cpu_softc *) arg1;
    snprintf(state, sizeof(state), "C%d", sc->cpu_cx_lowest + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
        return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
        return (EINVAL);
    val = (int) strtol(state + 1, NULL, 10) - 1;
    if (val < 0 || val > sc->cpu_cx_count - 1)
        return (EINVAL);

    ACPI_SERIAL_BEGIN(cpu);
    sc->cpu_cx_lowest = val;

    /* If not disabling, cache the new lowest non-C3 state. */
    sc->cpu_non_c3 = 0;
    for (i = sc->cpu_cx_lowest; i >= 0; i--) {
        if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) {
            sc->cpu_non_c3 = i;
            break;
        }
    }

    /* Reset the statistics counters. */
    bzero(sc->cpu_cx_stats, sizeof(sc->cpu_cx_stats));
    ACPI_SERIAL_END(cpu);

    return (0);
}

static int
acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    char state[8];
    int val, error, i, j;

    snprintf(state, sizeof(state), "C%d", cpu_cx_lowest + 1);
    error = sysctl_handle_string(oidp, state, sizeof(state), req);
    if (error != 0 || req->newptr == NULL)
        return (error);
    if (strlen(state) < 2 || toupper(state[0]) != 'C')
        return (EINVAL);
    val = (int) strtol(state + 1, NULL, 10) - 1;
    if (val < 0 || val > cpu_cx_count - 1)
        return (EINVAL);

    cpu_cx_lowest = val;

    /*
     * Update the new lowest usable Cx state for all CPUs.
     */
    ACPI_SERIAL_BEGIN(cpu);
    for (i = 0; i < cpu_ndevices; i++) {
        sc = device_get_softc(cpu_devices[i]);
        sc->cpu_cx_lowest = cpu_cx_lowest;
        sc->cpu_non_c3 = 0;
        /* Walk down from the new lowest state to cache the lowest non-C3. */
        for (j = sc->cpu_cx_lowest; j >= 0; j--) {
            if (sc->cpu_cx_states[j].type < ACPI_STATE_C3) {
                sc->cpu_non_c3 = j;
                break;
            }
        }

        /* Reset the statistics counters. */
        bzero(sc->cpu_cx_stats, sizeof(sc->cpu_cx_stats));
    }
    ACPI_SERIAL_END(cpu);

    return (0);
}