acpi_cpu.c revision 123325
/*-
 * Copyright (c) 2003 Nate Lawson (SDG)
 * Copyright (c) 2001 Michael Smith
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/dev/acpica/acpi_cpu.c 123325 2003-12-09 03:01:54Z njl $");

#include "opt_acpi.h"
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/power.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>

#include <dev/pci/pcivar.h>
#include <machine/atomic.h>
#include <machine/bus.h>
#ifdef __ia64__
#include <machine/pal.h>
#endif
#include <sys/rman.h>

#include "acpi.h"
#include <dev/acpica/acpivar.h>

/*
 * Support for ACPI Processor devices, including ACPI 2.0 throttling
 * and C[1-3] sleep states.
 *
 * TODO: implement scans of all CPUs to be sure all Cx states are
 * equivalent.
 */

/* Hooks for the ACPI CA debugging infrastructure */
#define _COMPONENT	ACPI_PROCESSOR
ACPI_MODULE_NAME("PROCESSOR")

struct acpi_cx {
    struct resource	*p_lvlx;	/* Register to read to enter state. */
    uint32_t		 type;		/* C1-3 (C4 and up treated as C3). */
    uint32_t		 trans_lat;	/* Transition latency (usec). */
    uint32_t		 power;		/* Power consumed (mW). */
};
#define MAX_CX_STATES	 8

struct acpi_cx_stats {
    int			 long_slp;	/* Count of sleeps >= trans_lat. */
    int			 short_slp;	/* Count of sleeps < trans_lat. */
};

struct acpi_cpu_softc {
    device_t		 cpu_dev;
    ACPI_HANDLE		 cpu_handle;
    uint32_t		 acpi_id;	/* ACPI processor id */
    uint32_t		 cpu_p_blk;	/* ACPI P_BLK location */
    uint32_t		 cpu_p_blk_len;	/* P_BLK length (must be 6). */
    struct resource	*cpu_p_cnt;	/* Throttling control register */
    struct acpi_cx	 cpu_cx_states[MAX_CX_STATES];
    int			 cpu_cx_count;	/* Number of valid Cx states. */
};

#define CPU_GET_REG(reg, width)						\
    (bus_space_read_ ## width(rman_get_bustag((reg)),			\
			      rman_get_bushandle((reg)), 0))
#define CPU_SET_REG(reg, width, val)					\
    (bus_space_write_ ## width(rman_get_bustag((reg)),			\
			       rman_get_bushandle((reg)), 0, (val)))

/*
 * Speeds are stored in counts, from 1 to CPU_MAX_SPEED, and
 * reported to the user in tenths of a percent.
 */
static uint32_t		cpu_duty_offset;
static uint32_t		cpu_duty_width;
#define CPU_MAX_SPEED		(1 << cpu_duty_width)
#define CPU_SPEED_PERCENT(x)	((1000 * (x)) / CPU_MAX_SPEED)
#define CPU_SPEED_PRINTABLE(x)	(CPU_SPEED_PERCENT(x) / 10),	\
				(CPU_SPEED_PERCENT(x) % 10)
#define CPU_P_CNT_THT_EN	(1<<4)
#define PM_USEC(x)		((x) >> 2)	/* ~4 clocks per usec (3.579545 MHz) */
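/*
 * Worked example of the macros above (illustrative values, not from
 * any particular FADT): with cpu_duty_width == 3, CPU_MAX_SPEED is
 * 1 << 3 == 8, so speeds run from 1 to 8.  CPU_SPEED_PERCENT(5) is
 * (1000 * 5) / 8 == 625, which CPU_SPEED_PRINTABLE() splits into
 * "62.5"%.  PM_USEC() relies on the ACPI PM timer's 3.579545 MHz
 * rate, about 3.58 ticks per microsecond, so the shift by 2 (divide
 * by 4) is a cheap approximation that slightly underestimates the
 * elapsed time.
 */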
#define ACPI_CPU_NOTIFY_PERF_STATES	0x80	/* _PSS changed. */
#define ACPI_CPU_NOTIFY_CX_STATES	0x81	/* _CST changed. */

#define CPU_QUIRK_NO_C3		0x0001	/* C3-type states are not usable. */
#define CPU_QUIRK_NO_THROTTLE	0x0002	/* Throttling is not usable. */

#define PCI_VENDOR_INTEL	0x8086
#define PCI_DEVICE_82371AB_3	0x7113	/* PIIX4 chipset for quirks. */
#define PCI_REVISION_A_STEP	0
#define PCI_REVISION_B_STEP	1
#define PCI_REVISION_4E		2
#define PCI_REVISION_4M		3

/* Platform hardware resource information. */
static uint32_t		 cpu_smi_cmd;	/* Value to write to SMI_CMD. */
static uint8_t		 cpu_pstate_cnt;/* Register to take over throttling. */
static uint8_t		 cpu_cst_cnt;	/* Indicate we are _CST aware. */
static uint32_t		 cpu_rid;	/* Driver-wide resource id. */
static uint32_t		 cpu_quirks;	/* Indicate any hardware bugs. */

/* Runtime state. */
static int		 cpu_cx_count;	/* Number of valid states */
static uint32_t		 cpu_cx_next;	/* State to use for next sleep. */
static uint32_t		 cpu_non_c3;	/* Index of lowest non-C3 state. */
static struct acpi_cx_stats cpu_cx_stats[MAX_CX_STATES];
static int		 cpu_idle_busy;	/* Count of CPUs in acpi_cpu_idle. */

/* Values for sysctl. */
static uint32_t		 cpu_current_state;
static uint32_t		 cpu_performance_state;
static uint32_t		 cpu_economy_state;
static uint32_t		 cpu_max_state;
static int		 cpu_cx_lowest;
static char		 cpu_cx_supported[64];

static device_t		*cpu_devices;
static int		 cpu_ndevices;
static struct acpi_cpu_softc **cpu_softc;

static struct sysctl_ctx_list	acpi_cpu_sysctl_ctx;
static struct sysctl_oid	*acpi_cpu_sysctl_tree;

static int	acpi_cpu_probe(device_t dev);
static int	acpi_cpu_attach(device_t dev);
static int	acpi_pcpu_get_id(uint32_t idx, uint32_t *acpi_id,
				 uint32_t *cpu_id);
static int	acpi_cpu_shutdown(device_t dev);
static int	acpi_cpu_throttle_probe(struct acpi_cpu_softc *sc);
static int	acpi_cpu_cx_probe(struct acpi_cpu_softc *sc);
static int	acpi_cpu_cx_cst(struct acpi_cpu_softc *sc);
static void	acpi_cpu_startup(void *arg);
static void	acpi_cpu_startup_throttling(void);
static void	acpi_cpu_startup_cx(void);
static void	acpi_cpu_throttle_set(uint32_t speed);
static void	acpi_cpu_idle(void);
static void	acpi_cpu_c1(void);
static void	acpi_pm_ticksub(uint32_t *end, const uint32_t *start);
static void	acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context);
static int	acpi_cpu_quirks(struct acpi_cpu_softc *sc);
static void	acpi_cpu_power_profile(void *arg);
static int	acpi_cpu_throttle_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_history_sysctl(SYSCTL_HANDLER_ARGS);
static int	acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS);

static device_method_t acpi_cpu_methods[] = {
    /* Device interface */
    DEVMETHOD(device_probe,	acpi_cpu_probe),
    DEVMETHOD(device_attach,	acpi_cpu_attach),
    DEVMETHOD(device_shutdown,	acpi_cpu_shutdown),

    {0, 0}
};

static driver_t acpi_cpu_driver = {
    "acpi_cpu",
    acpi_cpu_methods,
    sizeof(struct acpi_cpu_softc),
};

static devclass_t acpi_cpu_devclass;
DRIVER_MODULE(acpi_cpu, acpi, acpi_cpu_driver, acpi_cpu_devclass, 0, 0);

static int
acpi_cpu_probe(device_t dev)
{
    if (!acpi_disabled("cpu") && acpi_get_type(dev) == ACPI_TYPE_PROCESSOR) {
	device_set_desc(dev, "CPU");
	if (cpu_softc == NULL)
	    cpu_softc = malloc(sizeof(struct acpi_cpu_softc *) *
		(mp_maxid + 1), M_TEMP /* XXX */, M_WAITOK | M_ZERO);
	return (0);
    }

    return (ENXIO);
}

static int
acpi_cpu_attach(device_t dev)
{
    struct acpi_cpu_softc *sc;
    struct acpi_softc	  *acpi_sc;
    ACPI_OBJECT		   pobj;
    ACPI_BUFFER		   buf;
    ACPI_STATUS		   status;
    int			   thr_ret, cx_ret, cpu_id;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    ACPI_ASSERTLOCK;

    sc = device_get_softc(dev);
    sc->cpu_dev = dev;
    sc->cpu_handle = acpi_get_handle(dev);

    /* Get our Processor object. */
    buf.Pointer = &pobj;
    buf.Length = sizeof(pobj);
    status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf);
    if (ACPI_FAILURE(status)) {
	device_printf(dev, "Couldn't get Processor object - %s\n",
		      AcpiFormatException(status));
	return_VALUE (ENXIO);
    }
    if (pobj.Type != ACPI_TYPE_PROCESSOR) {
	device_printf(dev, "Processor object has bad type %d\n", pobj.Type);
	return_VALUE (ENXIO);
    }

    /*
     * Find the processor associated with our unit.  We could use the
     * ProcId as a key, however, some boxes do not have the same values
     * in their Processor object as the ProcId values in the MADT.
     */
    sc->acpi_id = pobj.Processor.ProcId;
    if (acpi_pcpu_get_id(device_get_unit(dev), &sc->acpi_id, &cpu_id) != 0)
	return_VALUE (ENXIO);

    /*
     * Check if we already probed this processor.  We scan the bus twice
     * so it's possible we've already seen this one.
     */
    if (cpu_softc[cpu_id] != NULL)
	return (ENXIO);
    cpu_softc[cpu_id] = sc;

    /* Get various global values from the Processor object. */
    sc->cpu_p_blk = pobj.Processor.PblkAddress;
    sc->cpu_p_blk_len = pobj.Processor.PblkLength;
    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n",
		     device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len));

    acpi_sc = acpi_device_get_parent_softc(dev);
    sysctl_ctx_init(&acpi_cpu_sysctl_ctx);
    acpi_cpu_sysctl_tree = SYSCTL_ADD_NODE(&acpi_cpu_sysctl_ctx,
			       SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree),
			       OID_AUTO, "cpu", CTLFLAG_RD, 0, "");

    /* If this is the first device probed, check for quirks. */
    if (device_get_unit(dev) == 0)
	acpi_cpu_quirks(sc);

    /*
     * Probe for throttling and Cx state support.
     * If none of these is present, free up unused resources.
     */
    thr_ret = acpi_cpu_throttle_probe(sc);
    cx_ret = acpi_cpu_cx_probe(sc);
    if (thr_ret == 0 || cx_ret == 0) {
	status = AcpiInstallNotifyHandler(sc->cpu_handle, ACPI_DEVICE_NOTIFY,
					  acpi_cpu_notify, sc);
	if (device_get_unit(dev) == 0)
	    AcpiOsQueueForExecution(OSD_PRIORITY_LO, acpi_cpu_startup, NULL);
    } else {
	sysctl_ctx_free(&acpi_cpu_sysctl_ctx);
    }

    return_VALUE (0);
}

/*
 * Find the nth present CPU and return its pc_cpuid as well as set the
 * pc_acpi_id from the most reliable source.
 */
static int
acpi_pcpu_get_id(uint32_t idx, uint32_t *acpi_id, uint32_t *cpu_id)
{
    struct pcpu	*pcpu_data;
    uint32_t	 i;

    KASSERT(acpi_id != NULL, ("Null acpi_id"));
    KASSERT(cpu_id != NULL, ("Null cpu_id"));
    for (i = 0; i <= mp_maxid; i++) {
	if (CPU_ABSENT(i))
	    continue;
	pcpu_data = pcpu_find(i);
	KASSERT(pcpu_data != NULL, ("no pcpu data for %d", i));
	if (idx-- == 0) {
	    /*
	     * If pc_acpi_id was not initialized (e.g., a non-APIC UP box)
	     * override it with the value from the ASL.  Otherwise, if the
	     * two don't match, prefer the MADT-derived value.  Finally,
	     * return the pc_cpuid to reference this processor.
	     */
	    if (pcpu_data->pc_acpi_id == 0xffffffff)
		pcpu_data->pc_acpi_id = *acpi_id;
	    else if (pcpu_data->pc_acpi_id != *acpi_id)
		*acpi_id = pcpu_data->pc_acpi_id;
	    *cpu_id = pcpu_data->pc_cpuid;
	    return (0);
	}
    }

    return (ESRCH);
}
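/*
 * Example of the reconciliation above (hypothetical values): if the
 * MADT gave CPU 1 a pc_acpi_id of 2 but the ASL Processor object has
 * ProcId 1, the caller's *acpi_id of 1 is overwritten with 2, since
 * the MADT-derived value is considered more reliable.  On a UP box
 * with no APIC, pc_acpi_id is still 0xffffffff here, so the ASL
 * value is adopted instead.
 */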
static int
acpi_cpu_shutdown(device_t dev)
{
    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Disable any entry to the idle function. */
    cpu_cx_count = 0;

    /* Wait for all processors to exit acpi_cpu_idle(). */
    smp_rendezvous(NULL, NULL, NULL, NULL);
    while (cpu_idle_busy > 0)
	DELAY(1);

    return_VALUE (0);
}

static int
acpi_cpu_throttle_probe(struct acpi_cpu_softc *sc)
{
    uint32_t		 duty_end;
    ACPI_BUFFER		 buf;
    ACPI_OBJECT		 obj;
    ACPI_GENERIC_ADDRESS gas;
    ACPI_STATUS		 status;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    ACPI_ASSERTLOCK;

    /* Get throttling parameters from the FADT.  0 means not supported. */
    if (device_get_unit(sc->cpu_dev) == 0) {
	cpu_smi_cmd = AcpiGbl_FADT->SmiCmd;
	cpu_pstate_cnt = AcpiGbl_FADT->PstateCnt;
	cpu_cst_cnt = AcpiGbl_FADT->CstCnt;
	cpu_duty_offset = AcpiGbl_FADT->DutyOffset;
	cpu_duty_width = AcpiGbl_FADT->DutyWidth;
    }
    if (cpu_duty_width == 0 || (cpu_quirks & CPU_QUIRK_NO_THROTTLE) != 0)
	return (ENXIO);

    /* Validate the duty offset/width. */
    duty_end = cpu_duty_offset + cpu_duty_width - 1;
    if (duty_end > 31) {
	device_printf(sc->cpu_dev, "CLK_VAL field overflows P_CNT register\n");
	return (ENXIO);
    }
    if (cpu_duty_offset <= 4 && duty_end >= 4) {
	device_printf(sc->cpu_dev, "CLK_VAL field overlaps THT_EN bit\n");
	return (ENXIO);
    }

    /*
     * Look for a _PTC object that supplies the P_CNT register; if it
     * is not present, fall back to using the processor's P_BLK to
     * find P_CNT.
     *
     * Note that some systems seem to duplicate the P_BLK pointer
     * across multiple CPUs, so not getting the resource is not fatal.
     */
    buf.Pointer = &obj;
    buf.Length = sizeof(obj);
    status = AcpiEvaluateObject(sc->cpu_handle, "_PTC", NULL, &buf);
    if (ACPI_SUCCESS(status)) {
	if (obj.Buffer.Length < sizeof(ACPI_GENERIC_ADDRESS) + 3) {
	    device_printf(sc->cpu_dev, "_PTC buffer too small\n");
	    return (ENXIO);
	}
	memcpy(&gas, obj.Buffer.Pointer + 3, sizeof(gas));
	sc->cpu_p_cnt = acpi_bus_alloc_gas(sc->cpu_dev, &cpu_rid, &gas);
	if (sc->cpu_p_cnt != NULL) {
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_CNT from _PTC\n",
			     device_get_unit(sc->cpu_dev)));
	}
    }

    /* If _PTC not present or other failure, try the P_BLK. */
    if (sc->cpu_p_cnt == NULL) {
	/* The spec says P_BLK must be at least 6 bytes long. */
	if (sc->cpu_p_blk_len != 6)
	    return (ENXIO);
	gas.Address = sc->cpu_p_blk;
	gas.AddressSpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
	gas.RegisterBitWidth = 32;
	sc->cpu_p_cnt = acpi_bus_alloc_gas(sc->cpu_dev, &cpu_rid, &gas);
	if (sc->cpu_p_cnt != NULL) {
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_CNT from P_BLK\n",
			     device_get_unit(sc->cpu_dev)));
	} else {
	    device_printf(sc->cpu_dev, "Failed to attach throttling P_CNT\n");
	    return (ENXIO);
	}
    }
    cpu_rid++;

    return (0);
}
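/*
 * Illustration of the duty-field validation above: THT_EN is bit 4
 * of P_CNT (see CPU_P_CNT_THT_EN), so with cpu_duty_offset == 1 and
 * cpu_duty_width == 3 the CLK_VAL field spans bits 1-3 and is
 * accepted, while offset 3/width 3 would span bits 3-5, overlap
 * THT_EN, and be rejected.  These offsets are examples only; the
 * real values come from the FADT.
 */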
static int
acpi_cpu_cx_probe(struct acpi_cpu_softc *sc)
{
    ACPI_GENERIC_ADDRESS gas;
    struct acpi_cx	*cx_ptr;
    int			 error;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    /* Bus mastering arbitration control is needed for C3. */
    if (AcpiGbl_FADT->V1_Pm2CntBlk == 0 || AcpiGbl_FADT->Pm2CntLen == 0) {
	cpu_quirks |= CPU_QUIRK_NO_C3;
	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			 "acpi_cpu%d: No BM control, C3 disabled\n",
			 device_get_unit(sc->cpu_dev)));
    }

    /*
     * First, check for the ACPI 2.0 _CST sleep states object.
     * If not usable, fall back to the P_BLK's P_LVL2 and P_LVL3.
     */
    sc->cpu_cx_count = 0;
    error = acpi_cpu_cx_cst(sc);
    if (error != 0) {
	cx_ptr = sc->cpu_cx_states;

	/* C1 has been required since just after ACPI 1.0 */
	cx_ptr->type = ACPI_STATE_C1;
	cx_ptr->trans_lat = 0;
	cpu_non_c3 = 0;
	cx_ptr++;
	sc->cpu_cx_count++;

	if (sc->cpu_p_blk_len != 6)
	    goto done;

	/* Validate and allocate resources for C2 (P_LVL2). */
	gas.AddressSpaceId = ACPI_ADR_SPACE_SYSTEM_IO;
	gas.RegisterBitWidth = 8;
	if (AcpiGbl_FADT->Plvl2Lat < 100) {
	    gas.Address = sc->cpu_p_blk + 4;
	    cx_ptr->p_lvlx = acpi_bus_alloc_gas(sc->cpu_dev, &cpu_rid, &gas);
	    if (cx_ptr->p_lvlx != NULL) {
		cpu_rid++;
		cx_ptr->type = ACPI_STATE_C2;
		cx_ptr->trans_lat = AcpiGbl_FADT->Plvl2Lat;
		cpu_non_c3 = 1;
		cx_ptr++;
		sc->cpu_cx_count++;
	    }
	}

	/* Validate and allocate resources for C3 (P_LVL3). */
	if (AcpiGbl_FADT->Plvl3Lat < 1000 &&
	    (cpu_quirks & CPU_QUIRK_NO_C3) == 0) {

	    gas.Address = sc->cpu_p_blk + 5;
	    cx_ptr->p_lvlx = acpi_bus_alloc_gas(sc->cpu_dev, &cpu_rid, &gas);
	    if (cx_ptr->p_lvlx != NULL) {
		cpu_rid++;
		cx_ptr->type = ACPI_STATE_C3;
		cx_ptr->trans_lat = AcpiGbl_FADT->Plvl3Lat;
		cx_ptr++;
		sc->cpu_cx_count++;
	    }
	}
    }

done:
    /* If no valid registers were found, don't attach. */
    if (sc->cpu_cx_count == 0)
	return (ENXIO);

    return (0);
}
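/*
 * The P_BLK layout assumed by the fallback path above, per the ACPI
 * spec's 6-byte requirement: P_CNT occupies offsets 0-3, with the
 * one-byte P_LVL2 and P_LVL3 sleep registers at offsets 4 and 5
 * (hence cpu_p_blk + 4 and cpu_p_blk + 5).  Reading a P_LVLx
 * register is what actually enters the corresponding Cx state.
 */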
/*
 * Parse a _CST package and set up its Cx states.  Since the _CST object
 * can change dynamically, our notify handler may call this function
 * to clean up and probe the new _CST package.
 */
static int
acpi_cpu_cx_cst(struct acpi_cpu_softc *sc)
{
    struct acpi_cx *cx_ptr;
    ACPI_STATUS	    status;
    ACPI_BUFFER	    buf;
    ACPI_OBJECT	   *top;
    ACPI_OBJECT	   *pkg;
    uint32_t	    count;
    int		    i;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    buf.Pointer = NULL;
    buf.Length = ACPI_ALLOCATE_BUFFER;
    status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf);
    if (ACPI_FAILURE(status))
	return (ENXIO);

    /* _CST is a package with a count and at least one Cx package. */
    top = (ACPI_OBJECT *)buf.Pointer;
    if (!ACPI_PKG_VALID(top, 2)) {
	device_printf(sc->cpu_dev, "Invalid _CST package\n");
	AcpiOsFree(buf.Pointer);
	return (ENXIO);
    }
    acpi_PkgInt32(sc->cpu_dev, top, 0, &count);
    if (count != top->Package.Count - 1) {
	device_printf(sc->cpu_dev, "Invalid _CST state count (%d != %d)\n",
		      count, top->Package.Count - 1);
	count = top->Package.Count - 1;
    }
    if (count > MAX_CX_STATES) {
	device_printf(sc->cpu_dev, "_CST has too many states (%d)\n", count);
	count = MAX_CX_STATES;
    }

    /* Set up all valid states. */
    sc->cpu_cx_count = 0;
    cx_ptr = sc->cpu_cx_states;
    for (i = 0; i < count; i++) {
	pkg = &top->Package.Elements[i + 1];
	if (!ACPI_PKG_VALID(pkg, 4)) {
	    device_printf(sc->cpu_dev, "Skipping invalid Cx state package\n");
	    continue;
	}

	/* Cx type, transition latency, power consumed. */
	acpi_PkgInt32(sc->cpu_dev, pkg, 1, &cx_ptr->type);
	acpi_PkgInt32(sc->cpu_dev, pkg, 2, &cx_ptr->trans_lat);
	acpi_PkgInt32(sc->cpu_dev, pkg, 3, &cx_ptr->power);

	/* Validate the state to see if we should use it. */
	switch (cx_ptr->type) {
	case ACPI_STATE_C1:
	    cpu_non_c3 = i;
	    cx_ptr++;
	    sc->cpu_cx_count++;
	    continue;
	case ACPI_STATE_C2:
	    if (cx_ptr->trans_lat > 100) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				 "acpi_cpu%d: C2[%d] not available.\n",
				 device_get_unit(sc->cpu_dev), i));
		continue;
	    }
	    cpu_non_c3 = i;
	    break;
	case ACPI_STATE_C3:
	default:
	    if (cx_ptr->trans_lat > 1000 ||
		(cpu_quirks & CPU_QUIRK_NO_C3) != 0) {

		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				 "acpi_cpu%d: C3[%d] not available.\n",
				 device_get_unit(sc->cpu_dev), i));
		continue;
	    }
	    break;
	}

#ifdef notyet
	/* Free up any previous register. */
	if (cx_ptr->p_lvlx != NULL) {
	    bus_release_resource(sc->cpu_dev, 0, 0, cx_ptr->p_lvlx);
	    cx_ptr->p_lvlx = NULL;
	}
#endif

	/* Allocate the control register for C2 or C3. */
	acpi_PkgGas(sc->cpu_dev, pkg, 0, &cpu_rid, &cx_ptr->p_lvlx);
	if (cx_ptr->p_lvlx != NULL) {
	    cpu_rid++;
	    ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			     "acpi_cpu%d: Got C%d - %d latency\n",
			     device_get_unit(sc->cpu_dev), cx_ptr->type,
			     cx_ptr->trans_lat));
	    cx_ptr++;
	    sc->cpu_cx_count++;
	}
    }
    AcpiOsFree(buf.Pointer);

    return (0);
}
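/*
 * For reference, the shape of the _CST object parsed above (ASL-style
 * sketch with illustrative values):
 *
 *	Name (_CST, Package () {
 *	    3,						// count
 *	    Package () { <register>, 1, 0,   1000 },	// C1
 *	    Package () { <register>, 2, 90,  500 },	// C2
 *	    Package () { <register>, 3, 900, 250 }	// C3
 *	})
 *
 * Each inner package is: register, type, transition latency (usec),
 * and power (mW), matching the acpi_PkgGas()/acpi_PkgInt32() indices
 * used in the loop.
 */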
/*
 * Call this *after* all CPUs have been attached.
 */
static void
acpi_cpu_startup(void *arg)
{
    struct acpi_cpu_softc *sc;
    int count, i;

    /* Get set of CPU devices */
    devclass_get_devices(acpi_cpu_devclass, &cpu_devices, &cpu_ndevices);

    /* Register performance profile change handler */
    EVENTHANDLER_REGISTER(power_profile_change, acpi_cpu_power_profile,
			  NULL, 0);

    /*
     * Make sure all the processors' Cx counts match.  We should probably
     * also check the contents of each.  However, no known systems have
     * non-matching Cx counts so we'll deal with this later.
     */
    count = MAX_CX_STATES;
    for (i = 0; i < cpu_ndevices; i++) {
	sc = device_get_softc(cpu_devices[i]);
	count = min(sc->cpu_cx_count, count);
    }
    cpu_cx_count = count;

    /* Perform throttling and Cx final initialization. */
    sc = device_get_softc(cpu_devices[0]);
    if (sc->cpu_p_cnt != NULL)
	acpi_cpu_startup_throttling();
    if (cpu_cx_count > 0)
	acpi_cpu_startup_cx();
}

/*
 * Takes the ACPI lock to avoid fighting anyone over the SMI command
 * port.
 */
static void
acpi_cpu_startup_throttling(void)
{
    int cpu_temp_speed;
    ACPI_LOCK_DECL;

    /* Initialise throttling states */
    cpu_max_state = CPU_MAX_SPEED;
    cpu_performance_state = cpu_max_state;
    cpu_economy_state = cpu_performance_state / 2;

    /* 0 is 'reserved' */
    if (cpu_economy_state == 0)
	cpu_economy_state++;
    if (TUNABLE_INT_FETCH("hw.acpi.cpu.performance_speed", &cpu_temp_speed) &&
	cpu_temp_speed > 0 && cpu_temp_speed <= cpu_max_state) {

	cpu_performance_state = cpu_temp_speed;
    }
    if (TUNABLE_INT_FETCH("hw.acpi.cpu.economy_speed", &cpu_temp_speed) &&
	cpu_temp_speed > 0 && cpu_temp_speed <= cpu_max_state) {

	cpu_economy_state = cpu_temp_speed;
    }

    SYSCTL_ADD_INT(&acpi_cpu_sysctl_ctx,
		   SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
		   OID_AUTO, "max_speed", CTLFLAG_RD,
		   &cpu_max_state, 0, "maximum CPU speed");
    SYSCTL_ADD_INT(&acpi_cpu_sysctl_ctx,
		   SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
		   OID_AUTO, "current_speed", CTLFLAG_RD,
		   &cpu_current_state, 0, "current CPU speed");
    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx,
		    SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
		    OID_AUTO, "performance_speed",
		    CTLTYPE_INT | CTLFLAG_RW, &cpu_performance_state,
		    0, acpi_cpu_throttle_sysctl, "I", "");
    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx,
		    SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
		    OID_AUTO, "economy_speed",
		    CTLTYPE_INT | CTLFLAG_RW, &cpu_economy_state,
		    0, acpi_cpu_throttle_sysctl, "I", "");

    /* If ACPI 2.0+, signal platform that we are taking over throttling. */
    if (cpu_pstate_cnt != 0) {
	ACPI_LOCK;
	AcpiOsWritePort(cpu_smi_cmd, cpu_pstate_cnt, 8);
	ACPI_UNLOCK;
    }

    /* Set initial speed */
    acpi_cpu_power_profile(NULL);

    printf("acpi_cpu: throttling enabled, %d steps (100%% to %d.%d%%), "
	   "currently %d.%d%%\n", CPU_MAX_SPEED, CPU_SPEED_PRINTABLE(1),
	   CPU_SPEED_PRINTABLE(cpu_current_state));
}
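/*
 * Example of tuning the two profile speeds at boot (hypothetical
 * values for an 8-step platform), e.g. in loader.conf:
 *
 *	hw.acpi.cpu.performance_speed="8"	# run at 100% on AC power
 *	hw.acpi.cpu.economy_speed="4"		# throttle to 50% otherwise
 *
 * Values outside 1..CPU_MAX_SPEED are ignored and the defaults (full
 * speed, and half of full speed) are kept.
 */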
static void
acpi_cpu_startup_cx(void)
{
    struct acpi_cpu_softc *sc;
    struct sbuf		   sb;
    int			   i;
    ACPI_LOCK_DECL;

    sc = device_get_softc(cpu_devices[0]);
    sbuf_new(&sb, cpu_cx_supported, sizeof(cpu_cx_supported), SBUF_FIXEDLEN);
    for (i = 0; i < cpu_cx_count; i++) {
	sbuf_printf(&sb, "C%d/%d ", sc->cpu_cx_states[i].type,
		    sc->cpu_cx_states[i].trans_lat);
    }
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    SYSCTL_ADD_STRING(&acpi_cpu_sysctl_ctx,
		      SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
		      OID_AUTO, "cx_supported", CTLFLAG_RD, cpu_cx_supported,
		      0, "Cx/microsecond values for supported Cx states");
    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx,
		    SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
		    OID_AUTO, "cx_lowest", CTLTYPE_INT | CTLFLAG_RW,
		    NULL, 0, acpi_cpu_cx_lowest_sysctl, "I",
		    "lowest Cx sleep state to use");
    SYSCTL_ADD_PROC(&acpi_cpu_sysctl_ctx,
		    SYSCTL_CHILDREN(acpi_cpu_sysctl_tree),
		    OID_AUTO, "cx_history", CTLTYPE_STRING | CTLFLAG_RD,
		    NULL, 0, acpi_cpu_history_sysctl, "A", "");

#ifdef notyet
    /* Signal platform that we can handle _CST notification. */
    if (cpu_cst_cnt != 0) {
	ACPI_LOCK;
	AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8);
	ACPI_UNLOCK;
    }
#endif

    /* Take over idling from cpu_idle_default(). */
    cpu_cx_next = cpu_cx_lowest;
    cpu_idle_hook = acpi_cpu_idle;
}
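/*
 * Example of the resulting sysctls (hypothetical latencies): a CPU
 * exporting C1-C3 might show
 *
 *	hw.acpi.cpu.cx_supported: C1/0 C2/90 C3/900
 *
 * i.e. "C<type>/<transition latency in usec>" per supported state,
 * while cx_lowest selects the deepest state acpi_cpu_idle() may use.
 */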
/*
 * Set CPUs to the new state.
 *
 * Must be called with the ACPI lock held.
 */
static void
acpi_cpu_throttle_set(uint32_t speed)
{
    struct acpi_cpu_softc *sc;
    int			   i;
    uint32_t		   p_cnt, clk_val;

    ACPI_ASSERTLOCK;

    /* Iterate over processors */
    for (i = 0; i < cpu_ndevices; i++) {
	sc = device_get_softc(cpu_devices[i]);
	if (sc->cpu_p_cnt == NULL)
	    continue;

	/* Get the current P_CNT value and disable throttling */
	p_cnt = CPU_GET_REG(sc->cpu_p_cnt, 4);
	p_cnt &= ~CPU_P_CNT_THT_EN;
	CPU_SET_REG(sc->cpu_p_cnt, 4, p_cnt);

	/* If we're at maximum speed, that's all */
	if (speed < CPU_MAX_SPEED) {
	    /*
	     * Mask the old CLK_VAL off and or-in the new value.  The
	     * field mask is (CPU_MAX_SPEED - 1) << cpu_duty_offset;
	     * CPU_MAX_SPEED itself is one bit past the end of the field.
	     */
	    clk_val = (CPU_MAX_SPEED - 1) << cpu_duty_offset;
	    p_cnt &= ~clk_val;
	    p_cnt |= (speed << cpu_duty_offset);

	    /* Write the new P_CNT value and then enable throttling */
	    CPU_SET_REG(sc->cpu_p_cnt, 4, p_cnt);
	    p_cnt |= CPU_P_CNT_THT_EN;
	    CPU_SET_REG(sc->cpu_p_cnt, 4, p_cnt);
	}
	ACPI_VPRINT(sc->cpu_dev, acpi_device_get_parent_softc(sc->cpu_dev),
		    "set speed to %d.%d%%\n", CPU_SPEED_PRINTABLE(speed));
    }
    cpu_current_state = speed;
}
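/*
 * Worked example of the sequence above (illustrative FADT values):
 * with cpu_duty_offset == 1 and cpu_duty_width == 3, CLK_VAL spans
 * bits 1-3.  Setting speed 4 first clears THT_EN, masks bits 1-3
 * with (8 - 1) << 1, writes 4 << 1 (a 4/8 == 50% duty cycle), and
 * only then re-enables THT_EN, so the hardware never latches a
 * half-written duty value.
 */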
/*
 * Idle the CPU in the lowest state possible.
 * This function is called with interrupts disabled.
 */
static void
acpi_cpu_idle(void)
{
    struct acpi_cpu_softc *sc;
    struct acpi_cx	  *cx_next;
    uint32_t		   start_time, end_time;
    int			   bm_active, i, asleep;

    /* If disabled, return immediately. */
    if (cpu_cx_count == 0) {
	ACPI_ENABLE_IRQS();
	return;
    }

    /*
     * Look up our CPU id to get our softc.  If it's NULL, we'll use C1
     * since there is no ACPI processor object for this CPU.  This occurs
     * for logical CPUs in the HTT case.
     */
    sc = cpu_softc[PCPU_GET(cpuid)];
    if (sc == NULL) {
	acpi_cpu_c1();
	return;
    }

    /* Record that a CPU is in the idle function. */
    atomic_add_int(&cpu_idle_busy, 1);

    /*
     * Check for bus master activity.  If there was activity, clear
     * the bit and use the lowest non-C3 state.  Note that the USB
     * driver polling for new devices keeps this bit set all the
     * time if USB is enabled.
     */
    AcpiGetRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active,
		    ACPI_MTX_DO_NOT_LOCK);
    if (bm_active != 0) {
	AcpiSetRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1,
			ACPI_MTX_DO_NOT_LOCK);
	cpu_cx_next = min(cpu_cx_next, cpu_non_c3);
    }

    /* Perform the actual sleep based on the Cx-specific semantics. */
    cx_next = &sc->cpu_cx_states[cpu_cx_next];
    switch (cx_next->type) {
    case ACPI_STATE_C0:
	panic("acpi_cpu_idle: attempting to sleep in C0");
	/* NOTREACHED */
    case ACPI_STATE_C1:
	/* Execute HLT (or equivalent) and wait for an interrupt. */
	acpi_cpu_c1();

	/*
	 * We can't calculate the time spent in C1 since the place we
	 * wake up is an ISR.  Use a constant time of 1 ms.
	 */
	start_time = 0;
	end_time = 1000;
	break;
    case ACPI_STATE_C2:
	/*
	 * Read from P_LVLx to enter C2, checking time spent asleep.
	 * Use the ACPI timer for measuring sleep time.  Since we need
	 * to get the time very close to the CPU start/stop clock logic,
	 * this is the only reliable time source.
	 */
	AcpiHwLowLevelRead(32, &start_time, &AcpiGbl_FADT->XPmTmrBlk);
	CPU_GET_REG(cx_next->p_lvlx, 1);

	/*
	 * Read the end time twice.  Since it may take an arbitrary time
	 * to enter the idle state, the first read may be executed before
	 * the processor has stopped.  Doing it again provides enough
	 * margin that we are certain to have a correct value.
	 */
	AcpiHwLowLevelRead(32, &end_time, &AcpiGbl_FADT->XPmTmrBlk);
	AcpiHwLowLevelRead(32, &end_time, &AcpiGbl_FADT->XPmTmrBlk);
	ACPI_ENABLE_IRQS();
	break;
    case ACPI_STATE_C3:
    default:
	/* Disable bus master arbitration and enable bus master wakeup. */
	AcpiSetRegister(ACPI_BITREG_ARB_DISABLE, 1, ACPI_MTX_DO_NOT_LOCK);
	AcpiSetRegister(ACPI_BITREG_BUS_MASTER_RLD, 1, ACPI_MTX_DO_NOT_LOCK);

	/* Read from P_LVLx to enter C3, checking time spent asleep. */
	AcpiHwLowLevelRead(32, &start_time, &AcpiGbl_FADT->XPmTmrBlk);
	CPU_GET_REG(cx_next->p_lvlx, 1);

	/* Read the end time twice.  See comment for C2 above. */
	AcpiHwLowLevelRead(32, &end_time, &AcpiGbl_FADT->XPmTmrBlk);
	AcpiHwLowLevelRead(32, &end_time, &AcpiGbl_FADT->XPmTmrBlk);

	/* Enable bus master arbitration and disable bus master wakeup. */
	AcpiSetRegister(ACPI_BITREG_ARB_DISABLE, 0, ACPI_MTX_DO_NOT_LOCK);
	AcpiSetRegister(ACPI_BITREG_BUS_MASTER_RLD, 0, ACPI_MTX_DO_NOT_LOCK);
	ACPI_ENABLE_IRQS();
	break;
    }

    /* Find the actual time asleep in microseconds, minus overhead. */
    acpi_pm_ticksub(&end_time, &start_time);
    asleep = PM_USEC(end_time) - cx_next->trans_lat;

    /* Record statistics */
    if (asleep < cx_next->trans_lat)
	cpu_cx_stats[cpu_cx_next].short_slp++;
    else
	cpu_cx_stats[cpu_cx_next].long_slp++;

    /*
     * If we slept 100 us or more, use the lowest Cx state.
     * Otherwise, find the lowest state that has a latency less than
     * or equal to the length of our last sleep.
     */
    if (asleep >= 100)
	cpu_cx_next = cpu_cx_lowest;
    else {
	for (i = cpu_cx_lowest; i >= 0; i--) {
	    if (sc->cpu_cx_states[i].trans_lat <= asleep) {
		cpu_cx_next = i;
		break;
	    }
	}
    }

    /* Decrement reference count checked by acpi_cpu_shutdown(). */
    atomic_subtract_int(&cpu_idle_busy, 1);
}

/* Put the CPU in C1 in a machine-dependent way. */
static void
acpi_cpu_c1(void)
{
#ifdef __ia64__
    ia64_call_pal_static(PAL_HALT_LIGHT, 0, 0, 0);
#else
    __asm __volatile("sti; hlt");
#endif
}

/* Find the difference between two PM tick counts. */
static void
acpi_pm_ticksub(uint32_t *end, const uint32_t *start)
{
    if (*end >= *start)
	*end = *end - *start;
    else if (AcpiGbl_FADT->TmrValExt == 0)
	*end = (((0x00FFFFFF - *start) + *end + 1) & 0x00FFFFFF);
    else
	*end = ((0xFFFFFFFF - *start) + *end + 1);
}
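/*
 * Example of the 24-bit wrap handling above (TmrValExt == 0): if a
 * sleep starts at a timer reading of 0x00FFFFF0 and the counter wraps
 * to an end reading of 0x00000010, the difference is
 * (0x00FFFFFF - 0x00FFFFF0) + 0x10 + 1 == 0x20 ticks, about 8 usec
 * after PM_USEC().  A 32-bit timer (TmrValExt != 0) wraps at
 * 0xFFFFFFFF instead.
 */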
/*
 * Re-evaluate the _PSS and _CST objects when we are notified that they
 * have changed.
 *
 * XXX Re-evaluation disabled until locking is done.
 */
static void
acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context)
{
    struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)context;

    switch (notify) {
    case ACPI_CPU_NOTIFY_PERF_STATES:
	device_printf(sc->cpu_dev, "Performance states changed\n");
	/* acpi_cpu_px_available(sc); */
	break;
    case ACPI_CPU_NOTIFY_CX_STATES:
	device_printf(sc->cpu_dev, "Cx states changed\n");
	/* acpi_cpu_cx_cst(sc); */
	break;
    default:
	device_printf(sc->cpu_dev, "Unknown notify %#x\n", notify);
	break;
    }
}

static int
acpi_cpu_quirks(struct acpi_cpu_softc *sc)
{

    /*
     * C3 is not supported on multiple CPUs since this would require
     * flushing all caches which is currently too expensive.
     */
    if (mp_ncpus > 1)
	cpu_quirks |= CPU_QUIRK_NO_C3;

#ifdef notyet
    /* Look for various quirks of the PIIX4 part. */
    acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3);
    if (acpi_dev != NULL) {
	switch (pci_get_revid(acpi_dev)) {
	/*
	 * Disable throttling control on PIIX4 A and B-step.
	 * See specification changes #13 ("Manual Throttle Duty Cycle")
	 * and #14 ("Enabling and Disabling Manual Throttle"), plus
	 * erratum #5 ("STPCLK# Deassertion Time") from the January
	 * 2002 PIIX4 specification update.  Note that few (if any)
	 * mobile systems ever used this part.
	 */
	case PCI_REVISION_A_STEP:
	case PCI_REVISION_B_STEP:
	    cpu_quirks |= CPU_QUIRK_NO_THROTTLE;
	    /* FALLTHROUGH */
	/*
	 * Disable C3 support for all PIIX4 chipsets.  Some of these parts
	 * do not report the BMIDE status to the BM status register and
	 * others have a livelock bug if Type-F DMA is enabled.  Linux
	 * works around the BMIDE bug by reading the BM status directly
	 * but we take the simpler approach of disabling C3 for these
	 * parts.
	 *
	 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA
	 * Livelock") from the January 2002 PIIX4 specification update.
	 * Applies to all PIIX4 models.
	 */
	case PCI_REVISION_4E:
	case PCI_REVISION_4M:
	    cpu_quirks |= CPU_QUIRK_NO_C3;
	    break;
	default:
	    break;
	}
    }
#endif

    return (0);
}

/*
 * Power profile change hook.
 *
 * Uses the ACPI lock to avoid reentrancy.
 */
static void
acpi_cpu_power_profile(void *arg)
{
    int		state;
    uint32_t	new;
    ACPI_LOCK_DECL;

    state = power_profile_get_state();
    if (state != POWER_PROFILE_PERFORMANCE && state != POWER_PROFILE_ECONOMY)
	return;

    ACPI_LOCK;

    switch (state) {
    case POWER_PROFILE_PERFORMANCE:
	new = cpu_performance_state;
	break;
    case POWER_PROFILE_ECONOMY:
	new = cpu_economy_state;
	break;
    default:
	new = cpu_current_state;
	break;
    }

    if (cpu_current_state != new)
	acpi_cpu_throttle_set(new);

    ACPI_UNLOCK;
}

/*
 * Handle changes in the performance/economy CPU settings.
 *
 * Does not need the ACPI lock (although setting *argp should
 * probably be atomic).
 */
static int
acpi_cpu_throttle_sysctl(SYSCTL_HANDLER_ARGS)
{
    uint32_t	*argp;
    uint32_t	 arg;
    int		 error;

    argp = (uint32_t *)oidp->oid_arg1;
    arg = *argp;
    error = sysctl_handle_int(oidp, &arg, 0, req);

    /* Error or no new value */
    if (error != 0 || req->newptr == NULL)
	return (error);
    if (arg < 1 || arg > cpu_max_state)
	return (EINVAL);

    /* Set new value and possibly switch */
    *argp = arg;
    acpi_cpu_power_profile(NULL);

    return (0);
}

static int
acpi_cpu_history_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct sbuf	sb;
    char	buf[128];
    int		i;

    sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN);
    for (i = 0; i < cpu_cx_count; i++) {
	sbuf_printf(&sb, "%u/%u ", cpu_cx_stats[i].long_slp,
		    cpu_cx_stats[i].short_slp);
    }
    sbuf_trim(&sb);
    sbuf_finish(&sb);
    sysctl_handle_string(oidp, sbuf_data(&sb), 0, req);

    return (0);
}
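/*
 * Example cx_history output for three states (hypothetical counts):
 *
 *	hw.acpi.cpu.cx_history: 412/3 77/12 9/41
 *
 * Each pair is "<long sleeps>/<short sleeps>" for that Cx state,
 * where a sleep shorter than the state's transition latency counts
 * as short.  The counters are reset whenever cx_lowest is set (see
 * the handler below).
 */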
static int
acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
    struct acpi_cpu_softc *sc;
    int			   val, error, i;

    sc = device_get_softc(cpu_devices[0]);
    val = cpu_cx_lowest;
    error = sysctl_handle_int(oidp, &val, 0, req);
    if (error != 0 || req->newptr == NULL)
	return (error);
    if (val < 0 || val > cpu_cx_count - 1)
	return (EINVAL);

    /* Use the new value for the next idle slice. */
    cpu_cx_lowest = val;
    cpu_cx_next = val;

    /* If not disabling, cache the new lowest non-C3 state. */
    cpu_non_c3 = 0;
    for (i = cpu_cx_lowest; i >= 0; i--) {
	if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) {
	    cpu_non_c3 = i;
	    break;
	}
    }

    /* Reset the statistics counters. */
    memset(cpu_cx_stats, 0, sizeof(cpu_cx_stats));

    return (0);
}