acpi_cpu.c revision 280973
1/*- 2 * Copyright (c) 2003-2005 Nate Lawson (SDG) 3 * Copyright (c) 2001 Michael Smith 4 * All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 25 * SUCH DAMAGE. 26 */ 27 28#include <sys/cdefs.h> 29__FBSDID("$FreeBSD: stable/10/sys/dev/acpica/acpi_cpu.c 280973 2015-04-02 01:02:42Z jhb $"); 30 31#include "opt_acpi.h" 32#include <sys/param.h> 33#include <sys/bus.h> 34#include <sys/cpu.h> 35#include <sys/kernel.h> 36#include <sys/malloc.h> 37#include <sys/module.h> 38#include <sys/pcpu.h> 39#include <sys/power.h> 40#include <sys/proc.h> 41#include <sys/sched.h> 42#include <sys/sbuf.h> 43#include <sys/smp.h> 44 45#include <dev/pci/pcivar.h> 46#include <machine/atomic.h> 47#include <machine/bus.h> 48#if defined(__amd64__) || defined(__i386__) 49#include <machine/clock.h> 50#endif 51#include <sys/rman.h> 52 53#include <contrib/dev/acpica/include/acpi.h> 54#include <contrib/dev/acpica/include/accommon.h> 55 56#include <dev/acpica/acpivar.h> 57 58/* 59 * Support for ACPI Processor devices, including C[1-3] sleep states. 60 */ 61 62/* Hooks for the ACPI CA debugging infrastructure */ 63#define _COMPONENT ACPI_PROCESSOR 64ACPI_MODULE_NAME("PROCESSOR") 65 66struct acpi_cx { 67 struct resource *p_lvlx; /* Register to read to enter state. */ 68 uint32_t type; /* C1-3 (C4 and up treated as C3). */ 69 uint32_t trans_lat; /* Transition latency (usec). */ 70 uint32_t power; /* Power consumed (mW). */ 71 int res_type; /* Resource type for p_lvlx. */ 72 int res_rid; /* Resource ID for p_lvlx. */ 73}; 74#define MAX_CX_STATES 8 75 76struct acpi_cpu_softc { 77 device_t cpu_dev; 78 ACPI_HANDLE cpu_handle; 79 struct pcpu *cpu_pcpu; 80 uint32_t cpu_acpi_id; /* ACPI processor id */ 81 uint32_t cpu_p_blk; /* ACPI P_BLK location */ 82 uint32_t cpu_p_blk_len; /* P_BLK length (must be 6). */ 83 struct acpi_cx cpu_cx_states[MAX_CX_STATES]; 84 int cpu_cx_count; /* Number of valid Cx states. */ 85 int cpu_prev_sleep;/* Last idle sleep duration. */ 86 int cpu_features; /* Child driver supported features. */ 87 /* Runtime state. */ 88 int cpu_non_c2; /* Index of lowest non-C2 state. */ 89 int cpu_non_c3; /* Index of lowest non-C3 state. */ 90 u_int cpu_cx_stats[MAX_CX_STATES];/* Cx usage history. */ 91 /* Values for sysctl. */ 92 struct sysctl_ctx_list cpu_sysctl_ctx; 93 struct sysctl_oid *cpu_sysctl_tree; 94 int cpu_cx_lowest; 95 int cpu_cx_lowest_lim; 96 int cpu_disable_idle; /* Disable entry to idle function */ 97 char cpu_cx_supported[64]; 98}; 99 100struct acpi_cpu_device { 101 struct resource_list ad_rl; 102}; 103 104#define CPU_GET_REG(reg, width) \ 105 (bus_space_read_ ## width(rman_get_bustag((reg)), \ 106 rman_get_bushandle((reg)), 0)) 107#define CPU_SET_REG(reg, width, val) \ 108 (bus_space_write_ ## width(rman_get_bustag((reg)), \ 109 rman_get_bushandle((reg)), 0, (val))) 110 111#define PM_USEC(x) ((x) >> 2) /* ~4 clocks per usec (3.57955 Mhz) */ 112 113#define ACPI_NOTIFY_CX_STATES 0x81 /* _CST changed. */ 114 115#define CPU_QUIRK_NO_C3 (1<<0) /* C3-type states are not usable. */ 116#define CPU_QUIRK_NO_BM_CTRL (1<<2) /* No bus mastering control. */ 117 118#define PCI_VENDOR_INTEL 0x8086 119#define PCI_DEVICE_82371AB_3 0x7113 /* PIIX4 chipset for quirks. */ 120#define PCI_REVISION_A_STEP 0 121#define PCI_REVISION_B_STEP 1 122#define PCI_REVISION_4E 2 123#define PCI_REVISION_4M 3 124#define PIIX4_DEVACTB_REG 0x58 125#define PIIX4_BRLD_EN_IRQ0 (1<<0) 126#define PIIX4_BRLD_EN_IRQ (1<<1) 127#define PIIX4_BRLD_EN_IRQ8 (1<<5) 128#define PIIX4_STOP_BREAK_MASK (PIIX4_BRLD_EN_IRQ0 | PIIX4_BRLD_EN_IRQ | PIIX4_BRLD_EN_IRQ8) 129#define PIIX4_PCNTRL_BST_EN (1<<10) 130 131/* Allow users to ignore processor orders in MADT. */ 132static int cpu_unordered; 133TUNABLE_INT("debug.acpi.cpu_unordered", &cpu_unordered); 134SYSCTL_INT(_debug_acpi, OID_AUTO, cpu_unordered, CTLFLAG_RDTUN, 135 &cpu_unordered, 0, 136 "Do not use the MADT to match ACPI Processor objects to CPUs."); 137 138/* Platform hardware resource information. */ 139static uint32_t cpu_smi_cmd; /* Value to write to SMI_CMD. */ 140static uint8_t cpu_cst_cnt; /* Indicate we are _CST aware. */ 141static int cpu_quirks; /* Indicate any hardware bugs. */ 142 143/* Values for sysctl. */ 144static struct sysctl_ctx_list cpu_sysctl_ctx; 145static struct sysctl_oid *cpu_sysctl_tree; 146static int cpu_cx_generic; 147static int cpu_cx_lowest_lim; 148 149static device_t *cpu_devices; 150static int cpu_ndevices; 151static struct acpi_cpu_softc **cpu_softc; 152ACPI_SERIAL_DECL(cpu, "ACPI CPU"); 153 154static int acpi_cpu_probe(device_t dev); 155static int acpi_cpu_attach(device_t dev); 156static int acpi_cpu_suspend(device_t dev); 157static int acpi_cpu_resume(device_t dev); 158static int acpi_pcpu_get_id(device_t dev, uint32_t *acpi_id, 159 uint32_t *cpu_id); 160static struct resource_list *acpi_cpu_get_rlist(device_t dev, device_t child); 161static device_t acpi_cpu_add_child(device_t dev, u_int order, const char *name, 162 int unit); 163static int acpi_cpu_read_ivar(device_t dev, device_t child, int index, 164 uintptr_t *result); 165static int acpi_cpu_shutdown(device_t dev); 166static void acpi_cpu_cx_probe(struct acpi_cpu_softc *sc); 167static void acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc); 168static int acpi_cpu_cx_cst(struct acpi_cpu_softc *sc); 169static void acpi_cpu_startup(void *arg); 170static void acpi_cpu_startup_cx(struct acpi_cpu_softc *sc); 171static void acpi_cpu_cx_list(struct acpi_cpu_softc *sc); 172static void acpi_cpu_idle(sbintime_t sbt); 173static void acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context); 174static int acpi_cpu_quirks(void); 175static int acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS); 176static int acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc); 177static int acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS); 178static int acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS); 179 180static device_method_t acpi_cpu_methods[] = { 181 /* Device interface */ 182 DEVMETHOD(device_probe, acpi_cpu_probe), 183 DEVMETHOD(device_attach, acpi_cpu_attach), 184 DEVMETHOD(device_detach, bus_generic_detach), 185 DEVMETHOD(device_shutdown, acpi_cpu_shutdown), 186 DEVMETHOD(device_suspend, acpi_cpu_suspend), 187 DEVMETHOD(device_resume, acpi_cpu_resume), 188 189 /* Bus interface */ 190 DEVMETHOD(bus_add_child, acpi_cpu_add_child), 191 DEVMETHOD(bus_read_ivar, acpi_cpu_read_ivar), 192 DEVMETHOD(bus_get_resource_list, acpi_cpu_get_rlist), 193 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), 194 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), 195 DEVMETHOD(bus_alloc_resource, bus_generic_rl_alloc_resource), 196 DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource), 197 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), 198 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), 199 DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), 200 DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), 201 202 DEVMETHOD_END 203}; 204 205static driver_t acpi_cpu_driver = { 206 "cpu", 207 acpi_cpu_methods, 208 sizeof(struct acpi_cpu_softc), 209}; 210 211static devclass_t acpi_cpu_devclass; 212DRIVER_MODULE(cpu, acpi, acpi_cpu_driver, acpi_cpu_devclass, 0, 0); 213MODULE_DEPEND(cpu, acpi, 1, 1, 1); 214 215static int 216acpi_cpu_probe(device_t dev) 217{ 218 int acpi_id, cpu_id; 219 ACPI_BUFFER buf; 220 ACPI_HANDLE handle; 221 ACPI_OBJECT *obj; 222 ACPI_STATUS status; 223 224 if (acpi_disabled("cpu") || acpi_get_type(dev) != ACPI_TYPE_PROCESSOR) 225 return (ENXIO); 226 227 handle = acpi_get_handle(dev); 228 if (cpu_softc == NULL) 229 cpu_softc = malloc(sizeof(struct acpi_cpu_softc *) * 230 (mp_maxid + 1), M_TEMP /* XXX */, M_WAITOK | M_ZERO); 231 232 /* Get our Processor object. */ 233 buf.Pointer = NULL; 234 buf.Length = ACPI_ALLOCATE_BUFFER; 235 status = AcpiEvaluateObject(handle, NULL, NULL, &buf); 236 if (ACPI_FAILURE(status)) { 237 device_printf(dev, "probe failed to get Processor obj - %s\n", 238 AcpiFormatException(status)); 239 return (ENXIO); 240 } 241 obj = (ACPI_OBJECT *)buf.Pointer; 242 if (obj->Type != ACPI_TYPE_PROCESSOR) { 243 device_printf(dev, "Processor object has bad type %d\n", obj->Type); 244 AcpiOsFree(obj); 245 return (ENXIO); 246 } 247 248 /* 249 * Find the processor associated with our unit. We could use the 250 * ProcId as a key, however, some boxes do not have the same values 251 * in their Processor object as the ProcId values in the MADT. 252 */ 253 acpi_id = obj->Processor.ProcId; 254 AcpiOsFree(obj); 255 if (acpi_pcpu_get_id(dev, &acpi_id, &cpu_id) != 0) 256 return (ENXIO); 257 258 /* 259 * Check if we already probed this processor. We scan the bus twice 260 * so it's possible we've already seen this one. 261 */ 262 if (cpu_softc[cpu_id] != NULL) 263 return (ENXIO); 264 265 /* Mark this processor as in-use and save our derived id for attach. */ 266 cpu_softc[cpu_id] = (void *)1; 267 acpi_set_private(dev, (void*)(intptr_t)cpu_id); 268 device_set_desc(dev, "ACPI CPU"); 269 270 return (0); 271} 272 273static int 274acpi_cpu_attach(device_t dev) 275{ 276 ACPI_BUFFER buf; 277 ACPI_OBJECT arg[4], *obj; 278 ACPI_OBJECT_LIST arglist; 279 struct pcpu *pcpu_data; 280 struct acpi_cpu_softc *sc; 281 struct acpi_softc *acpi_sc; 282 ACPI_STATUS status; 283 u_int features; 284 int cpu_id, drv_count, i; 285 driver_t **drivers; 286 uint32_t cap_set[3]; 287 288 /* UUID needed by _OSC evaluation */ 289 static uint8_t cpu_oscuuid[16] = { 0x16, 0xA6, 0x77, 0x40, 0x0C, 0x29, 290 0xBE, 0x47, 0x9E, 0xBD, 0xD8, 0x70, 291 0x58, 0x71, 0x39, 0x53 }; 292 293 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 294 295 sc = device_get_softc(dev); 296 sc->cpu_dev = dev; 297 sc->cpu_handle = acpi_get_handle(dev); 298 cpu_id = (int)(intptr_t)acpi_get_private(dev); 299 cpu_softc[cpu_id] = sc; 300 pcpu_data = pcpu_find(cpu_id); 301 pcpu_data->pc_device = dev; 302 sc->cpu_pcpu = pcpu_data; 303 cpu_smi_cmd = AcpiGbl_FADT.SmiCommand; 304 cpu_cst_cnt = AcpiGbl_FADT.CstControl; 305 306 buf.Pointer = NULL; 307 buf.Length = ACPI_ALLOCATE_BUFFER; 308 status = AcpiEvaluateObject(sc->cpu_handle, NULL, NULL, &buf); 309 if (ACPI_FAILURE(status)) { 310 device_printf(dev, "attach failed to get Processor obj - %s\n", 311 AcpiFormatException(status)); 312 return (ENXIO); 313 } 314 obj = (ACPI_OBJECT *)buf.Pointer; 315 sc->cpu_p_blk = obj->Processor.PblkAddress; 316 sc->cpu_p_blk_len = obj->Processor.PblkLength; 317 sc->cpu_acpi_id = obj->Processor.ProcId; 318 AcpiOsFree(obj); 319 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu%d: P_BLK at %#x/%d\n", 320 device_get_unit(dev), sc->cpu_p_blk, sc->cpu_p_blk_len)); 321 322 /* 323 * If this is the first cpu we attach, create and initialize the generic 324 * resources that will be used by all acpi cpu devices. 325 */ 326 if (device_get_unit(dev) == 0) { 327 /* Assume we won't be using generic Cx mode by default */ 328 cpu_cx_generic = FALSE; 329 330 /* Install hw.acpi.cpu sysctl tree */ 331 acpi_sc = acpi_device_get_parent_softc(dev); 332 sysctl_ctx_init(&cpu_sysctl_ctx); 333 cpu_sysctl_tree = SYSCTL_ADD_NODE(&cpu_sysctl_ctx, 334 SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree), OID_AUTO, "cpu", 335 CTLFLAG_RD, 0, "node for CPU children"); 336 337 /* Queue post cpu-probing task handler */ 338 AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cpu_startup, NULL); 339 } 340 341 /* 342 * Before calling any CPU methods, collect child driver feature hints 343 * and notify ACPI of them. We support unified SMP power control 344 * so advertise this ourselves. Note this is not the same as independent 345 * SMP control where each CPU can have different settings. 346 */ 347 sc->cpu_features = ACPI_CAP_SMP_SAME | ACPI_CAP_SMP_SAME_C3; 348 if (devclass_get_drivers(acpi_cpu_devclass, &drivers, &drv_count) == 0) { 349 for (i = 0; i < drv_count; i++) { 350 if (ACPI_GET_FEATURES(drivers[i], &features) == 0) 351 sc->cpu_features |= features; 352 } 353 free(drivers, M_TEMP); 354 } 355 356 /* 357 * CPU capabilities are specified in 358 * Intel Processor Vendor-Specific ACPI Interface Specification. 359 */ 360 if (sc->cpu_features) { 361 arglist.Pointer = arg; 362 arglist.Count = 4; 363 arg[0].Type = ACPI_TYPE_BUFFER; 364 arg[0].Buffer.Length = sizeof(cpu_oscuuid); 365 arg[0].Buffer.Pointer = cpu_oscuuid; /* UUID */ 366 arg[1].Type = ACPI_TYPE_INTEGER; 367 arg[1].Integer.Value = 1; /* revision */ 368 arg[2].Type = ACPI_TYPE_INTEGER; 369 arg[2].Integer.Value = 1; /* count */ 370 arg[3].Type = ACPI_TYPE_BUFFER; 371 arg[3].Buffer.Length = sizeof(cap_set); /* Capabilities buffer */ 372 arg[3].Buffer.Pointer = (uint8_t *)cap_set; 373 cap_set[0] = 0; /* status */ 374 cap_set[1] = sc->cpu_features; 375 status = AcpiEvaluateObject(sc->cpu_handle, "_OSC", &arglist, NULL); 376 if (ACPI_SUCCESS(status)) { 377 if (cap_set[0] != 0) 378 device_printf(dev, "_OSC returned status %#x\n", cap_set[0]); 379 } 380 else { 381 arglist.Pointer = arg; 382 arglist.Count = 1; 383 arg[0].Type = ACPI_TYPE_BUFFER; 384 arg[0].Buffer.Length = sizeof(cap_set); 385 arg[0].Buffer.Pointer = (uint8_t *)cap_set; 386 cap_set[0] = 1; /* revision */ 387 cap_set[1] = 1; /* number of capabilities integers */ 388 cap_set[2] = sc->cpu_features; 389 AcpiEvaluateObject(sc->cpu_handle, "_PDC", &arglist, NULL); 390 } 391 } 392 393 /* Probe for Cx state support. */ 394 acpi_cpu_cx_probe(sc); 395 396 return (0); 397} 398 399static void 400acpi_cpu_postattach(void *unused __unused) 401{ 402 device_t *devices; 403 int err; 404 int i, n; 405 406 err = devclass_get_devices(acpi_cpu_devclass, &devices, &n); 407 if (err != 0) { 408 printf("devclass_get_devices(acpi_cpu_devclass) failed\n"); 409 return; 410 } 411 for (i = 0; i < n; i++) 412 bus_generic_probe(devices[i]); 413 for (i = 0; i < n; i++) 414 bus_generic_attach(devices[i]); 415 free(devices, M_TEMP); 416} 417 418SYSINIT(acpi_cpu, SI_SUB_CONFIGURE, SI_ORDER_MIDDLE, 419 acpi_cpu_postattach, NULL); 420 421static void 422disable_idle(struct acpi_cpu_softc *sc) 423{ 424 cpuset_t cpuset; 425 426 CPU_SETOF(sc->cpu_pcpu->pc_cpuid, &cpuset); 427 sc->cpu_disable_idle = TRUE; 428 429 /* 430 * Ensure that the CPU is not in idle state or in acpi_cpu_idle(). 431 * Note that this code depends on the fact that the rendezvous IPI 432 * can not penetrate context where interrupts are disabled and acpi_cpu_idle 433 * is called and executed in such a context with interrupts being re-enabled 434 * right before return. 435 */ 436 smp_rendezvous_cpus(cpuset, smp_no_rendevous_barrier, NULL, 437 smp_no_rendevous_barrier, NULL); 438} 439 440static void 441enable_idle(struct acpi_cpu_softc *sc) 442{ 443 444 sc->cpu_disable_idle = FALSE; 445} 446 447static int 448is_idle_disabled(struct acpi_cpu_softc *sc) 449{ 450 451 return (sc->cpu_disable_idle); 452} 453 454/* 455 * Disable any entry to the idle function during suspend and re-enable it 456 * during resume. 457 */ 458static int 459acpi_cpu_suspend(device_t dev) 460{ 461 int error; 462 463 error = bus_generic_suspend(dev); 464 if (error) 465 return (error); 466 disable_idle(device_get_softc(dev)); 467 return (0); 468} 469 470static int 471acpi_cpu_resume(device_t dev) 472{ 473 474 enable_idle(device_get_softc(dev)); 475 return (bus_generic_resume(dev)); 476} 477 478/* 479 * Find the processor associated with a given ACPI ID. By default, 480 * use the MADT to map ACPI IDs to APIC IDs and use that to locate a 481 * processor. Some systems have inconsistent ASL and MADT however. 482 * For these systems the cpu_unordered tunable can be set in which 483 * case we assume that Processor objects are listed in the same order 484 * in both the MADT and ASL. 485 */ 486static int 487acpi_pcpu_get_id(device_t dev, uint32_t *acpi_id, uint32_t *cpu_id) 488{ 489 struct pcpu *pc; 490 uint32_t i, idx; 491 492 KASSERT(acpi_id != NULL, ("Null acpi_id")); 493 KASSERT(cpu_id != NULL, ("Null cpu_id")); 494 idx = device_get_unit(dev); 495 496 /* 497 * If pc_acpi_id for CPU 0 is not initialized (e.g. a non-APIC 498 * UP box) use the ACPI ID from the first processor we find. 499 */ 500 if (idx == 0 && mp_ncpus == 1) { 501 pc = pcpu_find(0); 502 if (pc->pc_acpi_id == 0xffffffff) 503 pc->pc_acpi_id = *acpi_id; 504 *cpu_id = 0; 505 return (0); 506 } 507 508 CPU_FOREACH(i) { 509 pc = pcpu_find(i); 510 KASSERT(pc != NULL, ("no pcpu data for %d", i)); 511 if (cpu_unordered) { 512 if (idx-- == 0) { 513 /* 514 * If pc_acpi_id doesn't match the ACPI ID from the 515 * ASL, prefer the MADT-derived value. 516 */ 517 if (pc->pc_acpi_id != *acpi_id) 518 *acpi_id = pc->pc_acpi_id; 519 *cpu_id = pc->pc_cpuid; 520 return (0); 521 } 522 } else { 523 if (pc->pc_acpi_id == *acpi_id) { 524 if (bootverbose) 525 device_printf(dev, 526 "Processor %s (ACPI ID %u) -> APIC ID %d\n", 527 acpi_name(acpi_get_handle(dev)), *acpi_id, 528 pc->pc_cpuid); 529 *cpu_id = pc->pc_cpuid; 530 return (0); 531 } 532 } 533 } 534 535 if (bootverbose) 536 printf("ACPI: Processor %s (ACPI ID %u) ignored\n", 537 acpi_name(acpi_get_handle(dev)), *acpi_id); 538 539 return (ESRCH); 540} 541 542static struct resource_list * 543acpi_cpu_get_rlist(device_t dev, device_t child) 544{ 545 struct acpi_cpu_device *ad; 546 547 ad = device_get_ivars(child); 548 if (ad == NULL) 549 return (NULL); 550 return (&ad->ad_rl); 551} 552 553static device_t 554acpi_cpu_add_child(device_t dev, u_int order, const char *name, int unit) 555{ 556 struct acpi_cpu_device *ad; 557 device_t child; 558 559 if ((ad = malloc(sizeof(*ad), M_TEMP, M_NOWAIT | M_ZERO)) == NULL) 560 return (NULL); 561 562 resource_list_init(&ad->ad_rl); 563 564 child = device_add_child_ordered(dev, order, name, unit); 565 if (child != NULL) 566 device_set_ivars(child, ad); 567 else 568 free(ad, M_TEMP); 569 return (child); 570} 571 572static int 573acpi_cpu_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) 574{ 575 struct acpi_cpu_softc *sc; 576 577 sc = device_get_softc(dev); 578 switch (index) { 579 case ACPI_IVAR_HANDLE: 580 *result = (uintptr_t)sc->cpu_handle; 581 break; 582 case CPU_IVAR_PCPU: 583 *result = (uintptr_t)sc->cpu_pcpu; 584 break; 585#if defined(__amd64__) || defined(__i386__) 586 case CPU_IVAR_NOMINAL_MHZ: 587 if (tsc_is_invariant) { 588 *result = (uintptr_t)(atomic_load_acq_64(&tsc_freq) / 1000000); 589 break; 590 } 591 /* FALLTHROUGH */ 592#endif 593 default: 594 return (ENOENT); 595 } 596 return (0); 597} 598 599static int 600acpi_cpu_shutdown(device_t dev) 601{ 602 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 603 604 /* Allow children to shutdown first. */ 605 bus_generic_shutdown(dev); 606 607 /* 608 * Disable any entry to the idle function. 609 */ 610 disable_idle(device_get_softc(dev)); 611 612 /* 613 * CPU devices are not truely detached and remain referenced, 614 * so their resources are not freed. 615 */ 616 617 return_VALUE (0); 618} 619 620static void 621acpi_cpu_cx_probe(struct acpi_cpu_softc *sc) 622{ 623 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 624 625 /* Use initial sleep value of 1 sec. to start with lowest idle state. */ 626 sc->cpu_prev_sleep = 1000000; 627 sc->cpu_cx_lowest = 0; 628 sc->cpu_cx_lowest_lim = 0; 629 630 /* 631 * Check for the ACPI 2.0 _CST sleep states object. If we can't find 632 * any, we'll revert to generic FADT/P_BLK Cx control method which will 633 * be handled by acpi_cpu_startup. We need to defer to after having 634 * probed all the cpus in the system before probing for generic Cx 635 * states as we may already have found cpus with valid _CST packages 636 */ 637 if (!cpu_cx_generic && acpi_cpu_cx_cst(sc) != 0) { 638 /* 639 * We were unable to find a _CST package for this cpu or there 640 * was an error parsing it. Switch back to generic mode. 641 */ 642 cpu_cx_generic = TRUE; 643 if (bootverbose) 644 device_printf(sc->cpu_dev, "switching to generic Cx mode\n"); 645 } 646 647 /* 648 * TODO: _CSD Package should be checked here. 649 */ 650} 651 652static void 653acpi_cpu_generic_cx_probe(struct acpi_cpu_softc *sc) 654{ 655 ACPI_GENERIC_ADDRESS gas; 656 struct acpi_cx *cx_ptr; 657 658 sc->cpu_cx_count = 0; 659 cx_ptr = sc->cpu_cx_states; 660 661 /* Use initial sleep value of 1 sec. to start with lowest idle state. */ 662 sc->cpu_prev_sleep = 1000000; 663 664 /* C1 has been required since just after ACPI 1.0 */ 665 cx_ptr->type = ACPI_STATE_C1; 666 cx_ptr->trans_lat = 0; 667 cx_ptr++; 668 sc->cpu_non_c2 = sc->cpu_cx_count; 669 sc->cpu_non_c3 = sc->cpu_cx_count; 670 sc->cpu_cx_count++; 671 cpu_deepest_sleep = 1; 672 673 /* 674 * The spec says P_BLK must be 6 bytes long. However, some systems 675 * use it to indicate a fractional set of features present so we 676 * take 5 as C2. Some may also have a value of 7 to indicate 677 * another C3 but most use _CST for this (as required) and having 678 * "only" C1-C3 is not a hardship. 679 */ 680 if (sc->cpu_p_blk_len < 5) 681 return; 682 683 /* Validate and allocate resources for C2 (P_LVL2). */ 684 gas.SpaceId = ACPI_ADR_SPACE_SYSTEM_IO; 685 gas.BitWidth = 8; 686 if (AcpiGbl_FADT.C2Latency <= 100) { 687 gas.Address = sc->cpu_p_blk + 4; 688 cx_ptr->res_rid = 0; 689 acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cx_ptr->res_rid, 690 &gas, &cx_ptr->p_lvlx, RF_SHAREABLE); 691 if (cx_ptr->p_lvlx != NULL) { 692 cx_ptr->type = ACPI_STATE_C2; 693 cx_ptr->trans_lat = AcpiGbl_FADT.C2Latency; 694 cx_ptr++; 695 sc->cpu_non_c3 = sc->cpu_cx_count; 696 sc->cpu_cx_count++; 697 cpu_deepest_sleep = 2; 698 } 699 } 700 if (sc->cpu_p_blk_len < 6) 701 return; 702 703 /* Validate and allocate resources for C3 (P_LVL3). */ 704 if (AcpiGbl_FADT.C3Latency <= 1000 && !(cpu_quirks & CPU_QUIRK_NO_C3)) { 705 gas.Address = sc->cpu_p_blk + 5; 706 cx_ptr->res_rid = 1; 707 acpi_bus_alloc_gas(sc->cpu_dev, &cx_ptr->res_type, &cx_ptr->res_rid, 708 &gas, &cx_ptr->p_lvlx, RF_SHAREABLE); 709 if (cx_ptr->p_lvlx != NULL) { 710 cx_ptr->type = ACPI_STATE_C3; 711 cx_ptr->trans_lat = AcpiGbl_FADT.C3Latency; 712 cx_ptr++; 713 sc->cpu_cx_count++; 714 cpu_deepest_sleep = 3; 715 } 716 } 717} 718 719/* 720 * Parse a _CST package and set up its Cx states. Since the _CST object 721 * can change dynamically, our notify handler may call this function 722 * to clean up and probe the new _CST package. 723 */ 724static int 725acpi_cpu_cx_cst(struct acpi_cpu_softc *sc) 726{ 727 struct acpi_cx *cx_ptr; 728 ACPI_STATUS status; 729 ACPI_BUFFER buf; 730 ACPI_OBJECT *top; 731 ACPI_OBJECT *pkg; 732 uint32_t count; 733 int i; 734 735 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 736 737 buf.Pointer = NULL; 738 buf.Length = ACPI_ALLOCATE_BUFFER; 739 status = AcpiEvaluateObject(sc->cpu_handle, "_CST", NULL, &buf); 740 if (ACPI_FAILURE(status)) 741 return (ENXIO); 742 743 /* _CST is a package with a count and at least one Cx package. */ 744 top = (ACPI_OBJECT *)buf.Pointer; 745 if (!ACPI_PKG_VALID(top, 2) || acpi_PkgInt32(top, 0, &count) != 0) { 746 device_printf(sc->cpu_dev, "invalid _CST package\n"); 747 AcpiOsFree(buf.Pointer); 748 return (ENXIO); 749 } 750 if (count != top->Package.Count - 1) { 751 device_printf(sc->cpu_dev, "invalid _CST state count (%d != %d)\n", 752 count, top->Package.Count - 1); 753 count = top->Package.Count - 1; 754 } 755 if (count > MAX_CX_STATES) { 756 device_printf(sc->cpu_dev, "_CST has too many states (%d)\n", count); 757 count = MAX_CX_STATES; 758 } 759 760 sc->cpu_non_c2 = 0; 761 sc->cpu_non_c3 = 0; 762 sc->cpu_cx_count = 0; 763 cx_ptr = sc->cpu_cx_states; 764 765 /* 766 * C1 has been required since just after ACPI 1.0. 767 * Reserve the first slot for it. 768 */ 769 cx_ptr->type = ACPI_STATE_C0; 770 cx_ptr++; 771 sc->cpu_cx_count++; 772 cpu_deepest_sleep = 1; 773 774 /* Set up all valid states. */ 775 for (i = 0; i < count; i++) { 776 pkg = &top->Package.Elements[i + 1]; 777 if (!ACPI_PKG_VALID(pkg, 4) || 778 acpi_PkgInt32(pkg, 1, &cx_ptr->type) != 0 || 779 acpi_PkgInt32(pkg, 2, &cx_ptr->trans_lat) != 0 || 780 acpi_PkgInt32(pkg, 3, &cx_ptr->power) != 0) { 781 782 device_printf(sc->cpu_dev, "skipping invalid Cx state package\n"); 783 continue; 784 } 785 786 /* Validate the state to see if we should use it. */ 787 switch (cx_ptr->type) { 788 case ACPI_STATE_C1: 789 if (sc->cpu_cx_states[0].type == ACPI_STATE_C0) { 790 /* This is the first C1 state. Use the reserved slot. */ 791 sc->cpu_cx_states[0] = *cx_ptr; 792 } else { 793 sc->cpu_non_c2 = sc->cpu_cx_count; 794 sc->cpu_non_c3 = sc->cpu_cx_count; 795 cx_ptr++; 796 sc->cpu_cx_count++; 797 } 798 continue; 799 case ACPI_STATE_C2: 800 sc->cpu_non_c3 = sc->cpu_cx_count; 801 if (cpu_deepest_sleep < 2) 802 cpu_deepest_sleep = 2; 803 break; 804 case ACPI_STATE_C3: 805 default: 806 if ((cpu_quirks & CPU_QUIRK_NO_C3) != 0) { 807 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 808 "acpi_cpu%d: C3[%d] not available.\n", 809 device_get_unit(sc->cpu_dev), i)); 810 continue; 811 } else 812 cpu_deepest_sleep = 3; 813 break; 814 } 815 816 /* Free up any previous register. */ 817 if (cx_ptr->p_lvlx != NULL) { 818 bus_release_resource(sc->cpu_dev, cx_ptr->res_type, cx_ptr->res_rid, 819 cx_ptr->p_lvlx); 820 cx_ptr->p_lvlx = NULL; 821 } 822 823 /* Allocate the control register for C2 or C3. */ 824 cx_ptr->res_rid = sc->cpu_cx_count; 825 acpi_PkgGas(sc->cpu_dev, pkg, 0, &cx_ptr->res_type, &cx_ptr->res_rid, 826 &cx_ptr->p_lvlx, RF_SHAREABLE); 827 if (cx_ptr->p_lvlx) { 828 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 829 "acpi_cpu%d: Got C%d - %d latency\n", 830 device_get_unit(sc->cpu_dev), cx_ptr->type, 831 cx_ptr->trans_lat)); 832 cx_ptr++; 833 sc->cpu_cx_count++; 834 } 835 } 836 AcpiOsFree(buf.Pointer); 837 838 /* If C1 state was not found, we need one now. */ 839 cx_ptr = sc->cpu_cx_states; 840 if (cx_ptr->type == ACPI_STATE_C0) { 841 cx_ptr->type = ACPI_STATE_C1; 842 cx_ptr->trans_lat = 0; 843 } 844 845 return (0); 846} 847 848/* 849 * Call this *after* all CPUs have been attached. 850 */ 851static void 852acpi_cpu_startup(void *arg) 853{ 854 struct acpi_cpu_softc *sc; 855 int i; 856 857 /* Get set of CPU devices */ 858 devclass_get_devices(acpi_cpu_devclass, &cpu_devices, &cpu_ndevices); 859 860 /* 861 * Setup any quirks that might necessary now that we have probed 862 * all the CPUs 863 */ 864 acpi_cpu_quirks(); 865 866 if (cpu_cx_generic) { 867 /* 868 * We are using generic Cx mode, probe for available Cx states 869 * for all processors. 870 */ 871 for (i = 0; i < cpu_ndevices; i++) { 872 sc = device_get_softc(cpu_devices[i]); 873 acpi_cpu_generic_cx_probe(sc); 874 } 875 } else { 876 /* 877 * We are using _CST mode, remove C3 state if necessary. 878 * As we now know for sure that we will be using _CST mode 879 * install our notify handler. 880 */ 881 for (i = 0; i < cpu_ndevices; i++) { 882 sc = device_get_softc(cpu_devices[i]); 883 if (cpu_quirks & CPU_QUIRK_NO_C3) { 884 sc->cpu_cx_count = min(sc->cpu_cx_count, sc->cpu_non_c3 + 1); 885 } 886 AcpiInstallNotifyHandler(sc->cpu_handle, ACPI_DEVICE_NOTIFY, 887 acpi_cpu_notify, sc); 888 } 889 } 890 891 /* Perform Cx final initialization. */ 892 for (i = 0; i < cpu_ndevices; i++) { 893 sc = device_get_softc(cpu_devices[i]); 894 acpi_cpu_startup_cx(sc); 895 } 896 897 /* Add a sysctl handler to handle global Cx lowest setting */ 898 SYSCTL_ADD_PROC(&cpu_sysctl_ctx, SYSCTL_CHILDREN(cpu_sysctl_tree), 899 OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW, 900 NULL, 0, acpi_cpu_global_cx_lowest_sysctl, "A", 901 "Global lowest Cx sleep state to use"); 902 903 /* Take over idling from cpu_idle_default(). */ 904 cpu_cx_lowest_lim = 0; 905 for (i = 0; i < cpu_ndevices; i++) { 906 sc = device_get_softc(cpu_devices[i]); 907 enable_idle(sc); 908 } 909 cpu_idle_hook = acpi_cpu_idle; 910} 911 912static void 913acpi_cpu_cx_list(struct acpi_cpu_softc *sc) 914{ 915 struct sbuf sb; 916 int i; 917 918 /* 919 * Set up the list of Cx states 920 */ 921 sbuf_new(&sb, sc->cpu_cx_supported, sizeof(sc->cpu_cx_supported), 922 SBUF_FIXEDLEN); 923 for (i = 0; i < sc->cpu_cx_count; i++) 924 sbuf_printf(&sb, "C%d/%d/%d ", i + 1, sc->cpu_cx_states[i].type, 925 sc->cpu_cx_states[i].trans_lat); 926 sbuf_trim(&sb); 927 sbuf_finish(&sb); 928} 929 930static void 931acpi_cpu_startup_cx(struct acpi_cpu_softc *sc) 932{ 933 acpi_cpu_cx_list(sc); 934 935 SYSCTL_ADD_STRING(&sc->cpu_sysctl_ctx, 936 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), 937 OID_AUTO, "cx_supported", CTLFLAG_RD, 938 sc->cpu_cx_supported, 0, 939 "Cx/microsecond values for supported Cx states"); 940 SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx, 941 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), 942 OID_AUTO, "cx_lowest", CTLTYPE_STRING | CTLFLAG_RW, 943 (void *)sc, 0, acpi_cpu_cx_lowest_sysctl, "A", 944 "lowest Cx sleep state to use"); 945 SYSCTL_ADD_PROC(&sc->cpu_sysctl_ctx, 946 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->cpu_dev)), 947 OID_AUTO, "cx_usage", CTLTYPE_STRING | CTLFLAG_RD, 948 (void *)sc, 0, acpi_cpu_usage_sysctl, "A", 949 "percent usage for each Cx state"); 950 951 /* Signal platform that we can handle _CST notification. */ 952 if (!cpu_cx_generic && cpu_cst_cnt != 0) { 953 ACPI_LOCK(acpi); 954 AcpiOsWritePort(cpu_smi_cmd, cpu_cst_cnt, 8); 955 ACPI_UNLOCK(acpi); 956 } 957} 958 959/* 960 * Idle the CPU in the lowest state possible. This function is called with 961 * interrupts disabled. Note that once it re-enables interrupts, a task 962 * switch can occur so do not access shared data (i.e. the softc) after 963 * interrupts are re-enabled. 964 */ 965static void 966acpi_cpu_idle(sbintime_t sbt) 967{ 968 struct acpi_cpu_softc *sc; 969 struct acpi_cx *cx_next; 970 uint64_t cputicks; 971 uint32_t start_time, end_time; 972 int bm_active, cx_next_idx, i, us; 973 974 /* 975 * Look up our CPU id to get our softc. If it's NULL, we'll use C1 976 * since there is no ACPI processor object for this CPU. This occurs 977 * for logical CPUs in the HTT case. 978 */ 979 sc = cpu_softc[PCPU_GET(cpuid)]; 980 if (sc == NULL) { 981 acpi_cpu_c1(); 982 return; 983 } 984 985 /* If disabled, take the safe path. */ 986 if (is_idle_disabled(sc)) { 987 acpi_cpu_c1(); 988 return; 989 } 990 991 /* Find the lowest state that has small enough latency. */ 992 us = sc->cpu_prev_sleep; 993 if (sbt >= 0 && us > (sbt >> 12)) 994 us = (sbt >> 12); 995 cx_next_idx = 0; 996 if (cpu_disable_c2_sleep) 997 i = min(sc->cpu_cx_lowest, sc->cpu_non_c2); 998 else if (cpu_disable_c3_sleep) 999 i = min(sc->cpu_cx_lowest, sc->cpu_non_c3); 1000 else 1001 i = sc->cpu_cx_lowest; 1002 for (; i >= 0; i--) { 1003 if (sc->cpu_cx_states[i].trans_lat * 3 <= us) { 1004 cx_next_idx = i; 1005 break; 1006 } 1007 } 1008 1009 /* 1010 * Check for bus master activity. If there was activity, clear 1011 * the bit and use the lowest non-C3 state. Note that the USB 1012 * driver polling for new devices keeps this bit set all the 1013 * time if USB is loaded. 1014 */ 1015 if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0 && 1016 cx_next_idx > sc->cpu_non_c3) { 1017 AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, &bm_active); 1018 if (bm_active != 0) { 1019 AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_STATUS, 1); 1020 cx_next_idx = sc->cpu_non_c3; 1021 } 1022 } 1023 1024 /* Select the next state and update statistics. */ 1025 cx_next = &sc->cpu_cx_states[cx_next_idx]; 1026 sc->cpu_cx_stats[cx_next_idx]++; 1027 KASSERT(cx_next->type != ACPI_STATE_C0, ("acpi_cpu_idle: C0 sleep")); 1028 1029 /* 1030 * Execute HLT (or equivalent) and wait for an interrupt. We can't 1031 * precisely calculate the time spent in C1 since the place we wake up 1032 * is an ISR. Assume we slept no more then half of quantum, unless 1033 * we are called inside critical section, delaying context switch. 1034 */ 1035 if (cx_next->type == ACPI_STATE_C1) { 1036 cputicks = cpu_ticks(); 1037 acpi_cpu_c1(); 1038 end_time = ((cpu_ticks() - cputicks) << 20) / cpu_tickrate(); 1039 if (curthread->td_critnest == 0) 1040 end_time = min(end_time, 500000 / hz); 1041 sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + end_time) / 4; 1042 return; 1043 } 1044 1045 /* 1046 * For C3, disable bus master arbitration and enable bus master wake 1047 * if BM control is available, otherwise flush the CPU cache. 1048 */ 1049 if (cx_next->type == ACPI_STATE_C3) { 1050 if ((cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) { 1051 AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 1); 1052 AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 1); 1053 } else 1054 ACPI_FLUSH_CPU_CACHE(); 1055 } 1056 1057 /* 1058 * Read from P_LVLx to enter C2(+), checking time spent asleep. 1059 * Use the ACPI timer for measuring sleep time. Since we need to 1060 * get the time very close to the CPU start/stop clock logic, this 1061 * is the only reliable time source. 1062 */ 1063 if (cx_next->type == ACPI_STATE_C3) { 1064 AcpiHwRead(&start_time, &AcpiGbl_FADT.XPmTimerBlock); 1065 cputicks = 0; 1066 } else { 1067 start_time = 0; 1068 cputicks = cpu_ticks(); 1069 } 1070 CPU_GET_REG(cx_next->p_lvlx, 1); 1071 1072 /* 1073 * Read the end time twice. Since it may take an arbitrary time 1074 * to enter the idle state, the first read may be executed before 1075 * the processor has stopped. Doing it again provides enough 1076 * margin that we are certain to have a correct value. 1077 */ 1078 AcpiHwRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock); 1079 if (cx_next->type == ACPI_STATE_C3) { 1080 AcpiHwRead(&end_time, &AcpiGbl_FADT.XPmTimerBlock); 1081 end_time = acpi_TimerDelta(end_time, start_time); 1082 } else 1083 end_time = ((cpu_ticks() - cputicks) << 20) / cpu_tickrate(); 1084 1085 /* Enable bus master arbitration and disable bus master wakeup. */ 1086 if (cx_next->type == ACPI_STATE_C3 && 1087 (cpu_quirks & CPU_QUIRK_NO_BM_CTRL) == 0) { 1088 AcpiWriteBitRegister(ACPI_BITREG_ARB_DISABLE, 0); 1089 AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0); 1090 } 1091 ACPI_ENABLE_IRQS(); 1092 1093 sc->cpu_prev_sleep = (sc->cpu_prev_sleep * 3 + PM_USEC(end_time)) / 4; 1094} 1095 1096/* 1097 * Re-evaluate the _CST object when we are notified that it changed. 1098 */ 1099static void 1100acpi_cpu_notify(ACPI_HANDLE h, UINT32 notify, void *context) 1101{ 1102 struct acpi_cpu_softc *sc = (struct acpi_cpu_softc *)context; 1103 1104 if (notify != ACPI_NOTIFY_CX_STATES) 1105 return; 1106 1107 /* 1108 * C-state data for target CPU is going to be in flux while we execute 1109 * acpi_cpu_cx_cst, so disable entering acpi_cpu_idle. 1110 * Also, it may happen that multiple ACPI taskqueues may concurrently 1111 * execute notifications for the same CPU. ACPI_SERIAL is used to 1112 * protect against that. 1113 */ 1114 ACPI_SERIAL_BEGIN(cpu); 1115 disable_idle(sc); 1116 1117 /* Update the list of Cx states. */ 1118 acpi_cpu_cx_cst(sc); 1119 acpi_cpu_cx_list(sc); 1120 acpi_cpu_set_cx_lowest(sc); 1121 1122 enable_idle(sc); 1123 ACPI_SERIAL_END(cpu); 1124 1125 acpi_UserNotify("PROCESSOR", sc->cpu_handle, notify); 1126} 1127 1128static int 1129acpi_cpu_quirks(void) 1130{ 1131 device_t acpi_dev; 1132 uint32_t val; 1133 1134 ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); 1135 1136 /* 1137 * Bus mastering arbitration control is needed to keep caches coherent 1138 * while sleeping in C3. If it's not present but a working flush cache 1139 * instruction is present, flush the caches before entering C3 instead. 1140 * Otherwise, just disable C3 completely. 1141 */ 1142 if (AcpiGbl_FADT.Pm2ControlBlock == 0 || 1143 AcpiGbl_FADT.Pm2ControlLength == 0) { 1144 if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) && 1145 (AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0) { 1146 cpu_quirks |= CPU_QUIRK_NO_BM_CTRL; 1147 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1148 "acpi_cpu: no BM control, using flush cache method\n")); 1149 } else { 1150 cpu_quirks |= CPU_QUIRK_NO_C3; 1151 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1152 "acpi_cpu: no BM control, C3 not available\n")); 1153 } 1154 } 1155 1156 /* 1157 * If we are using generic Cx mode, C3 on multiple CPUs requires using 1158 * the expensive flush cache instruction. 1159 */ 1160 if (cpu_cx_generic && mp_ncpus > 1) { 1161 cpu_quirks |= CPU_QUIRK_NO_BM_CTRL; 1162 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1163 "acpi_cpu: SMP, using flush cache mode for C3\n")); 1164 } 1165 1166 /* Look for various quirks of the PIIX4 part. */ 1167 acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3); 1168 if (acpi_dev != NULL) { 1169 switch (pci_get_revid(acpi_dev)) { 1170 /* 1171 * Disable C3 support for all PIIX4 chipsets. Some of these parts 1172 * do not report the BMIDE status to the BM status register and 1173 * others have a livelock bug if Type-F DMA is enabled. Linux 1174 * works around the BMIDE bug by reading the BM status directly 1175 * but we take the simpler approach of disabling C3 for these 1176 * parts. 1177 * 1178 * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA 1179 * Livelock") from the January 2002 PIIX4 specification update. 1180 * Applies to all PIIX4 models. 1181 * 1182 * Also, make sure that all interrupts cause a "Stop Break" 1183 * event to exit from C2 state. 1184 * Also, BRLD_EN_BM (ACPI_BITREG_BUS_MASTER_RLD in ACPI-speak) 1185 * should be set to zero, otherwise it causes C2 to short-sleep. 1186 * PIIX4 doesn't properly support C3 and bus master activity 1187 * need not break out of C2. 1188 */ 1189 case PCI_REVISION_A_STEP: 1190 case PCI_REVISION_B_STEP: 1191 case PCI_REVISION_4E: 1192 case PCI_REVISION_4M: 1193 cpu_quirks |= CPU_QUIRK_NO_C3; 1194 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1195 "acpi_cpu: working around PIIX4 bug, disabling C3\n")); 1196 1197 val = pci_read_config(acpi_dev, PIIX4_DEVACTB_REG, 4); 1198 if ((val & PIIX4_STOP_BREAK_MASK) != PIIX4_STOP_BREAK_MASK) { 1199 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1200 "acpi_cpu: PIIX4: enabling IRQs to generate Stop Break\n")); 1201 val |= PIIX4_STOP_BREAK_MASK; 1202 pci_write_config(acpi_dev, PIIX4_DEVACTB_REG, val, 4); 1203 } 1204 AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_RLD, &val); 1205 if (val) { 1206 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 1207 "acpi_cpu: PIIX4: reset BRLD_EN_BM\n")); 1208 AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0); 1209 } 1210 break; 1211 default: 1212 break; 1213 } 1214 } 1215 1216 return (0); 1217} 1218 1219static int 1220acpi_cpu_usage_sysctl(SYSCTL_HANDLER_ARGS) 1221{ 1222 struct acpi_cpu_softc *sc; 1223 struct sbuf sb; 1224 char buf[128]; 1225 int i; 1226 uintmax_t fract, sum, whole; 1227 1228 sc = (struct acpi_cpu_softc *) arg1; 1229 sum = 0; 1230 for (i = 0; i < sc->cpu_cx_count; i++) 1231 sum += sc->cpu_cx_stats[i]; 1232 sbuf_new(&sb, buf, sizeof(buf), SBUF_FIXEDLEN); 1233 for (i = 0; i < sc->cpu_cx_count; i++) { 1234 if (sum > 0) { 1235 whole = (uintmax_t)sc->cpu_cx_stats[i] * 100; 1236 fract = (whole % sum) * 100; 1237 sbuf_printf(&sb, "%u.%02u%% ", (u_int)(whole / sum), 1238 (u_int)(fract / sum)); 1239 } else 1240 sbuf_printf(&sb, "0.00%% "); 1241 } 1242 sbuf_printf(&sb, "last %dus", sc->cpu_prev_sleep); 1243 sbuf_trim(&sb); 1244 sbuf_finish(&sb); 1245 sysctl_handle_string(oidp, sbuf_data(&sb), sbuf_len(&sb), req); 1246 sbuf_delete(&sb); 1247 1248 return (0); 1249} 1250 1251static int 1252acpi_cpu_set_cx_lowest(struct acpi_cpu_softc *sc) 1253{ 1254 int i; 1255 1256 ACPI_SERIAL_ASSERT(cpu); 1257 sc->cpu_cx_lowest = min(sc->cpu_cx_lowest_lim, sc->cpu_cx_count - 1); 1258 1259 /* If not disabling, cache the new lowest non-C3 state. */ 1260 sc->cpu_non_c3 = 0; 1261 for (i = sc->cpu_cx_lowest; i >= 0; i--) { 1262 if (sc->cpu_cx_states[i].type < ACPI_STATE_C3) { 1263 sc->cpu_non_c3 = i; 1264 break; 1265 } 1266 } 1267 1268 /* Reset the statistics counters. */ 1269 bzero(sc->cpu_cx_stats, sizeof(sc->cpu_cx_stats)); 1270 return (0); 1271} 1272 1273static int 1274acpi_cpu_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS) 1275{ 1276 struct acpi_cpu_softc *sc; 1277 char state[8]; 1278 int val, error; 1279 1280 sc = (struct acpi_cpu_softc *) arg1; 1281 snprintf(state, sizeof(state), "C%d", sc->cpu_cx_lowest_lim + 1); 1282 error = sysctl_handle_string(oidp, state, sizeof(state), req); 1283 if (error != 0 || req->newptr == NULL) 1284 return (error); 1285 if (strlen(state) < 2 || toupper(state[0]) != 'C') 1286 return (EINVAL); 1287 if (strcasecmp(state, "Cmax") == 0) 1288 val = MAX_CX_STATES; 1289 else { 1290 val = (int) strtol(state + 1, NULL, 10); 1291 if (val < 1 || val > MAX_CX_STATES) 1292 return (EINVAL); 1293 } 1294 1295 ACPI_SERIAL_BEGIN(cpu); 1296 sc->cpu_cx_lowest_lim = val - 1; 1297 acpi_cpu_set_cx_lowest(sc); 1298 ACPI_SERIAL_END(cpu); 1299 1300 return (0); 1301} 1302 1303static int 1304acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS) 1305{ 1306 struct acpi_cpu_softc *sc; 1307 char state[8]; 1308 int val, error, i; 1309 1310 snprintf(state, sizeof(state), "C%d", cpu_cx_lowest_lim + 1); 1311 error = sysctl_handle_string(oidp, state, sizeof(state), req); 1312 if (error != 0 || req->newptr == NULL) 1313 return (error); 1314 if (strlen(state) < 2 || toupper(state[0]) != 'C') 1315 return (EINVAL); 1316 if (strcasecmp(state, "Cmax") == 0) 1317 val = MAX_CX_STATES; 1318 else { 1319 val = (int) strtol(state + 1, NULL, 10); 1320 if (val < 1 || val > MAX_CX_STATES) 1321 return (EINVAL); 1322 } 1323 1324 /* Update the new lowest useable Cx state for all CPUs. */ 1325 ACPI_SERIAL_BEGIN(cpu); 1326 cpu_cx_lowest_lim = val - 1; 1327 for (i = 0; i < cpu_ndevices; i++) { 1328 sc = device_get_softc(cpu_devices[i]); 1329 sc->cpu_cx_lowest_lim = cpu_cx_lowest_lim; 1330 acpi_cpu_set_cx_lowest(sc); 1331 } 1332 ACPI_SERIAL_END(cpu); 1333 1334 return (0); 1335} 1336