/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/pm_qos_params.h>
#include <linux/clockchips.h>
#include <linux/cpuidle.h>
#include <linux/irqflags.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <asm/processor.h>

#define PREFIX "ACPI: "

#define ACPI_PROCESSOR_CLASS		"processor"
#define _COMPONENT			ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define ACPI_PROCESSOR_FILE_POWER	"power"
#define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
#define C2_OVERHEAD			1	/* 1us */
#define C3_OVERHEAD			1	/* 1us */
#define PM_TIMER_TICKS_TO_US(p)		(((p) * 1000)/(PM_TIMER_FREQUENCY/1000))

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
static unsigned int bm_check_disable __read_mostly;
module_param(bm_check_disable, uint, 0000);

static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
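/*
 * DMI callback: cap max_cstate for a matched system. The cap is carried
 * in ->driver_data; returning 0 lets dmi_check_system() keep scanning
 * the rest of the table.
 */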
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION, "SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{ set_max_cstate, "Pavilion zv5000", {
	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	  DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion zv5000 (DS502A#ABA)")},
	 (void *)1},
	{ set_max_cstate, "Asus L8400B", {
	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
	  DMI_MATCH(DMI_PRODUCT_NAME, "L8400B series Notebook PC")},
	 (void *)1},
	{},
};


/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void acpi_safe_halt(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched()) {
		safe_halt();
		local_irq_disable();
	}
	current_thread_info()->status |= TS_POLLING;
}

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
		return;

	if (c1e_detected)
		type = ACPI_STATE_C1;

	/*
	 * Check if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void __lapic_timer_propagate_broadcast(void *arg)
{
	struct acpi_processor *pr = (struct acpi_processor *) arg;
	unsigned long reason;

	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &pr->id);
}

static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
	smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
				 (void *)pr, 1);
}
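/*
 * The ON/OFF notifications above mark a CPU whose lapic timer stops in
 * deep C-states; they are deliberately issued on the affected CPU itself
 * via smp_call_function_single(). The ENTER/EXIT notifications below
 * hand the tick over to the broadcast device around each actual idle
 * entry.
 */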
/* Power(C) State timer broadcast control */
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		unsigned long reason;

		reason = broadcast ?  CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
		clockevents_notify(reason, &pr->id);
	}
}

#else

static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int broadcast)
{
}

#endif

/*
 * Suspend / resume control
 */
static int acpi_idle_suspend;
static u32 saved_bm_rld;

static void acpi_idle_bm_rld_save(void)
{
	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &saved_bm_rld);
}
static void acpi_idle_bm_rld_restore(void)
{
	u32 resumed_bm_rld;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_RLD, &resumed_bm_rld);

	if (resumed_bm_rld != saved_bm_rld)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, saved_bm_rld);
}

int acpi_processor_suspend(struct acpi_device *device, pm_message_t state)
{
	if (acpi_idle_suspend == 1)
		return 0;

	acpi_idle_bm_rld_save();
	acpi_idle_suspend = 1;
	return 0;
}

int acpi_processor_resume(struct acpi_device *device)
{
	if (acpi_idle_suspend == 0)
		return 0;

	acpi_idle_bm_rld_restore();
	acpi_idle_suspend = 0;
	return 0;
}

#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return;

		/*FALL THROUGH*/
	default:
		/* TSC could halt in idle, so notify users */
		if (state > ACPI_STATE_C1)
			mark_tsc_unstable("TSC halts in idle");
	}
}
#else
static void tsc_check_state(int state) { return; }
#endif
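/*
 * P_BLK layout per the ACPI spec: a 4-byte P_CNT register at offset 0,
 * followed by the one-byte P_LVL2 and P_LVL3 read ports - hence the
 * pblk + 4 / pblk + 5 addresses used below.
 */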
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{

	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

	/*
	 * FADT specified C2 latency must be less than or equal to
	 * 100 microseconds.
	 */
	if (acpi_gbl_FADT.C2latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"C2 latency too large [%d]\n", acpi_gbl_FADT.C2latency));
		/* invalidate C2 */
		pr->power.states[ACPI_STATE_C2].address = 0;
	}

	/*
	 * FADT supplied C3 latency must be less than or equal to
	 * 1000 microseconds.
	 */
	if (acpi_gbl_FADT.C3latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			"C3 latency too large [%d]\n", acpi_gbl_FADT.C3latency));
		/* invalidate C3 */
		pr->power.states[ACPI_STATE_C3].address = 0;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}
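/*
 * _CST returns a package of the form { count, <state>, <state>, ... },
 * where each <state> is itself a 4-element package: a register buffer,
 * the C-state type, the worst-case latency, and the average power.
 * Malformed entries are skipped below rather than failing the whole
 * evaluation.
 */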
static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	u64 count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;


	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;

	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.entry_method = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
					(pr->id, &cx, reg) == 0) {
				cx.entry_method = ACPI_CSTATE_FFH;
			} else if (cx.type == ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * Otherwise, ignore this info and continue.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			} else {
				continue;
			}
			if (cx.type == ACPI_STATE_C1 &&
			    (idle_halt || idle_nomwait)) {
				/*
				 * In most cases the C1 space_id obtained from
				 * _CST object is FIXED_HARDWARE access mode.
				 * But when the option of idle=halt is added,
				 * the entry_method type should be changed from
				 * CSTATE_FFH to CSTATE_HALT.
				 * When the option of idle=nomwait is added,
				 * the C1 entry_method type should be
				 * CSTATE_HALT.
				 */
				cx.entry_method = ACPI_CSTATE_HALT;
				snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
			}
		} else {
			snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
				 cx.address);
		}

		if (cx.type == ACPI_STATE_C1) {
			cx.valid = 1;
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support total ACPI_PROCESSOR_MAX_POWER - 1
		 * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

      end:
	kfree(buffer.pointer);

	return status;
}
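/*
 * C3 is only accepted when the state has an I/O address, the PIIX4
 * Type-F DMA erratum does not apply, and either bus-master control is
 * usable (or _CST promised to cope without it) or the FADT advertises
 * a working WBINVD for cache flushing.
 */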
static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag = -1;
	static int bm_control_flag = -1;


	if (!cx->address)
		return;

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (bm_check_flag == -1) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
		bm_control_flag = pr->flags.bm_control;
	} else {
		pr->flags.bm_check = bm_check_flag;
		pr->flags.bm_control = bm_control_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in fadt, for C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy
	 */
	cx->valid = 1;

	cx->latency_ticks = cx->latency;
	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3.  Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);

	return;
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			if (!cx->address)
				break;
			cx->valid = 1;
			cx->latency_ticks = cx->latency; /* Normalize latency */
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}
		if (!cx->valid)
			continue;

		lapic_timer_check_state(i, pr, cx);
		tsc_check_state(cx->type);
		working++;
	}

	lapic_timer_propagate_broadcast(pr);

	return (working);
}
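/*
 * Discovery order: _CST is tried first, the FADT/P_BLK method is the
 * fallback when _CST is absent, and a default C1 entry is filled in
 * before the table is verified.
 */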
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;


	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	if (bm_check_disable)
		return 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}

/**
 * acpi_idle_do_entry - a helper function that does C2 and C3 type entry
 * @cx: cstate data
 *
 * Caller disables interrupts before call and enables interrupts after return.
 */
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	/* Don't trace irqs off for idle */
	stop_critical_timings();
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		/* IO port based C-state */
		inb(cx->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
	start_critical_timings();
}
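/*
 * All three cpuidle entry points below funnel through
 * acpi_idle_do_entry(), which dispatches on the probed entry_method:
 * FFH (typically MWAIT on x86), HALT, or a SYSTEMIO port read.
 */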
/**
 * acpi_idle_enter_c1 - enters an ACPI C1 state-type
 * @dev: the target CPU
 * @state: the state data
 *
 * This is equivalent to the HALT instruction.
 */
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	ktime_t kt1, kt2;
	s64 idle_time;
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	local_irq_disable();

	/* Do not access any ACPI IO ports in suspend path */
	if (acpi_idle_suspend) {
		local_irq_enable();
		cpu_relax();
		return 0;
	}

	lapic_timer_state_broadcast(pr, cx, 1);
	kt1 = ktime_get_real();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time = ktime_to_us(ktime_sub(kt2, kt1));

	local_irq_enable();
	cx->usage++;
	lapic_timer_state_broadcast(pr, cx, 0);

	return idle_time;
}

/**
 * acpi_idle_enter_simple - enters an ACPI state without BM handling
 * @dev: the target CPU
 * @state: the state data
 */
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
				  struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	ktime_t kt1, kt2;
	s64 idle_time_ns;
	s64 idle_time;

	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return acpi_idle_enter_c1(dev, state);

	local_irq_disable();

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return 0;
		}
	}

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	kt1 = ktime_get_real();
	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	acpi_idle_do_entry(cx);
	kt2 = ktime_get_real();
	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
	idle_time = idle_time_ns;
	do_div(idle_time, NSEC_PER_USEC);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(idle_time_ns);

	local_irq_enable();
	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += idle_time;
	return idle_time;
}

static int c3_cpu_count;
static DEFINE_SPINLOCK(c3_lock);
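/*
 * c3_cpu_count and c3_lock implement the ARB_DIS handshake used below:
 * the last CPU entering C3 disables bus-master arbitration, and the
 * first one leaving re-enables it.
 */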
/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @dev: the target CPU
 * @state: the state data
 *
 * If BM is detected, the deepest non-C3 idle state is entered instead.
 */
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
			      struct cpuidle_state *state)
{
	struct acpi_processor *pr;
	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
	ktime_t kt1, kt2;
	s64 idle_time_ns;
	s64 idle_time;


	pr = __get_cpu_var(processors);

	if (unlikely(!pr))
		return 0;

	if (acpi_idle_suspend)
		return acpi_idle_enter_c1(dev, state);

	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
		if (dev->safe_state) {
			dev->last_state = dev->safe_state;
			return dev->safe_state->enter(dev, dev->safe_state);
		} else {
			local_irq_disable();
			acpi_safe_halt();
			local_irq_enable();
			return 0;
		}
	}

	local_irq_disable();

	if (cx->entry_method != ACPI_CSTATE_FFH) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we test
		 * NEED_RESCHED:
		 */
		smp_mb();

		if (unlikely(need_resched())) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return 0;
		}
	}

	acpi_unlazy_tlb(smp_processor_id());

	/* Tell the scheduler that we are going deep-idle: */
	sched_clock_idle_sleep_event();
	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	lapic_timer_state_broadcast(pr, cx, 1);

	kt1 = ktime_get_real();
	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * !bm_check implies we need cache flush
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		spin_unlock(&c3_lock);
	} else if (!pr->flags.bm_check) {
		ACPI_FLUSH_CPU_CACHE();
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_check && pr->flags.bm_control) {
		spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		spin_unlock(&c3_lock);
	}
	kt2 = ktime_get_real();
	idle_time_ns = ktime_to_ns(ktime_sub(kt2, kt1));
	idle_time = idle_time_ns;
	do_div(idle_time, NSEC_PER_USEC);

	/* Tell the scheduler how much we idled: */
	sched_clock_idle_wakeup_event(idle_time_ns);

	local_irq_enable();
	if (cx->entry_method != ACPI_CSTATE_FFH)
		current_thread_info()->status |= TS_POLLING;

	cx->usage++;

	lapic_timer_state_broadcast(pr, cx, 0);
	cx->time += idle_time;
	return idle_time;
}

struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};
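/*
 * dev->safe_state is (re)pointed at the deepest C1/C2 state as the
 * table below is filled in, giving acpi_idle_enter_bm() a non-C3
 * fallback when bus-master activity is detected.
 */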
/**
 * acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
{
	int i, count = CPUIDLE_DRIVER_STATE_START;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_device *dev = &pr->power.dev;

	if (!pr->flags.power_setup_done)
		return -EINVAL;

	if (pr->flags.power == 0)
		return -EINVAL;

	dev->cpu = pr->id;
	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
		dev->states[i].name[0] = '\0';
		dev->states[i].desc[0] = '\0';
	}

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];
		state = &dev->states[count];

		if (!cx->valid)
			continue;

#ifdef CONFIG_HOTPLUG_CPU
		if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
		    !pr->flags.has_cst &&
		    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
			continue;
#endif
		cpuidle_set_statedata(state, cx);

		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->power_usage = cx->power;

		state->flags = 0;
		switch (cx->type) {
		case ACPI_STATE_C1:
			state->flags |= CPUIDLE_FLAG_SHALLOW;
			if (cx->entry_method == ACPI_CSTATE_FFH)
				state->flags |= CPUIDLE_FLAG_TIME_VALID;

			state->enter = acpi_idle_enter_c1;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C2:
			state->flags |= CPUIDLE_FLAG_BALANCED;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->enter = acpi_idle_enter_simple;
			dev->safe_state = state;
			break;

		case ACPI_STATE_C3:
			state->flags |= CPUIDLE_FLAG_DEEP;
			state->flags |= CPUIDLE_FLAG_TIME_VALID;
			state->flags |= CPUIDLE_FLAG_CHECK_BM;
			state->enter = pr->flags.bm_check ?
					acpi_idle_enter_bm :
					acpi_idle_enter_simple;
			break;
		}

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	dev->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int ret = 0;

	if (boot_option_idle_override)
		return 0;

	if (!pr)
		return -EINVAL;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	cpuidle_pause_and_lock();
	cpuidle_disable_device(&pr->power.dev);
	acpi_processor_get_power_info(pr);
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle(pr);
		ret = cpuidle_enable_device(&pr->power.dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}
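/*
 * One-time setup below: DMI and architecture C-state limits are applied
 * on the first call, and writing cst_control to the FADT SMI command
 * port tells the BIOS that the OS supports _CST before the tables are
 * read.
 */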
int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
					struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run;

	if (boot_option_idle_override)
		return 0;

	if (!first_run) {
		if (idle_halt) {
			/*
			 * When the boot option of "idle=halt" is added, halt
			 * is used for CPU IDLE.
			 * In such case C2/C3 is meaningless. So the max_cstate
			 * is set to one.
			 */
			max_cstate = 1;
		}
		dmi_check_system(processor_power_dmi_table);
		max_cstate = acpi_processor_cstate_check(max_cstate);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
	}

	if (!pr)
		return -EINVAL;

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);
	pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if (pr->flags.power) {
		acpi_processor_setup_cpuidle(pr);
		if (cpuidle_register_device(&pr->power.dev))
			return -EIO;
	}
	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr,
			      struct acpi_device *device)
{
	if (boot_option_idle_override)
		return 0;

	cpuidle_unregister_device(&pr->power.dev);
	pr->flags.power_setup_done = 0;

	return 0;
}