/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 * 			- Added processor hotplug support
 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 * 			- Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/latency.h>
#include <linux/clockchips.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#include <asm/io.h>
#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
#define ACPI_PROCESSOR_FILE_POWER	"power"
#define US_TO_PM_TIMER_TICKS(t)		(((t) * (PM_TIMER_FREQUENCY/1000)) / 1000)
#define PM_TIMER_TICK_NS		(1000000000ULL/PM_TIMER_FREQUENCY)
#define C2_OVERHEAD			4	/* 1us (3.579 ticks per us) */
#define C3_OVERHEAD			4	/* 1us (3.579 ticks per us) */

static void (*pm_idle_save) (void) __read_mostly;

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0644);

static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);

/*
 * bm_history -- bit-mask with a bit per jiffy of bus-master activity
 * 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
 *  800 HZ: 0xFFFFFFFF: 32 jiffies = 40ms
 *  100 HZ: 0x0000000F:  4 jiffies = 40ms
 * reduce history for more aggressive entry into C3
 */
static unsigned int bm_history __read_mostly =
    (HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
module_param(bm_history, uint, 0644);
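
/*
 * Worked example of the mask arithmetic above: at HZ=250 the mask is
 * (1U << (250 / 25)) - 1 = 0x3FF, i.e. 10 jiffies = 40ms of history;
 * at HZ >= 800 the full 32-bit mask is used.  Each pass through the
 * idle loop shifts bm_activity left by the number of jiffies elapsed
 * and ORs in a 1 if bus master activity was seen, so a smaller mask
 * forgets activity sooner and allows more aggressive entry into C3.
 */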

/* --------------------------------------------------------------------------
                              Power Management
   -------------------------------------------------------------------------- */

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	printk(KERN_NOTICE PREFIX "%s detected - limiting to C%ld max_cstate."
	       " Override with \"processor.max_cstate=%d\"\n", id->ident,
	       (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

/* Actually this shouldn't be __cpuinitdata, would be better to fix the
   callers to only run once -AK */
static struct dmi_system_id __cpuinitdata processor_power_dmi_table[] = {
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET70WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW")}, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET43WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET45WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET47WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET50WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET52WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET55WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET56WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET59WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET60WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET61WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET62WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET64WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET65WW") }, (void *)1},
	{ set_max_cstate, "IBM ThinkPad R40e", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"IBM"),
	  DMI_MATCH(DMI_BIOS_VERSION,"1SET68WW") }, (void *)1},
	{ set_max_cstate, "Medion 41700", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"R01-A1J")}, (void *)1},
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{},
};

static inline u32 ticks_elapsed(u32 t1, u32 t2)
{
	if (t2 >= t1)
		return (t2 - t1);
	else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
		return (((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
	else
		return ((0xFFFFFFFF - t1) + t2);
}
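
/*
 * Example of the 24-bit wraparound case above: the ACPI PM timer runs at
 * PM_TIMER_FREQUENCY (3.579545 MHz), so with t1 = 0x00FFFFF0 and
 * t2 = 0x00000010, ticks_elapsed() returns
 * ((0x00FFFFFF - 0x00FFFFF0) + 0x00000010) & 0x00FFFFFF = 0x1F = 31 ticks,
 * roughly 8.7us of real time.
 */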

static void
acpi_processor_power_activate(struct acpi_processor *pr,
			      struct acpi_processor_cx *new)
{
	struct acpi_processor_cx *old;

	if (!pr || !new)
		return;

	old = pr->power.state;

	if (old)
		old->promotion.count = 0;
	new->demotion.count = 0;

	/* Cleanup from old state. */
	if (old) {
		switch (old->type) {
		case ACPI_STATE_C3:
			/* Disable bus master reload */
			if (new->type != ACPI_STATE_C3 && pr->flags.bm_check)
				acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
			break;
		}
	}

	/* Prepare to use new state. */
	switch (new->type) {
	case ACPI_STATE_C3:
		/* Enable bus master reload (old may be NULL on first use) */
		if ((!old || old->type != ACPI_STATE_C3) && pr->flags.bm_check)
			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
		break;
	}

	pr->power.state = new;

	return;
}

static void acpi_safe_halt(void)
{
	current_thread_info()->status &= ~TS_POLLING;
	/*
	 * TS_POLLING-cleared state must be visible before we
	 * test NEED_RESCHED:
	 */
	smp_mb();
	if (!need_resched())
		safe_halt();
	current_thread_info()->status |= TS_POLLING;
}

static atomic_t c3_cpu_count;

/* Common C-state entry for C2, C3, .. */
static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
{
	if (cstate->space_id == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cstate);
	} else {
		int unused;
		/* IO port based C-state */
		inb(cstate->address);
		/* Dummy wait op - must do something useless after P_LVL2 read
		   because chipsets cannot guarantee that STPCLK# signal
		   gets asserted in time to freeze execution properly. */
		unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
	}
}

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	/*
	 * Check, if one of the previous states already marked the lapic
	 * unstable
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}
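
/*
 * Illustration of the bookkeeping above: timer_broadcast_on_state starts
 * at INT_MAX and is lowered to the index of the shallowest C-state whose
 * type may stop the local APIC timer.  With local_apic_timer_c2_ok set,
 * only states of type C3 lower it; otherwise C2-type states do as well.
 * Every state at that index or deeper then needs broadcast wakeups.
 */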

static void acpi_propagate_timer_broadcast(struct acpi_processor *pr)
{
#ifdef CONFIG_GENERIC_CLOCKEVENTS
	unsigned long reason;

	reason = pr->power.timer_broadcast_on_state < INT_MAX ?
		CLOCK_EVT_NOTIFY_BROADCAST_ON : CLOCK_EVT_NOTIFY_BROADCAST_OFF;

	clockevents_notify(reason, &pr->id);
#else
	cpumask_t mask = cpumask_of_cpu(pr->id);

	if (pr->power.timer_broadcast_on_state < INT_MAX)
		on_each_cpu(switch_APIC_timer_to_ipi, &mask, 1, 1);
	else
		on_each_cpu(switch_ipi_to_APIC_timer, &mask, 1, 1);
#endif
}

/* Power(C) State timer broadcast control */
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
#ifdef CONFIG_GENERIC_CLOCKEVENTS

	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		unsigned long reason;

		reason = broadcast ? CLOCK_EVT_NOTIFY_BROADCAST_ENTER :
			CLOCK_EVT_NOTIFY_BROADCAST_EXIT;
		clockevents_notify(reason, &pr->id);
	}
#endif
}

#else

static void acpi_timer_check_state(int state, struct acpi_processor *pr,
				   struct acpi_processor_cx *cstate) { }
static void acpi_propagate_timer_broadcast(struct acpi_processor *pr) { }
static void acpi_state_timer_broadcast(struct acpi_processor *pr,
				       struct acpi_processor_cx *cx,
				       int broadcast)
{
}

#endif

static void acpi_processor_idle(void)
{
	struct acpi_processor *pr = NULL;
	struct acpi_processor_cx *cx = NULL;
	struct acpi_processor_cx *next_state = NULL;
	int sleep_ticks = 0;
	u32 t1, t2 = 0;

	/*
	 * Interrupts must be disabled during bus mastering calculations and
	 * for C2/C3 transitions.
	 */
	local_irq_disable();

	pr = processors[smp_processor_id()];
	if (!pr) {
		local_irq_enable();
		return;
	}

	/*
	 * Check whether we truly need to go idle, or should
	 * reschedule:
	 */
	if (unlikely(need_resched())) {
		local_irq_enable();
		return;
	}

	cx = pr->power.state;
	if (!cx) {
		if (pm_idle_save)
			pm_idle_save();
		else
			acpi_safe_halt();
		return;
	}

	/*
	 * Check BM Activity
	 * -----------------
	 * Check for bus mastering activity (if required), record, and check
	 * for demotion.
	 */
	if (pr->flags.bm_check) {
		u32 bm_status = 0;
		unsigned long diff = jiffies - pr->power.bm_check_timestamp;

		if (diff > 31)
			diff = 31;

		pr->power.bm_activity <<= diff;

		acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
		if (bm_status) {
			pr->power.bm_activity |= 0x1;
			acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
		}
		/*
		 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
		 * the true state of bus mastering activity; forcing us to
		 * manually check the BMIDEA bit of each IDE channel.
		 */
		else if (errata.piix4.bmisx) {
			if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
			    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
				pr->power.bm_activity |= 0x1;
		}

		pr->power.bm_check_timestamp = jiffies;

		/*
		 * If bus mastering is or was active this jiffy, demote
		 * to avoid a faulty transition.  Note that the processor
		 * won't enter a low-power state during this call (to this
		 * function) but should upon the next.
		 *
		 * TBD: A better policy might be to fall back to the demotion
		 *      state (use it for this quantum only) instead of
		 *      demoting -- and rely on duration as our sole demotion
		 *      qualification.  This may, however, introduce DMA
		 *      issues (e.g. floppy DMA transfer overrun/underrun).
		 */
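		/*
		 * Note on the check below: bit 0 of bm_activity is the
		 * current jiffy.  For example, with HZ >= 800 and the
		 * default bm_history of 0xFFFFFFFF, activity anywhere in
		 * the last 32 jiffies keeps bm_activity non-zero (and
		 * blocks promotion later), but only activity in the
		 * current jiffy (bit 0) forces an immediate demotion here.
		 */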
		if ((pr->power.bm_activity & 0x1) &&
		    cx->demotion.threshold.bm) {
			local_irq_enable();
			next_state = cx->demotion.state;
			goto end;
		}
	}

#ifdef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.  We do it here instead of doing it at _CST/P_LVL
	 * detection phase, to work cleanly with logical CPU hotplug.
	 */
	if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		cx = &pr->power.states[ACPI_STATE_C1];
#endif

	/*
	 * Sleep:
	 * ------
	 * Invoke the current Cx state to put the processor to sleep.
	 */
	if (cx->type == ACPI_STATE_C2 || cx->type == ACPI_STATE_C3) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();
		if (need_resched()) {
			current_thread_info()->status |= TS_POLLING;
			local_irq_enable();
			return;
		}
	}

	switch (cx->type) {

	case ACPI_STATE_C1:
		/*
		 * Invoke C1.
		 * Use the appropriate idle routine, the one that would
		 * be used without acpi C-states.
		 */
		if (pm_idle_save)
			pm_idle_save();
		else
			acpi_safe_halt();

		/*
		 * TBD: Can't get time duration while in C1, as resumes
		 *      go to an ISR rather than here.  Need to instrument
		 *      base interrupt handler.
		 *
		 * Note: the TSC better not stop in C1, sched_clock() will
		 *       skew otherwise.
		 */
		sleep_ticks = 0xFFFFFFFF;
		break;

	case ACPI_STATE_C2:
		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		/* Invoke C2 */
		acpi_state_timer_broadcast(pr, cx, 1);
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);

#ifdef CONFIG_GENERIC_TIME
		/* TSC halts in C2, so notify users */
		mark_tsc_unstable("possible TSC halt in C2");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);

		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C2_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	case ACPI_STATE_C3:
		if (pr->flags.bm_check) {
			if (atomic_inc_return(&c3_cpu_count) ==
			    num_online_cpus()) {
				/*
				 * All CPUs are trying to go to C3
				 * Disable bus master arbitration
				 */
				acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
			}
		} else {
			/* SMP with no shared cache... Invalidate cache */
			ACPI_FLUSH_CPU_CACHE();
		}
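
		/*
		 * Rationale: while ARB_DIS is set, bus masters cannot
		 * acquire the bus, so memory stays consistent even though
		 * C3 does not snoop caches; the wakeup path below clears
		 * ARB_DIS again once the first CPU leaves C3.
		 */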

		/* Get start time (ticks) */
		t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		/* Invoke C3 */
		acpi_state_timer_broadcast(pr, cx, 1);
		/* Tell the scheduler that we are going deep-idle: */
		sched_clock_idle_sleep_event();
		acpi_cstate_enter(cx);
		/* Get end time (ticks) */
		t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
		if (pr->flags.bm_check) {
			/* Enable bus master arbitration */
			atomic_dec(&c3_cpu_count);
			acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
		}

#ifdef CONFIG_GENERIC_TIME
		/* TSC halts in C3, so notify users */
		mark_tsc_unstable("TSC halts in C3");
#endif
		/* Compute time (ticks) that we were actually asleep */
		sleep_ticks = ticks_elapsed(t1, t2);
		/* Tell the scheduler how much we idled: */
		sched_clock_idle_wakeup_event(sleep_ticks*PM_TIMER_TICK_NS);

		/* Re-enable interrupts */
		local_irq_enable();
		/* Do not account our idle-switching overhead: */
		sleep_ticks -= cx->latency_ticks + C3_OVERHEAD;

		current_thread_info()->status |= TS_POLLING;
		acpi_state_timer_broadcast(pr, cx, 0);
		break;

	default:
		local_irq_enable();
		return;
	}
	cx->usage++;
	if ((cx->type != ACPI_STATE_C1) && (sleep_ticks > 0))
		cx->time += sleep_ticks;

	next_state = pr->power.state;

#ifdef CONFIG_HOTPLUG_CPU
	/* Don't do promotion/demotion */
	if ((cx->type == ACPI_STATE_C1) && (num_online_cpus() > 1) &&
	    !pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED)) {
		next_state = cx;
		goto end;
	}
#endif

	/*
	 * Promotion?
	 * ----------
	 * Track the number of longs (time asleep is greater than threshold)
	 * and promote when the count threshold is reached.  Note that bus
	 * mastering activity may prevent promotions.
	 * Do not promote above max_cstate.
	 */
	if (cx->promotion.state &&
	    ((cx->promotion.state - pr->power.states) <= max_cstate)) {
		if (sleep_ticks > cx->promotion.threshold.ticks &&
		    cx->promotion.state->latency <= system_latency_constraint()) {
			cx->promotion.count++;
			cx->demotion.count = 0;
			if (cx->promotion.count >=
			    cx->promotion.threshold.count) {
				if (pr->flags.bm_check) {
					if (!(pr->power.bm_activity &
					      cx->promotion.threshold.bm)) {
						next_state = cx->promotion.state;
						goto end;
					}
				} else {
					next_state = cx->promotion.state;
					goto end;
				}
			}
		}
	}

	/*
	 * Demotion?
	 * ---------
	 * Track the number of shorts (time asleep is less than time threshold)
	 * and demote when the usage threshold is reached.
	 */
	if (cx->demotion.state) {
		if (sleep_ticks < cx->demotion.threshold.ticks) {
			cx->demotion.count++;
			cx->promotion.count = 0;
			if (cx->demotion.count >= cx->demotion.threshold.count) {
				next_state = cx->demotion.state;
				goto end;
			}
		}
	}

      end:
	/*
	 * Demote if current state exceeds max_cstate
	 * or if the latency of the current state is unacceptable
	 */
	if ((pr->power.state - pr->power.states) > max_cstate ||
	    pr->power.state->latency > system_latency_constraint()) {
		if (cx->demotion.state)
			next_state = cx->demotion.state;
	}

	/*
	 * New Cx State?
	 * -------------
	 * If we're going to start using a new Cx state we must clean up
	 * from the previous and prepare to use the new.
	 */
	if (next_state != pr->power.state)
		acpi_processor_power_activate(pr, next_state);
}

static int acpi_processor_set_power_policy(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int state_is_set = 0;
	struct acpi_processor_cx *lower = NULL;
	struct acpi_processor_cx *higher = NULL;
	struct acpi_processor_cx *cx;

	if (!pr)
		return -EINVAL;

	/*
	 * This function sets the default Cx state policy (OS idle handler).
	 * Our scheme is to promote quickly to C2 but more conservatively
	 * to C3.  We're favoring C2 for its characteristics of low latency
	 * (quick response), good power savings, and ability to allow bus
	 * mastering activity.  Note that the Cx state policy is completely
	 * customizable and can be altered dynamically.
	 */

	/* startup state */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (!state_is_set)
			pr->power.state = cx;
		state_is_set++;
		break;
	}

	if (!state_is_set)
		return -ENODEV;

	/* demotion */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (lower) {
			cx->demotion.state = lower;
			cx->demotion.threshold.ticks = cx->latency_ticks;
			cx->demotion.threshold.count = 1;
			if (cx->type == ACPI_STATE_C3)
				cx->demotion.threshold.bm = bm_history;
		}

		lower = cx;
	}

	/* promotion */
	for (i = (ACPI_PROCESSOR_MAX_POWER - 1); i > 0; i--) {
		cx = &pr->power.states[i];
		if (!cx->valid)
			continue;

		if (higher) {
			cx->promotion.state = higher;
			cx->promotion.threshold.ticks = cx->latency_ticks;
			if (cx->type >= ACPI_STATE_C2)
				cx->promotion.threshold.count = 4;
			else
				cx->promotion.threshold.count = 10;
			if (higher->type == ACPI_STATE_C3)
				cx->promotion.threshold.bm = bm_history;
		}

		higher = cx;
	}

	return 0;
}
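
/*
 * Resulting default policy, by way of example: with C1, C2 and C3 all
 * valid, C1 promotes to C2 after 10 sleeps longer than C1's own
 * latency_ticks, and C2 promotes to C3 after 4 such sleeps with no bus
 * master activity recorded in the bm_history window.  Each state demotes
 * to its next-shallower neighbour after a single sleep shorter than its
 * own latency_ticks.
 */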

static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{

	if (!pr)
		return -EINVAL;

	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.C2latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.C3latency;

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;
	return 0;
}

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	acpi_status status = 0;
	acpi_integer count;
	int current_count;
	int i;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *cst;

	if (nocst)
		return -ENODEV;

	current_count = 0;

	status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
		return -ENODEV;
	}

	cst = buffer.pointer;

	/* There must be at least 2 elements */
	if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
		printk(KERN_ERR PREFIX "not enough elements in _CST\n");
		status = -EFAULT;
		goto end;
	}

	count = cst->package.elements[0].integer.value;

	/* Validate number of power states. */
	if (count < 1 || count != cst->package.count - 1) {
		printk(KERN_ERR PREFIX "count given by _CST is not valid\n");
		status = -EFAULT;
		goto end;
	}

	/* Tell driver that at least _CST is supported. */
	pr->flags.has_cst = 1;
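
	/*
	 * Each remaining _CST package element is itself a package of the
	 * form { Register (buffer), type (integer), latency (integer),
	 * power (integer) }, which is exactly what the loop below unpacks
	 * and validates field by field, skipping malformed entries.
	 */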
	for (i = 1; i <= count; i++) {
		union acpi_object *element;
		union acpi_object *obj;
		struct acpi_power_register *reg;
		struct acpi_processor_cx cx;

		memset(&cx, 0, sizeof(cx));

		element = &(cst->package.elements[i]);
		if (element->type != ACPI_TYPE_PACKAGE)
			continue;

		if (element->package.count != 4)
			continue;

		obj = &(element->package.elements[0]);

		if (obj->type != ACPI_TYPE_BUFFER)
			continue;

		reg = (struct acpi_power_register *)obj->buffer.pointer;

		if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
		    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
			continue;

		/* There should be an easy way to extract an integer... */
		obj = &(element->package.elements[1]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.type = obj->integer.value;
		/*
		 * Some buggy BIOSes won't list C1 in _CST -
		 * Let acpi_processor_get_power_info_default() handle them later
		 */
		if (i == 1 && cx.type != ACPI_STATE_C1)
			current_count++;

		cx.address = reg->address;
		cx.index = current_count + 1;

		cx.space_id = ACPI_CSTATE_SYSTEMIO;
		if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
			if (acpi_processor_ffh_cstate_probe
			    (pr->id, &cx, reg) == 0) {
				cx.space_id = ACPI_CSTATE_FFH;
			} else if (cx.type != ACPI_STATE_C1) {
				/*
				 * C1 is a special case where FIXED_HARDWARE
				 * can be handled in a non-MWAIT way as well.
				 * In that case, save this _CST entry info.
				 * That is, we retain space_id of SYSTEM_IO for
				 * halt based C1.
				 * Otherwise, ignore this info and continue.
				 */
				continue;
			}
		}

		obj = &(element->package.elements[2]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.latency = obj->integer.value;

		obj = &(element->package.elements[3]);
		if (obj->type != ACPI_TYPE_INTEGER)
			continue;

		cx.power = obj->integer.value;

		current_count++;
		memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

		/*
		 * We support a total of ACPI_PROCESSOR_MAX_POWER - 1 states
		 * (from 1 through ACPI_PROCESSOR_MAX_POWER - 1).
		 */
		if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
			printk(KERN_WARNING
			       "Limiting number of power states to max (%d)\n",
			       ACPI_PROCESSOR_MAX_POWER);
			printk(KERN_WARNING
			       "Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
			  current_count));

	/* Validate number of power states discovered */
	if (current_count < 2)
		status = -EFAULT;

      end:
	kfree(buffer.pointer);

	return status;
}

static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
{

	if (!cx->address)
		return;

	/*
	 * C2 latency must be less than or equal to 100
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * Otherwise we've met all of our C2 requirements.
	 * Normalize the C2 latency to expedite policy.
	 */
	cx->valid = 1;
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);

	return;
}
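
/*
 * For reference: with ACPI_PROCESSOR_MAX_C2_LATENCY at 100us and
 * PM_TIMER_FREQUENCY at 3579545 Hz, the normalization above yields at
 * most US_TO_PM_TIMER_TICKS(100) = (100 * 3579) / 1000 = 357 PM timer
 * ticks for a valid C2 state.
 */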

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag;

	if (!cx->address)
		return;

	/*
	 * C3 latency must be less than or equal to 1000
	 * microseconds.
	 */
	else if (cx->latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "latency too large [%d]\n", cx->latency));
		return;
	}

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (!bm_check_flag) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
	} else {
		pr->flags.bm_check = bm_check_flag;
	}

	if (pr->flags.bm_check) {
		/* bus mastering control is necessary */
		if (!pr->flags.bm_control) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "C3 support requires bus mastering control\n"));
			return;
		}
	} else {
		/*
		 * WBINVD should be set in the FADT for C3 to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
		acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy.
	 */
	cx->valid = 1;
	cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);

	return;
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			acpi_processor_power_verify_c2(cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			if (cx->valid)
				acpi_timer_check_state(i, pr, cx);
			break;
		}

		if (cx->valid)
			working++;
	}

	acpi_propagate_timer_broadcast(pr);

	return (working);
}

static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * Set Default Policy
	 * ------------------
	 * Now that we know which states are supported, set the default
	 * policy.  Note that this policy can be changed dynamically
	 * (e.g. encourage deeper sleeps to conserve battery life when
	 * not on AC).
	 */
	result = acpi_processor_set_power_policy(pr);
	if (result)
		return result;

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			if (pr->power.states[i].type >= ACPI_STATE_C2)
				pr->flags.power = 1;
		}
	}

	return 0;
}

int acpi_processor_cst_has_changed(struct acpi_processor *pr)
{
	int result = 0;

	if (!pr)
		return -EINVAL;

	if (nocst)
		return -ENODEV;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/* Fall back to the default idle loop */
	pm_idle = pm_idle_save;
	synchronize_sched();	/* Relies on interrupts forcing exit from idle. */

	pr->flags.power = 0;
	result = acpi_processor_get_power_info(pr);
	if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
		pm_idle = acpi_processor_idle;

	return result;
}

/* proc interface */

static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
{
	struct acpi_processor *pr = seq->private;
	unsigned int i;

	if (!pr)
		goto end;

	seq_printf(seq, "active state:            C%zd\n"
		   "max_cstate:              C%d\n"
		   "bus master activity:     %08x\n"
		   "maximum allowed latency: %d usec\n",
		   pr->power.state ? pr->power.state - pr->power.states : 0,
		   max_cstate, (unsigned)pr->power.bm_activity,
		   system_latency_constraint());

	seq_puts(seq, "states:\n");

	for (i = 1; i <= pr->power.count; i++) {
		seq_printf(seq, "   %cC%d:                  ",
			   (&pr->power.states[i] ==
			    pr->power.state ? '*' : ' '), i);

		if (!pr->power.states[i].valid) {
			seq_puts(seq, "<not supported>\n");
			continue;
		}

		switch (pr->power.states[i].type) {
		case ACPI_STATE_C1:
			seq_printf(seq, "type[C1] ");
			break;
		case ACPI_STATE_C2:
			seq_printf(seq, "type[C2] ");
			break;
		case ACPI_STATE_C3:
			seq_printf(seq, "type[C3] ");
			break;
		default:
			seq_printf(seq, "type[--] ");
			break;
		}

		if (pr->power.states[i].promotion.state)
			seq_printf(seq, "promotion[C%zd] ",
				   (pr->power.states[i].promotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "promotion[--] ");

		if (pr->power.states[i].demotion.state)
			seq_printf(seq, "demotion[C%zd] ",
				   (pr->power.states[i].demotion.state -
				    pr->power.states));
		else
			seq_puts(seq, "demotion[--] ");

		seq_printf(seq, "latency[%03d] usage[%08d] duration[%020llu]\n",
			   pr->power.states[i].latency,
			   pr->power.states[i].usage,
			   (unsigned long long)pr->power.states[i].time);
	}

      end:
	return 0;
}
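
/*
 * The resulting /proc/acpi/processor/CPUx/power file looks roughly like
 * this (values below are illustrative, not from any particular machine):
 *
 *   active state:            C2
 *   max_cstate:              C8
 *   bus master activity:     00000000
 *   maximum allowed latency: 2000 usec
 *   states:
 *       C1:                  type[C1] promotion[C2] demotion[--] latency[000] usage[00000010] duration[00000000000000000000]
 *      *C2:                  type[C2] promotion[--] demotion[C1] latency[001] usage[00000492] duration[00000000000000154235]
 */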

static int acpi_processor_power_open_fs(struct inode *inode, struct file *file)
{
	return single_open(file, acpi_processor_power_seq_show,
			   PDE(inode)->data);
}

static const struct file_operations acpi_processor_power_fops = {
	.open = acpi_processor_power_open_fs,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#ifdef CONFIG_SMP
static void smp_callback(void *v)
{
	/* we already woke the CPU up, nothing more to do */
}

/*
 * This function gets called when a part of the kernel has a new latency
 * requirement.  This means we need to get all processors out of their C-state,
 * and then recalculate a new suitable C-state.  Just do a cross-cpu IPI; that
 * wakes them all right up.
 */
static int acpi_processor_latency_notify(struct notifier_block *b,
					 unsigned long l, void *v)
{
	smp_call_function(smp_callback, NULL, 0, 1);
	return NOTIFY_OK;
}

static struct notifier_block acpi_processor_latency_notifier = {
	.notifier_call = acpi_processor_latency_notify,
};
#endif

int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
					struct acpi_device *device)
{
	acpi_status status = 0;
	static int first_run;
	struct proc_dir_entry *entry = NULL;
	unsigned int i;

	if (!first_run) {
		dmi_check_system(processor_power_dmi_table);
		if (max_cstate < ACPI_C_STATES_MAX)
			printk(KERN_NOTICE
			       "ACPI: processor limited to max C-state %d\n",
			       max_cstate);
		first_run++;
#ifdef CONFIG_SMP
		register_latency_notifier(&acpi_processor_latency_notifier);
#endif
	}

	if (!pr)
		return -EINVAL;

	if (acpi_gbl_FADT.cst_control && !nocst) {
		status =
		    acpi_os_write_port(acpi_gbl_FADT.smi_command, acpi_gbl_FADT.cst_control, 8);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Notifying BIOS of _CST ability failed"));
		}
	}

	acpi_processor_get_power_info(pr);

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on
	 * platforms that only support C1.
	 */
	if ((pr->flags.power) && (!boot_option_idle_override)) {
		printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
		for (i = 1; i <= pr->power.count; i++)
			if (pr->power.states[i].valid)
				printk(" C%d[C%d]", i,
				       pr->power.states[i].type);
		printk(")\n");

		if (pr->id == 0) {
			pm_idle_save = pm_idle;
			pm_idle = acpi_processor_idle;
		}
	}

	/* 'power' [R] */
	entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  S_IRUGO, acpi_device_dir(device));
	if (!entry)
		return -EIO;
	else {
		entry->proc_fops = &acpi_processor_power_fops;
		entry->data = acpi_driver_data(device);
		entry->owner = THIS_MODULE;
	}

	pr->flags.power_setup_done = 1;

	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr,
			      struct acpi_device *device)
{

	pr->flags.power_setup_done = 0;

	if (acpi_device_dir(device))
		remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
				  acpi_device_dir(device));

	/* Unregister the idle handler when processor #0 is removed. */
	if (pr->id == 0) {
		pm_idle = pm_idle_save;

		/*
		 * We are about to unload the current idle thread pm callback
		 * (pm_idle).  Wait for all processors to update cached/local
		 * copies of pm_idle before proceeding.
		 */
		cpu_idle_wait();
#ifdef CONFIG_SMP
		unregister_latency_notifier(&acpi_processor_latency_notifier);
#endif
	}

	return 0;
}