/*
 * Thermal throttle event support code (such as syslog messaging and rate
 * limiting) that was factored out from x86_64 (mce_intel.c) and i386 (p4.c).
 *
 * This allows consistent reporting of CPU thermal throttle events.
 *
 * Maintains a counter in /sys that keeps track of the number of thermal
 * events, such that the user knows how bad the thermal problem might be
 * (since the logging to syslog and mcelog is rate limited).
 *
 * Author: Dmitriy Zavin (dmitriyz@google.com)
 *
 * Credits: Adapted from Zwane Mwaikambo's original code in mce_intel.c.
 *          Inspired by Ross Biro's and Al Borchers' counter code.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/sysdev.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/cpu.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/msr.h>

/* How long to wait between reporting thermal events */
#define CHECK_INTERVAL		(300 * HZ)

#define THERMAL_THROTTLING_EVENT	0
#define POWER_LIMIT_EVENT		1

/*
 * Current thermal event state:
 */
struct _thermal_state {
	bool			new_event;
	int			event;
	u64			next_check;
	unsigned long		count;
	unsigned long		last_count;
};

struct thermal_state {
	struct _thermal_state core_throttle;
	struct _thermal_state core_power_limit;
	struct _thermal_state package_throttle;
	struct _thermal_state package_power_limit;
};

static DEFINE_PER_CPU(struct thermal_state, thermal_state);

static atomic_t therm_throt_en = ATOMIC_INIT(0);

static u32 lvtthmr_init __read_mostly;

#ifdef CONFIG_SYSFS
#define define_therm_throt_sysdev_one_ro(_name)			\
	static SYSDEV_ATTR(_name, 0444,					\
			   therm_throt_sysdev_show_##_name,		\
			   NULL)					\

#define define_therm_throt_sysdev_show_func(event, name)		\
									\
static ssize_t therm_throt_sysdev_show_##event##_##name(		\
			struct sys_device *dev,				\
			struct sysdev_attribute *attr,			\
			char *buf)					\
{									\
	unsigned int cpu = dev->id;					\
	ssize_t ret;							\
									\
	preempt_disable();	/* CPU hotplug */			\
	if (cpu_online(cpu)) {						\
		ret = sprintf(buf, "%lu\n",				\
			      per_cpu(thermal_state, cpu).event.name);	\
	} else								\
		ret = 0;						\
	preempt_enable();						\
									\
	return ret;							\
}

define_therm_throt_sysdev_show_func(core_throttle, count);
define_therm_throt_sysdev_one_ro(core_throttle_count);

define_therm_throt_sysdev_show_func(core_power_limit, count);
define_therm_throt_sysdev_one_ro(core_power_limit_count);

define_therm_throt_sysdev_show_func(package_throttle, count);
define_therm_throt_sysdev_one_ro(package_throttle_count);

define_therm_throt_sysdev_show_func(package_power_limit, count);
define_therm_throt_sysdev_one_ro(package_power_limit_count);

static struct attribute *thermal_throttle_attrs[] = {
	&attr_core_throttle_count.attr,
	NULL
};

static struct attribute_group thermal_attr_group = {
	.attrs	= thermal_throttle_attrs,
	.name	= "thermal_throttle"
};
#endif /* CONFIG_SYSFS */

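/*
 * Note: the per-CPU counters defined above are the /sys counters mentioned
 * in the file header. They show up under the CPU sysdev, e.g. (a
 * representative path, assuming the usual sysdev layout for CPU devices):
 *
 *	/sys/devices/system/cpu/cpu0/thermal_throttle/core_throttle_count
 */
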
#define CORE_LEVEL	0
#define PACKAGE_LEVEL	1

/***
 * therm_throt_process - Process thermal throttling event from interrupt
 * @new_event: Whether the event condition is currently asserted (boolean),
 *             since the thermal interrupt normally gets called both when
 *             the thermal event begins and once it has ended.
 * @event:     THERMAL_THROTTLING_EVENT or POWER_LIMIT_EVENT.
 * @level:     CORE_LEVEL or PACKAGE_LEVEL.
 *
 * This function is called by the thermal interrupt after the
 * IRQ has been acknowledged.
 *
 * It will take care of rate limiting and printing messages to the syslog.
 *
 * Returns: 0 : Event should NOT be further logged, i.e. still in
 *              "timeout" from previous log message.
 *          1 : Event should be logged further, and a message has been
 *              printed to the syslog.
 */
static int therm_throt_process(bool new_event, int event, int level)
{
	struct _thermal_state *state;
	unsigned int this_cpu = smp_processor_id();
	bool old_event;
	u64 now;
	struct thermal_state *pstate = &per_cpu(thermal_state, this_cpu);

	now = get_jiffies_64();
	if (level == CORE_LEVEL) {
		if (event == THERMAL_THROTTLING_EVENT)
			state = &pstate->core_throttle;
		else if (event == POWER_LIMIT_EVENT)
			state = &pstate->core_power_limit;
		else
			return 0;
	} else if (level == PACKAGE_LEVEL) {
		if (event == THERMAL_THROTTLING_EVENT)
			state = &pstate->package_throttle;
		else if (event == POWER_LIMIT_EVENT)
			state = &pstate->package_power_limit;
		else
			return 0;
	} else
		return 0;

	old_event = state->new_event;
	state->new_event = new_event;

	if (new_event)
		state->count++;

	/* Rate limit: stay quiet while inside the CHECK_INTERVAL window. */
	if (time_before64(now, state->next_check) &&
			state->count != state->last_count)
		return 0;

	state->next_check = now + CHECK_INTERVAL;
	state->last_count = state->count;

	/* if we just entered the thermal event */
	if (new_event) {
		if (event == THERMAL_THROTTLING_EVENT)
			printk(KERN_CRIT "CPU%d: %s temperature above threshold, cpu clock throttled (total events = %lu)\n",
				this_cpu,
				level == CORE_LEVEL ? "Core" : "Package",
				state->count);
		else
			printk(KERN_CRIT "CPU%d: %s power limit notification (total events = %lu)\n",
				this_cpu,
				level == CORE_LEVEL ? "Core" : "Package",
				state->count);

		add_taint(TAINT_MACHINE_CHECK);
		return 1;
	}
	if (old_event) {
		if (event == THERMAL_THROTTLING_EVENT)
			printk(KERN_INFO "CPU%d: %s temperature/speed normal\n",
				this_cpu,
				level == CORE_LEVEL ? "Core" : "Package");
		else
			printk(KERN_INFO "CPU%d: %s power limit normal\n",
				this_cpu,
				level == CORE_LEVEL ? "Core" : "Package");
		return 1;
	}

	return 0;
}

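/*
 * For illustration, the rate-limited syslog output produced above looks
 * roughly like this for a core-level throttling event (the CPU number and
 * event count below are examples only):
 *
 *	CPU1: Core temperature above threshold, cpu clock throttled (total events = 42)
 *	CPU1: Core temperature/speed normal
 */
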
#ifdef CONFIG_SYSFS
/* Add/Remove thermal_throttle interface for CPU device: */
static __cpuinit int thermal_throttle_add_dev(struct sys_device *sys_dev,
				unsigned int cpu)
{
	int err;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	err = sysfs_create_group(&sys_dev->kobj, &thermal_attr_group);
	if (err)
		return err;

	if (cpu_has(c, X86_FEATURE_PLN))
		err = sysfs_add_file_to_group(&sys_dev->kobj,
					      &attr_core_power_limit_count.attr,
					      thermal_attr_group.name);
	if (cpu_has(c, X86_FEATURE_PTS)) {
		err = sysfs_add_file_to_group(&sys_dev->kobj,
					      &attr_package_throttle_count.attr,
					      thermal_attr_group.name);
		if (cpu_has(c, X86_FEATURE_PLN))
			err = sysfs_add_file_to_group(&sys_dev->kobj,
					&attr_package_power_limit_count.attr,
					thermal_attr_group.name);
	}

	return err;
}

static __cpuinit void thermal_throttle_remove_dev(struct sys_device *sys_dev)
{
	sysfs_remove_group(&sys_dev->kobj, &thermal_attr_group);
}

/* Mutex protecting device creation against CPU hotplug: */
static DEFINE_MUTEX(therm_cpu_lock);

/* Get notified when a CPU comes on/off. Be hotplug friendly. */
static __cpuinit int
thermal_throttle_cpu_callback(struct notifier_block *nfb,
			      unsigned long action,
			      void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;
	int err = 0;

	sys_dev = get_cpu_sysdev(cpu);

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		mutex_lock(&therm_cpu_lock);
		err = thermal_throttle_add_dev(sys_dev, cpu);
		mutex_unlock(&therm_cpu_lock);
		WARN_ON(err);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		mutex_lock(&therm_cpu_lock);
		thermal_throttle_remove_dev(sys_dev);
		mutex_unlock(&therm_cpu_lock);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
{
	.notifier_call = thermal_throttle_cpu_callback,
};

static __init int thermal_throttle_init_device(void)
{
	unsigned int cpu = 0;
	int err;

	if (!atomic_read(&therm_throt_en))
		return 0;

	register_hotcpu_notifier(&thermal_throttle_cpu_notifier);

#ifdef CONFIG_HOTPLUG_CPU
	mutex_lock(&therm_cpu_lock);
#endif
	/* connect live CPUs to sysfs */
	for_each_online_cpu(cpu) {
		err = thermal_throttle_add_dev(get_cpu_sysdev(cpu), cpu);
		WARN_ON(err);
	}
#ifdef CONFIG_HOTPLUG_CPU
	mutex_unlock(&therm_cpu_lock);
#endif

	return 0;
}
device_initcall(thermal_throttle_init_device);

#endif /* CONFIG_SYSFS */

/*
 * Use the two most significant bits of the logged value to tell the mce log
 * which thermal event type it carries.
 * This is a temporary solution and may change in the future along with the
 * mce log infrastructure.
 */
#define CORE_THROTTLED		(0)
#define CORE_POWER_LIMIT	((__u64)1 << 62)
#define PACKAGE_THROTTLED	((__u64)2 << 62)
#define PACKAGE_POWER_LIMIT	((__u64)3 << 62)

/* Thermal transition interrupt handler */
static void intel_thermal_interrupt(void)
{
	__u64 msr_val;
	struct cpuinfo_x86 *c = &cpu_data(smp_processor_id());

	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);

	if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
				THERMAL_THROTTLING_EVENT,
				CORE_LEVEL) != 0)
		mce_log_therm_throt_event(CORE_THROTTLED | msr_val);

	if (cpu_has(c, X86_FEATURE_PLN))
		if (therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
					POWER_LIMIT_EVENT,
					CORE_LEVEL) != 0)
			mce_log_therm_throt_event(CORE_POWER_LIMIT | msr_val);

	if (cpu_has(c, X86_FEATURE_PTS)) {
		rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val);
		if (therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT,
					THERMAL_THROTTLING_EVENT,
					PACKAGE_LEVEL) != 0)
			mce_log_therm_throt_event(PACKAGE_THROTTLED | msr_val);
		if (cpu_has(c, X86_FEATURE_PLN))
			if (therm_throt_process(msr_val &
					PACKAGE_THERM_STATUS_POWER_LIMIT,
					POWER_LIMIT_EVENT,
					PACKAGE_LEVEL) != 0)
				mce_log_therm_throt_event(PACKAGE_POWER_LIMIT
							  | msr_val);
	}
}

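/*
 * Illustration only (not an interface defined in this file): a consumer of
 * the mce log could recover the event type from a value logged above by
 * inspecting the two tag bits, e.g.
 *
 *	type = logged_val >> 62;	0 = CORE_THROTTLED
 *					1 = CORE_POWER_LIMIT
 *					2 = PACKAGE_THROTTLED
 *					3 = PACKAGE_POWER_LIMIT
 *
 * (assuming the reserved top bits of the raw MSR value read as zero).
 */
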
static void unexpected_thermal_interrupt(void)
{
	printk(KERN_ERR "CPU%d: Unexpected LVT TMR interrupt!\n",
		smp_processor_id());
	add_taint(TAINT_MACHINE_CHECK);
}

static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;

asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
{
	exit_idle();
	irq_enter();
	inc_irq_stat(irq_thermal_count);
	smp_thermal_vector();
	irq_exit();
	/* Ack only at the end to avoid potential reentry */
	ack_APIC_irq();
}

/* Thermal monitoring depends on APIC, ACPI and clock modulation */
static int intel_thermal_supported(struct cpuinfo_x86 *c)
{
	if (!cpu_has_apic)
		return 0;
	if (!cpu_has(c, X86_FEATURE_ACPI) || !cpu_has(c, X86_FEATURE_ACC))
		return 0;
	return 1;
}

void __init mcheck_intel_therm_init(void)
{
	/*
	 * This function is called only on the boot CPU. Save the initial
	 * thermal LVT value programmed by the BIOS on the BSP; it is used
	 * later to restore the thermal LVT entry on the APs.
	 */
	if (intel_thermal_supported(&boot_cpu_data))
		lvtthmr_init = apic_read(APIC_LVTTHMR);
}

void intel_init_thermal(struct cpuinfo_x86 *c)
{
	unsigned int cpu = smp_processor_id();
	int tm2 = 0;
	u32 l, h;

	if (!intel_thermal_supported(c))
		return;

	/*
	 * First check if it's enabled already, in which case there might
	 * be some SMM goo which handles it, so we can't even put a handler
	 * since it might be delivered via SMI already:
	 */
	rdmsr(MSR_IA32_MISC_ENABLE, l, h);

	/*
	 * The initial value of the thermal LVT entry on all APs always reads
	 * 0x10000 because the APs are woken up by the BSP issuing an
	 * INIT-SIPI-SIPI sequence to them, and the LVT registers are reset
	 * to 0, except for the mask bits which are set to 1, when the APs
	 * receive the INIT IPI. Always restore the value the BIOS programmed,
	 * based on what we saved on the BSP, since the BIOS sets the same
	 * value for all threads/cores.
	 */
	apic_write(APIC_LVTTHMR, lvtthmr_init);

	h = lvtthmr_init;

	if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
		printk(KERN_DEBUG
		       "CPU%d: Thermal monitoring handled by SMI\n", cpu);
		return;
	}

	/* Check whether a vector already exists */
	if (h & APIC_VECTOR_MASK) {
		printk(KERN_DEBUG
		       "CPU%d: Thermal LVT vector (%#x) already installed\n",
		       cpu, (h & APIC_VECTOR_MASK));
		return;
	}

	/* early Pentium M models use a different method for enabling TM2 */
	if (cpu_has(c, X86_FEATURE_TM2)) {
		if (c->x86 == 6 && (c->x86_model == 9 || c->x86_model == 13)) {
			rdmsr(MSR_THERM2_CTL, l, h);
			if (l & MSR_THERM2_CTL_TM_SELECT)
				tm2 = 1;
		} else if (l & MSR_IA32_MISC_ENABLE_TM2)
			tm2 = 1;
	}

	/* We'll mask the thermal vector in the lapic till we're ready: */
	h = THERMAL_APIC_VECTOR | APIC_DM_FIXED | APIC_LVT_MASKED;
	apic_write(APIC_LVTTHMR, h);

	rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
	if (cpu_has(c, X86_FEATURE_PLN))
		wrmsr(MSR_IA32_THERM_INTERRUPT,
		      l | (THERM_INT_LOW_ENABLE
			| THERM_INT_HIGH_ENABLE | THERM_INT_PLN_ENABLE), h);
	else
		wrmsr(MSR_IA32_THERM_INTERRUPT,
		      l | (THERM_INT_LOW_ENABLE | THERM_INT_HIGH_ENABLE), h);

	if (cpu_has(c, X86_FEATURE_PTS)) {
		rdmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT, l, h);
		if (cpu_has(c, X86_FEATURE_PLN))
			wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
			      l | (PACKAGE_THERM_INT_LOW_ENABLE
				| PACKAGE_THERM_INT_HIGH_ENABLE
				| PACKAGE_THERM_INT_PLN_ENABLE), h);
		else
			wrmsr(MSR_IA32_PACKAGE_THERM_INTERRUPT,
			      l | (PACKAGE_THERM_INT_LOW_ENABLE
				| PACKAGE_THERM_INT_HIGH_ENABLE), h);
	}

	smp_thermal_vector = intel_thermal_interrupt;

	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
	wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);

	/* Unmask the thermal vector: */
	l = apic_read(APIC_LVTTHMR);
	apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);

	printk_once(KERN_INFO "CPU0: Thermal monitoring enabled (%s)\n",
		    tm2 ? "TM2" : "TM1");

	/* enable thermal throttle processing */
	atomic_set(&therm_throt_en, 1);
}