1/* 2 * Detect hard and soft lockups on a system 3 * 4 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc. 5 * 6 * this code detects hard lockups: incidents in where on a CPU 7 * the kernel does not respond to anything except NMI. 8 * 9 * Note: Most of this code is borrowed heavily from softlockup.c, 10 * so thanks to Ingo for the initial implementation. 11 * Some chunks also taken from arch/x86/kernel/apic/nmi.c, thanks 12 * to those contributors as well. 13 */ 14 15#include <linux/mm.h> 16#include <linux/cpu.h> 17#include <linux/nmi.h> 18#include <linux/init.h> 19#include <linux/delay.h> 20#include <linux/freezer.h> 21#include <linux/kthread.h> 22#include <linux/lockdep.h> 23#include <linux/notifier.h> 24#include <linux/module.h> 25#include <linux/sysctl.h> 26 27#include <asm/irq_regs.h> 28#include <linux/perf_event.h> 29 30int watchdog_enabled; 31int __read_mostly softlockup_thresh = 60; 32 33static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts); 34static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog); 35static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer); 36static DEFINE_PER_CPU(bool, softlockup_touch_sync); 37static DEFINE_PER_CPU(bool, soft_watchdog_warn); 38#ifdef CONFIG_HARDLOCKUP_DETECTOR 39static DEFINE_PER_CPU(bool, hard_watchdog_warn); 40static DEFINE_PER_CPU(bool, watchdog_nmi_touch); 41static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts); 42static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); 43static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); 44#endif 45 46static int __read_mostly did_panic; 47static int __initdata no_watchdog; 48 49 50/* boot commands */ 51/* 52 * Should we panic when a soft-lockup or hard-lockup occurs: 53 */ 54#ifdef CONFIG_HARDLOCKUP_DETECTOR 55static int hardlockup_panic; 56 57static int __init hardlockup_panic_setup(char *str) 58{ 59 if (!strncmp(str, "panic", 5)) 60 hardlockup_panic = 1; 61 return 1; 62} 63__setup("nmi_watchdog=", hardlockup_panic_setup); 64#endif 65 66unsigned int __read_mostly softlockup_panic = 67 CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE; 68 69static int __init softlockup_panic_setup(char *str) 70{ 71 softlockup_panic = simple_strtoul(str, NULL, 0); 72 73 return 1; 74} 75__setup("softlockup_panic=", softlockup_panic_setup); 76 77static int __init nowatchdog_setup(char *str) 78{ 79 no_watchdog = 1; 80 return 1; 81} 82__setup("nowatchdog", nowatchdog_setup); 83 84/* deprecated */ 85static int __init nosoftlockup_setup(char *str) 86{ 87 no_watchdog = 1; 88 return 1; 89} 90__setup("nosoftlockup", nosoftlockup_setup); 91/* */ 92 93 94/* 95 * Returns seconds, approximately. We don't need nanosecond 96 * resolution, and we don't need to waste time with a big divide when 97 * 2^30ns == 1.074s. 98 */ 99static unsigned long get_timestamp(int this_cpu) 100{ 101 return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */ 102} 103 104static unsigned long get_sample_period(void) 105{ 106 /* 107 * convert softlockup_thresh from seconds to ns 108 * the divide by 5 is to give hrtimer 5 chances to 109 * increment before the hardlockup detector generates 110 * a warning 111 */ 112 return softlockup_thresh / 5 * NSEC_PER_SEC; 113} 114 115/* Commands for resetting the watchdog */ 116static void __touch_watchdog(void) 117{ 118 int this_cpu = smp_processor_id(); 119 120 __get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu); 121} 122 123void touch_softlockup_watchdog(void) 124{ 125 __raw_get_cpu_var(watchdog_touch_ts) = 0; 126} 127EXPORT_SYMBOL(touch_softlockup_watchdog); 128 129void touch_all_softlockup_watchdogs(void) 130{ 131 int cpu; 132 133 /* 134 * this is done lockless 135 * do we care if a 0 races with a timestamp? 136 * all it means is the softlock check starts one cycle later 137 */ 138 for_each_online_cpu(cpu) 139 per_cpu(watchdog_touch_ts, cpu) = 0; 140} 141 142#ifdef CONFIG_HARDLOCKUP_DETECTOR 143void touch_nmi_watchdog(void) 144{ 145 if (watchdog_enabled) { 146 unsigned cpu; 147 148 for_each_present_cpu(cpu) { 149 if (per_cpu(watchdog_nmi_touch, cpu) != true) 150 per_cpu(watchdog_nmi_touch, cpu) = true; 151 } 152 } 153 touch_softlockup_watchdog(); 154} 155EXPORT_SYMBOL(touch_nmi_watchdog); 156 157#endif 158 159void touch_softlockup_watchdog_sync(void) 160{ 161 __raw_get_cpu_var(softlockup_touch_sync) = true; 162 __raw_get_cpu_var(watchdog_touch_ts) = 0; 163} 164 165#ifdef CONFIG_HARDLOCKUP_DETECTOR 166/* watchdog detector functions */ 167static int is_hardlockup(void) 168{ 169 unsigned long hrint = __get_cpu_var(hrtimer_interrupts); 170 171 if (__get_cpu_var(hrtimer_interrupts_saved) == hrint) 172 return 1; 173 174 __get_cpu_var(hrtimer_interrupts_saved) = hrint; 175 return 0; 176} 177#endif 178 179static int is_softlockup(unsigned long touch_ts) 180{ 181 unsigned long now = get_timestamp(smp_processor_id()); 182 183 /* Warn about unreasonable delays: */ 184 if (time_after(now, touch_ts + softlockup_thresh)) 185 return now - touch_ts; 186 187 return 0; 188} 189 190static int 191watchdog_panic(struct notifier_block *this, unsigned long event, void *ptr) 192{ 193 did_panic = 1; 194 195 return NOTIFY_DONE; 196} 197 198static struct notifier_block panic_block = { 199 .notifier_call = watchdog_panic, 200}; 201 202#ifdef CONFIG_HARDLOCKUP_DETECTOR 203static struct perf_event_attr wd_hw_attr = { 204 .type = PERF_TYPE_HARDWARE, 205 .config = PERF_COUNT_HW_CPU_CYCLES, 206 .size = sizeof(struct perf_event_attr), 207 .pinned = 1, 208 .disabled = 1, 209}; 210 211/* Callback function for perf event subsystem */ 212void watchdog_overflow_callback(struct perf_event *event, int nmi, 213 struct perf_sample_data *data, 214 struct pt_regs *regs) 215{ 216 /* Ensure the watchdog never gets throttled */ 217 event->hw.interrupts = 0; 218 219 if (__get_cpu_var(watchdog_nmi_touch) == true) { 220 __get_cpu_var(watchdog_nmi_touch) = false; 221 return; 222 } 223 224 /* check for a hardlockup 225 * This is done by making sure our timer interrupt 226 * is incrementing. The timer interrupt should have 227 * fired multiple times before we overflow'd. If it hasn't 228 * then this is a good indication the cpu is stuck 229 */ 230 if (is_hardlockup()) { 231 int this_cpu = smp_processor_id(); 232 233 /* only print hardlockups once */ 234 if (__get_cpu_var(hard_watchdog_warn) == true) 235 return; 236 237 if (hardlockup_panic) 238 panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu); 239 else 240 WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu); 241 242 __get_cpu_var(hard_watchdog_warn) = true; 243 return; 244 } 245 246 __get_cpu_var(hard_watchdog_warn) = false; 247 return; 248} 249static void watchdog_interrupt_count(void) 250{ 251 __get_cpu_var(hrtimer_interrupts)++; 252} 253#else 254static inline void watchdog_interrupt_count(void) { return; } 255#endif /* CONFIG_HARDLOCKUP_DETECTOR */ 256 257/* watchdog kicker functions */ 258static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) 259{ 260 unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts); 261 struct pt_regs *regs = get_irq_regs(); 262 int duration; 263 264 /* kick the hardlockup detector */ 265 watchdog_interrupt_count(); 266 267 /* kick the softlockup detector */ 268 wake_up_process(__get_cpu_var(softlockup_watchdog)); 269 270 /* .. and repeat */ 271 hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period())); 272 273 if (touch_ts == 0) { 274 if (unlikely(__get_cpu_var(softlockup_touch_sync))) { 275 /* 276 * If the time stamp was touched atomically 277 * make sure the scheduler tick is up to date. 278 */ 279 __get_cpu_var(softlockup_touch_sync) = false; 280 sched_clock_tick(); 281 } 282 __touch_watchdog(); 283 return HRTIMER_RESTART; 284 } 285 286 /* check for a softlockup 287 * This is done by making sure a high priority task is 288 * being scheduled. The task touches the watchdog to 289 * indicate it is getting cpu time. If it hasn't then 290 * this is a good indication some task is hogging the cpu 291 */ 292 duration = is_softlockup(touch_ts); 293 if (unlikely(duration)) { 294 /* only warn once */ 295 if (__get_cpu_var(soft_watchdog_warn) == true) 296 return HRTIMER_RESTART; 297 298 printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n", 299 smp_processor_id(), duration, 300 current->comm, task_pid_nr(current)); 301 print_modules(); 302 print_irqtrace_events(current); 303 if (regs) 304 show_regs(regs); 305 else 306 dump_stack(); 307 308 if (softlockup_panic) 309 panic("softlockup: hung tasks"); 310 __get_cpu_var(soft_watchdog_warn) = true; 311 } else 312 __get_cpu_var(soft_watchdog_warn) = false; 313 314 return HRTIMER_RESTART; 315} 316 317 318/* 319 * The watchdog thread - touches the timestamp. 320 */ 321static int watchdog(void *unused) 322{ 323 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; 324 struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer); 325 326 sched_setscheduler(current, SCHED_FIFO, ¶m); 327 328 /* initialize timestamp */ 329 __touch_watchdog(); 330 331 /* kick off the timer for the hardlockup detector */ 332 /* done here because hrtimer_start can only pin to smp_processor_id() */ 333 hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()), 334 HRTIMER_MODE_REL_PINNED); 335 336 set_current_state(TASK_INTERRUPTIBLE); 337 /* 338 * Run briefly once per second to reset the softlockup timestamp. 339 * If this gets delayed for more than 60 seconds then the 340 * debug-printout triggers in watchdog_timer_fn(). 341 */ 342 while (!kthread_should_stop()) { 343 __touch_watchdog(); 344 schedule(); 345 346 if (kthread_should_stop()) 347 break; 348 349 set_current_state(TASK_INTERRUPTIBLE); 350 } 351 __set_current_state(TASK_RUNNING); 352 353 return 0; 354} 355 356 357#ifdef CONFIG_HARDLOCKUP_DETECTOR 358static int watchdog_nmi_enable(int cpu) 359{ 360 struct perf_event_attr *wd_attr; 361 struct perf_event *event = per_cpu(watchdog_ev, cpu); 362 363 /* is it already setup and enabled? */ 364 if (event && event->state > PERF_EVENT_STATE_OFF) 365 goto out; 366 367 /* it is setup but not enabled */ 368 if (event != NULL) 369 goto out_enable; 370 371 /* Try to register using hardware perf events */ 372 wd_attr = &wd_hw_attr; 373 wd_attr->sample_period = hw_nmi_get_sample_period(); 374 event = perf_event_create_kernel_counter(wd_attr, cpu, -1, watchdog_overflow_callback); 375 if (!IS_ERR(event)) { 376 printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n"); 377 goto out_save; 378 } 379 380 printk(KERN_ERR "NMI watchdog disabled for cpu%i: unable to create perf event: %ld\n", 381 cpu, PTR_ERR(event)); 382 return -1; 383 384 /* success path */ 385out_save: 386 per_cpu(watchdog_ev, cpu) = event; 387out_enable: 388 perf_event_enable(per_cpu(watchdog_ev, cpu)); 389out: 390 return 0; 391} 392 393static void watchdog_nmi_disable(int cpu) 394{ 395 struct perf_event *event = per_cpu(watchdog_ev, cpu); 396 397 if (event) { 398 perf_event_disable(event); 399 per_cpu(watchdog_ev, cpu) = NULL; 400 401 /* should be in cleanup, but blocks oprofile */ 402 perf_event_release_kernel(event); 403 } 404 return; 405} 406#else 407static int watchdog_nmi_enable(int cpu) { return 0; } 408static void watchdog_nmi_disable(int cpu) { return; } 409#endif /* CONFIG_HARDLOCKUP_DETECTOR */ 410 411/* prepare/enable/disable routines */ 412static int watchdog_prepare_cpu(int cpu) 413{ 414 struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu); 415 416 WARN_ON(per_cpu(softlockup_watchdog, cpu)); 417 hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 418 hrtimer->function = watchdog_timer_fn; 419 420 return 0; 421} 422 423static int watchdog_enable(int cpu) 424{ 425 struct task_struct *p = per_cpu(softlockup_watchdog, cpu); 426 427 /* enable the perf event */ 428 if (watchdog_nmi_enable(cpu) != 0) 429 return -1; 430 431 /* create the watchdog thread */ 432 if (!p) { 433 p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu); 434 if (IS_ERR(p)) { 435 printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu); 436 return -1; 437 } 438 kthread_bind(p, cpu); 439 per_cpu(watchdog_touch_ts, cpu) = 0; 440 per_cpu(softlockup_watchdog, cpu) = p; 441 wake_up_process(p); 442 } 443 444 return 0; 445} 446 447static void watchdog_disable(int cpu) 448{ 449 struct task_struct *p = per_cpu(softlockup_watchdog, cpu); 450 struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu); 451 452 /* 453 * cancel the timer first to stop incrementing the stats 454 * and waking up the kthread 455 */ 456 hrtimer_cancel(hrtimer); 457 458 /* disable the perf event */ 459 watchdog_nmi_disable(cpu); 460 461 /* stop the watchdog thread */ 462 if (p) { 463 per_cpu(softlockup_watchdog, cpu) = NULL; 464 kthread_stop(p); 465 } 466} 467 468static void watchdog_enable_all_cpus(void) 469{ 470 int cpu; 471 472 watchdog_enabled = 0; 473 474 for_each_online_cpu(cpu) 475 if (!watchdog_enable(cpu)) 476 /* if any cpu succeeds, watchdog is considered 477 enabled for the system */ 478 watchdog_enabled = 1; 479 480 if (!watchdog_enabled) 481 printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n"); 482 483} 484 485static void watchdog_disable_all_cpus(void) 486{ 487 int cpu; 488 489 for_each_online_cpu(cpu) 490 watchdog_disable(cpu); 491 492 /* if all watchdogs are disabled, then they are disabled for the system */ 493 watchdog_enabled = 0; 494} 495 496 497/* sysctl functions */ 498#ifdef CONFIG_SYSCTL 499/* 500 * proc handler for /proc/sys/kernel/nmi_watchdog 501 */ 502 503int proc_dowatchdog_enabled(struct ctl_table *table, int write, 504 void __user *buffer, size_t *length, loff_t *ppos) 505{ 506 proc_dointvec(table, write, buffer, length, ppos); 507 508 if (write) { 509 if (watchdog_enabled) 510 watchdog_enable_all_cpus(); 511 else 512 watchdog_disable_all_cpus(); 513 } 514 return 0; 515} 516 517int proc_dowatchdog_thresh(struct ctl_table *table, int write, 518 void __user *buffer, 519 size_t *lenp, loff_t *ppos) 520{ 521 return proc_dointvec_minmax(table, write, buffer, lenp, ppos); 522} 523#endif /* CONFIG_SYSCTL */ 524 525 526/* 527 * Create/destroy watchdog threads as CPUs come and go: 528 */ 529static int __cpuinit 530cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) 531{ 532 int hotcpu = (unsigned long)hcpu; 533 534 switch (action) { 535 case CPU_UP_PREPARE: 536 case CPU_UP_PREPARE_FROZEN: 537 if (watchdog_prepare_cpu(hotcpu)) 538 return NOTIFY_BAD; 539 break; 540 case CPU_ONLINE: 541 case CPU_ONLINE_FROZEN: 542 if (watchdog_enable(hotcpu)) 543 return NOTIFY_BAD; 544 break; 545#ifdef CONFIG_HOTPLUG_CPU 546 case CPU_UP_CANCELED: 547 case CPU_UP_CANCELED_FROZEN: 548 watchdog_disable(hotcpu); 549 break; 550 case CPU_DEAD: 551 case CPU_DEAD_FROZEN: 552 watchdog_disable(hotcpu); 553 break; 554#endif /* CONFIG_HOTPLUG_CPU */ 555 } 556 return NOTIFY_OK; 557} 558 559static struct notifier_block __cpuinitdata cpu_nfb = { 560 .notifier_call = cpu_callback 561}; 562 563static int __init spawn_watchdog_task(void) 564{ 565 void *cpu = (void *)(long)smp_processor_id(); 566 int err; 567 568 if (no_watchdog) 569 return 0; 570 571 err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); 572 WARN_ON(err == NOTIFY_BAD); 573 574 cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); 575 register_cpu_notifier(&cpu_nfb); 576 577 atomic_notifier_chain_register(&panic_notifier_list, &panic_block); 578 579 return 0; 580} 581early_initcall(spawn_watchdog_task); 582