/*
 * Detect Soft Lockups
 *
 * started by Ingo Molnar, Copyright (C) 2005, 2006 Red Hat, Inc.
 *
 * this code detects soft lockups: incidents where the kernel does not
 * reschedule on a CPU for 10 seconds or more.
 */
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/notifier.h>
#include <linux/module.h>

static DEFINE_SPINLOCK(print_lock);

static DEFINE_PER_CPU(unsigned long, touch_timestamp);
static DEFINE_PER_CPU(unsigned long, print_timestamp);
static DEFINE_PER_CPU(struct task_struct *, watchdog_task);

static int did_panic = 0;

static int
softlock_panic(struct notifier_block *this, unsigned long event, void *ptr)
{
	did_panic = 1;

	return NOTIFY_DONE;
}

static struct notifier_block panic_block = {
	.notifier_call = softlock_panic,
};

/*
 * Returns seconds, approximately. We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
	return sched_clock() >> 30; /* 2^30 ~= 10^9 */
}

void touch_softlockup_watchdog(void)
{
	__raw_get_cpu_var(touch_timestamp) = get_timestamp();
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
	int cpu;

	/* Cause each CPU to re-update its timestamp rather than complain */
	for_each_online_cpu(cpu)
		per_cpu(touch_timestamp, cpu) = 0;
}
EXPORT_SYMBOL(touch_all_softlockup_watchdogs);

/*
 * This callback runs from the timer interrupt, and checks
 * whether the watchdog thread has hung or not:
 */
void softlockup_tick(void)
{
	int this_cpu = smp_processor_id();
	unsigned long touch_timestamp = per_cpu(touch_timestamp, this_cpu);
	unsigned long print_timestamp;
	unsigned long now;

	/* a zero timestamp asks us to re-arm, see touch_all_softlockup_watchdogs() */
	if (touch_timestamp == 0) {
		touch_softlockup_watchdog();
		return;
	}

	print_timestamp = per_cpu(print_timestamp, this_cpu);

	/* report each stall period at most once: */
	if (print_timestamp == touch_timestamp ||
	    did_panic ||
	    !per_cpu(watchdog_task, this_cpu))
		return;

	/* do not print during early bootup: */
	if (unlikely(system_state != SYSTEM_RUNNING)) {
		touch_softlockup_watchdog();
		return;
	}

	now = get_timestamp();

	/* Wake up the high-prio watchdog task every second: */
	if (now > (touch_timestamp + 1))
		wake_up_process(per_cpu(watchdog_task, this_cpu));

	/* Warn about unreasonable 10+ seconds delays: */
	if (now > (touch_timestamp + 10)) {
		per_cpu(print_timestamp, this_cpu) = touch_timestamp;

		spin_lock(&print_lock);
		printk(KERN_ERR "BUG: soft lockup detected on CPU#%d!\n",
			this_cpu);
		dump_stack();
		spin_unlock(&print_lock);
	}
}
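/*
 * How the two halves cooperate, as a rough sketch (timestamps come
 * from get_timestamp(), i.e. sched_clock() in ~1 second units):
 *
 *	timer interrupt (each tick)	watchdog/N thread
 *	---------------------------	-----------------
 *	softlockup_tick():
 *	  now - touch_timestamp > 1	-> wake_up_process()
 *					touch_softlockup_watchdog()
 *					schedule()
 *	  now - touch_timestamp > 10	-> the thread has not run for
 *					   10+ seconds: print the
 *					   "BUG: soft lockup" report
 *
 * A CPU that stays inside the kernel without rescheduling keeps its
 * watchdog thread from running, which is exactly what the 10 second
 * check catches.
 */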
/*
 * The watchdog thread - runs every second and touches the timestamp.
 */
static int watchdog(void *__bind_cpu)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	sched_setscheduler(current, SCHED_FIFO, &param);
	current->flags |= PF_NOFREEZE;

	/* initialize timestamp */
	touch_softlockup_watchdog();

	/*
	 * Run briefly once per second to reset the softlockup timestamp.
	 * If this gets delayed for more than 10 seconds then the
	 * debug-printout triggers in softlockup_tick().
	 */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		touch_softlockup_watchdog();
		schedule();
	}

	return 0;
}

/*
 * Create/destroy watchdog threads as CPUs come and go:
 */
static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
	int hotcpu = (unsigned long)hcpu;
	struct task_struct *p;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		BUG_ON(per_cpu(watchdog_task, hotcpu));
		p = kthread_create(watchdog, hcpu, "watchdog/%d", hotcpu);
		if (IS_ERR(p)) {
			printk(KERN_ERR "watchdog for %i failed\n", hotcpu);
			return NOTIFY_BAD;
		}
		per_cpu(touch_timestamp, hotcpu) = 0;
		per_cpu(watchdog_task, hotcpu) = p;
		kthread_bind(p, hotcpu);
		break;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		wake_up_process(per_cpu(watchdog_task, hotcpu));
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!per_cpu(watchdog_task, hotcpu))
			break;
		/* Unbind so it can run. Fall thru. */
		kthread_bind(per_cpu(watchdog_task, hotcpu),
			     any_online_cpu(cpu_online_map));
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		p = per_cpu(watchdog_task, hotcpu);
		per_cpu(watchdog_task, hotcpu) = NULL;
		kthread_stop(p);
		break;
#endif /* CONFIG_HOTPLUG_CPU */
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
	.notifier_call = cpu_callback
};

__init void spawn_softlockup_task(void)
{
	void *cpu = (void *)(long)smp_processor_id();
	int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

	BUG_ON(err == NOTIFY_BAD);
	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
	register_cpu_notifier(&cpu_nfb);

	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
}
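/*
 * Usage note: a long-running kernel path that deliberately keeps a
 * CPU busy should call the exported touch_softlockup_watchdog() so
 * this detector stays quiet. An illustrative sketch only - poll_done()
 * and POLL_TIMEOUT are made-up placeholders, not part of this file:
 *
 *	unsigned long timeout = jiffies + POLL_TIMEOUT;
 *
 *	while (!poll_done() && time_before(jiffies, timeout)) {
 *		cpu_relax();
 *		touch_softlockup_watchdog();
 *	}
 *
 * Each touch moves touch_timestamp forward, so softlockup_tick()
 * never sees a 10+ second gap even though the CPU does not schedule.
 */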