/*
 *  linux/arch/x86_64/nmi.c
 *
 *  NMI watchdog support on APIC systems
 *
 *  Started by Ingo Molnar <mingo@redhat.com>
 *
 *  Fixes:
 *  Mikael Pettersson	: AMD K7 support for local APIC NMI watchdog.
 *  Mikael Pettersson	: Power Management for local APIC NMI watchdog.
 *  Pavel Machek and
 *  Mikael Pettersson	: PM converted to driver model. Disable/enable API.
 */

#include <linux/nmi.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/sysctl.h>
#include <linux/kprobes.h>
#include <linux/cpumask.h>
#include <linux/kdebug.h>

#include <asm/smp.h>
#include <asm/nmi.h>
#include <asm/proto.h>
#include <asm/mce.h>

int unknown_nmi_panic;
int nmi_watchdog_enabled;
int panic_on_unrecovered_nmi;

static cpumask_t backtrace_mask = CPU_MASK_NONE;

/* nmi_active:
 * >0: the lapic NMI watchdog is active, but can be disabled
 * <0: the lapic NMI watchdog has not been set up, and cannot
 *     be enabled
 *  0: the lapic NMI watchdog is disabled, but can be enabled
 */
atomic_t nmi_active = ATOMIC_INIT(0);		/* oprofile uses this */
int panic_on_timeout;

unsigned int nmi_watchdog = NMI_DEFAULT;
static unsigned int nmi_hz = HZ;

static DEFINE_PER_CPU(short, wd_enabled);

/* local prototypes */
static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);

/* Run after command line and cpu_init init, but before all other checks */
void nmi_watchdog_default(void)
{
	if (nmi_watchdog != NMI_DEFAULT)
		return;
	nmi_watchdog = NMI_NONE;
}

static int endflag __initdata = 0;

#ifdef CONFIG_SMP
/* The performance counters used by NMI_LOCAL_APIC don't trigger when
 * the CPU is idle. To make sure the NMI watchdog really ticks on all
 * CPUs during the test, make them busy.
 */
static __init void nmi_cpu_busy(void *data)
{
	local_irq_enable_in_hardirq();
	/* Intentionally don't use cpu_relax here. This is
	   to make sure that the performance counter really ticks,
	   even if there is a simulator or similar that catches the
	   pause instruction. On a real HT machine this is fine because
	   all other CPUs are busy with "useless" delay loops and don't
	   care if they get somewhat fewer cycles. */
	while (endflag == 0)
		mb();
}
#endif

int __init check_nmi_watchdog(void)
{
	int *counts;
	int cpu;

	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DEFAULT))
		return 0;

	if (!atomic_read(&nmi_active))
		return 0;

	counts = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
	if (!counts)
		return -1;

	printk(KERN_INFO "testing NMI watchdog ... ");

#ifdef CONFIG_SMP
	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
#endif

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		counts[cpu] = cpu_pda(cpu)->__nmi_count;
	local_irq_enable();
	mdelay((20*1000)/nmi_hz); /* wait 20 ticks */

	for_each_online_cpu(cpu) {
		if (!per_cpu(wd_enabled, cpu))
			continue;
		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
			printk(KERN_WARNING
			       "CPU#%d: NMI appears to be stuck (%d->%d)!\n",
			       cpu,
			       counts[cpu],
			       cpu_pda(cpu)->__nmi_count);
			per_cpu(wd_enabled, cpu) = 0;
			atomic_dec(&nmi_active);
		}
	}
	if (!atomic_read(&nmi_active)) {
		kfree(counts);
		atomic_set(&nmi_active, -1);
		endflag = 1;
		return -1;
	}
	endflag = 1;
	printk("OK.\n");

	/* now that we know it works we can reduce NMI frequency to
	   something more reasonable; makes a difference in some configs */
	if (nmi_watchdog == NMI_LOCAL_APIC)
		nmi_hz = lapic_adjust_nmi_hz(1);

	kfree(counts);
	return 0;
}

int __init setup_nmi_watchdog(char *str)
{
	int nmi;

	if (!strncmp(str, "panic", 5)) {
		panic_on_timeout = 1;
		str = strchr(str, ',');
		if (!str)
			return 1;
		++str;
	}

	get_option(&str, &nmi);

	if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
		return 0;

	nmi_watchdog = nmi;
	return 1;
}

__setup("nmi_watchdog=", setup_nmi_watchdog);
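
/*
 * Usage sketch (illustrative, not from the original file): the parser
 * above accepts an optional "panic" prefix followed by a mode number,
 * so boot parameters of the following form should work, assuming the
 * conventional NMI_* encodings from <asm/nmi.h> (NMI_IO_APIC == 1,
 * NMI_LOCAL_APIC == 2):
 *
 *	nmi_watchdog=1		IO-APIC timer based NMI watchdog
 *	nmi_watchdog=2		local APIC based NMI watchdog
 *	nmi_watchdog=panic,2	as above, and panic on a detected lockup
 */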

static void __acpi_nmi_disable(void *__unused)
{
	apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
}

/*
 * Disable timer based NMIs on all CPUs:
 */
void acpi_nmi_disable(void)
{
	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
}

static void __acpi_nmi_enable(void *__unused)
{
	apic_write(APIC_LVT0, APIC_DM_NMI);
}

/*
 * Enable timer based NMIs on all CPUs:
 */
void acpi_nmi_enable(void)
{
	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
		on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
}

#ifdef CONFIG_PM

static int nmi_pm_active; /* nmi_active before suspend */

static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
{
	/* only CPU0 goes here, other CPUs should be offline */
	nmi_pm_active = atomic_read(&nmi_active);
	stop_apic_nmi_watchdog(NULL);
	BUG_ON(atomic_read(&nmi_active) != 0);
	return 0;
}

static int lapic_nmi_resume(struct sys_device *dev)
{
	/* only CPU0 goes here, other CPUs should be offline */
	if (nmi_pm_active > 0) {
		setup_apic_nmi_watchdog(NULL);
		touch_nmi_watchdog();
	}
	return 0;
}

static struct sysdev_class nmi_sysclass = {
	set_kset_name("lapic_nmi"),
	.resume		= lapic_nmi_resume,
	.suspend	= lapic_nmi_suspend,
};

static struct sys_device device_lapic_nmi = {
	.id	= 0,
	.cls	= &nmi_sysclass,
};

static int __init init_lapic_nmi_sysfs(void)
{
	int error;

	/* should really be a BUG_ON, but since this is an
	 * init call it just doesn't work.  -dcz
	 */
	if (nmi_watchdog != NMI_LOCAL_APIC)
		return 0;

	if (atomic_read(&nmi_active) < 0)
		return 0;

	error = sysdev_class_register(&nmi_sysclass);
	if (!error)
		error = sysdev_register(&device_lapic_nmi);
	return error;
}

/* must come after the local APIC's device_initcall() */
late_initcall(init_lapic_nmi_sysfs);

#endif	/* CONFIG_PM */

void setup_apic_nmi_watchdog(void *unused)
{
	if (__get_cpu_var(wd_enabled) == 1)
		return;

	/* Cheap hack to support suspend/resume: if CPU0 is not active,
	 * then neither should the other CPUs be. */
	if ((smp_processor_id() != 0) && (atomic_read(&nmi_active) <= 0))
		return;

	switch (nmi_watchdog) {
	case NMI_LOCAL_APIC:
		__get_cpu_var(wd_enabled) = 1;
		if (lapic_watchdog_init(nmi_hz) < 0) {
			__get_cpu_var(wd_enabled) = 0;
			return;
		}
		/* FALL THROUGH */
	case NMI_IO_APIC:
		__get_cpu_var(wd_enabled) = 1;
		atomic_inc(&nmi_active);
	}
}

void stop_apic_nmi_watchdog(void *unused)
{
	/* only support LOCAL and IO APICs for now */
	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
	    (nmi_watchdog != NMI_IO_APIC))
		return;
	if (__get_cpu_var(wd_enabled) == 0)
		return;
	if (nmi_watchdog == NMI_LOCAL_APIC)
		lapic_watchdog_stop();
	__get_cpu_var(wd_enabled) = 0;
	atomic_dec(&nmi_active);
}

/*
 * The best way to detect whether a CPU has a 'hard lockup' problem
 * is to check its local APIC timer IRQ counts. If they are not
 * changing then that CPU has some problem.
 *
 * As these watchdog NMI IRQs are generated on every CPU, we only
 * have to check the current processor.
 */

static DEFINE_PER_CPU(unsigned, last_irq_sum);
static DEFINE_PER_CPU(local_t, alert_counter);
static DEFINE_PER_CPU(int, nmi_touch);

void touch_nmi_watchdog(void)
{
	if (nmi_watchdog > 0) {
		unsigned cpu;

		/*
		 * Tell other CPUs to reset their alert counters. We cannot
		 * do it ourselves because the alert count increase is not
		 * atomic.
		 */
		for_each_present_cpu(cpu)
			per_cpu(nmi_touch, cpu) = 1;
	}

	touch_softlockup_watchdog();
}
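
/*
 * Illustrative caller (hypothetical helpers, not part of this file):
 * code that legitimately keeps a CPU busy for longer than the watchdog
 * timeout is expected to poke the watchdog so nmi_watchdog_tick() below
 * does not mistake the loop for a lockup:
 *
 *	while (!polling_done()) {	// hypothetical helper
 *		do_polling_step();	// hypothetical helper
 *		touch_nmi_watchdog();
 *	}
 */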

int __kprobes nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
{
	int sum;
	int touched = 0;
	int cpu = smp_processor_id();
	int rc = 0;

	/* check for other users first */
	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
			== NOTIFY_STOP) {
		rc = 1;
		touched = 1;
	}

	sum = read_pda(apic_timer_irqs);
	if (__get_cpu_var(nmi_touch)) {
		__get_cpu_var(nmi_touch) = 0;
		touched = 1;
	}

	if (cpu_isset(cpu, backtrace_mask)) {
		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */

		spin_lock(&lock);
		printk("NMI backtrace for cpu %d\n", cpu);
		dump_stack();
		spin_unlock(&lock);
		cpu_clear(cpu, backtrace_mask);
	}

#ifdef CONFIG_X86_MCE
	/* Could check oops_in_progress here too, but it's safer
	   not to */
	if (atomic_read(&mce_entry) > 0)
		touched = 1;
#endif
	/* if the apic timer isn't firing, this cpu isn't doing much */
	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		local_inc(&__get_cpu_var(alert_counter));
		if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz)
			die_nmi("NMI Watchdog detected LOCKUP on CPU %d\n", regs,
				panic_on_timeout);
	} else {
		__get_cpu_var(last_irq_sum) = sum;
		local_set(&__get_cpu_var(alert_counter), 0);
	}

	/* see if the nmi watchdog went off */
	if (!__get_cpu_var(wd_enabled))
		return rc;
	switch (nmi_watchdog) {
	case NMI_LOCAL_APIC:
		rc |= lapic_wd_event(nmi_hz);
		break;
	case NMI_IO_APIC:
		/* We don't know how to accurately check for this;
		 * just assume it was a watchdog timer interrupt.
		 * This matches the old behaviour.
		 */
		rc = 1;
		break;
	}
	return rc;
}

asmlinkage __kprobes void do_nmi(struct pt_regs *regs, long error_code)
{
	nmi_enter();
	add_pda(__nmi_count, 1);
	default_do_nmi(regs);
	nmi_exit();
}

int do_nmi_callback(struct pt_regs *regs, int cpu)
{
#ifdef CONFIG_SYSCTL
	if (unknown_nmi_panic)
		return unknown_nmi_panic_callback(regs, cpu);
#endif
	return 0;
}

#ifdef CONFIG_SYSCTL

static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
{
	unsigned char reason = get_nmi_reason();
	char buf[64];

	sprintf(buf, "NMI received for unknown reason %02x\n", reason);
	die_nmi(buf, regs, 1);	/* Always panic here */
	return 0;
}

/*
 * proc handler for /proc/sys/kernel/nmi
 */
int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;

	nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
	old_state = nmi_watchdog_enabled;
	proc_dointvec(table, write, file, buffer, length, ppos);
	if (!!old_state == !!nmi_watchdog_enabled)
		return 0;

	if (atomic_read(&nmi_active) < 0) {
		printk(KERN_WARNING "NMI watchdog is permanently disabled\n");
		return -EIO;
	}

	/* if nmi_watchdog is not set yet, then set it */
	nmi_watchdog_default();

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		if (nmi_watchdog_enabled)
			enable_lapic_nmi_watchdog();
		else
			disable_lapic_nmi_watchdog();
	} else {
		printk(KERN_WARNING
			"NMI watchdog doesn't know what hardware to touch\n");
		return -EIO;
	}
	return 0;
}
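
/*
 * Illustrative usage (shell, not part of this file): the handler above
 * backs /proc/sys/kernel/nmi, so with CONFIG_SYSCTL the watchdog can be
 * toggled at run time, e.g.:
 *
 *	echo 1 > /proc/sys/kernel/nmi	# enable the NMI watchdog
 *	echo 0 > /proc/sys/kernel/nmi	# disable it again
 */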

#endif

void __trigger_all_cpu_backtrace(void)
{
	int i;

	backtrace_mask = cpu_online_map;
	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpus_empty(backtrace_mask))
			break;
		mdelay(1);
	}
}
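
/*
 * Illustrative trigger (hedged, not part of this file): in kernels of
 * this vintage the function above is normally reached through the
 * trigger_all_cpu_backtrace() wrapper, e.g. from the SysRq 'l' handler.
 * It seeds backtrace_mask with all online CPUs; each CPU then prints
 * its stack from nmi_watchdog_tick() above and clears its own bit.
 */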

EXPORT_SYMBOL(nmi_active);
EXPORT_SYMBOL(nmi_watchdog);
EXPORT_SYMBOL(touch_nmi_watchdog);