/*
 *  linux/kernel/panic.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * This function is used throughout the kernel (including mm and fs)
 * to indicate a major problem.
 */
#include <linux/debug_locks.h>
#include <linux/interrupt.h>
#include <linux/kmsg_dump.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/kexec.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/nmi.h>
#include <linux/dmi.h>

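/*
 * Granularity (in milliseconds) of the busy-wait loops in panic(), and
 * the blink rate divisor: panic_blink() is toggled roughly every
 * 3600/PANIC_BLINK_SPD milliseconds.
 */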
#define PANIC_TIMER_STEP 100
#define PANIC_BLINK_SPD 18

int panic_on_oops;
static unsigned long tainted_mask;
static int pause_on_oops;
static int pause_on_oops_flag;
static DEFINE_SPINLOCK(pause_on_oops_lock);

int panic_timeout;

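/*
 * Notifier chain run from panic() so that other subsystems can react
 * before the machine is halted or rebooted.
 */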
ATOMIC_NOTIFIER_HEAD(panic_notifier_list);

EXPORT_SYMBOL(panic_notifier_list);

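/* Fallback used when no architecture or driver installs a panic_blink handler. */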
static long no_blink(int state)
{
	return 0;
}

/* Returns how long it waited in ms */
long (*panic_blink)(int state);
EXPORT_SYMBOL(panic_blink);

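/*
 * Provided elsewhere when CONFIG_CRASHLOG is enabled; panic() calls it
 * below, presumably to record crash state in NVRAM.
 */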
#ifdef CONFIG_CRASHLOG
void nvram_store_crash(void);
#endif

/**
 *	panic - halt the system
 *	@fmt: The text string to print
 *
 *	Display a message, then perform cleanups.
 *
 *	This function never returns.
 */
NORET_TYPE void panic(const char *fmt, ...)
{
	static char buf[1024];
	va_list args;
	long i, i_next = 0;
	int state = 0;

	/*
	 * It's possible to come here directly from a panic-assertion and
	 * not have preempt disabled. Some functions called from here want
	 * preempt to be disabled. No point enabling it later though...
	 */
	preempt_disable();

	console_verbose();
	bust_spinlocks(1);
	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
#ifdef CONFIG_DUMP_PREV_OOPS_MSG
	enable_oopsbuf(1);
#endif
	printk(KERN_EMERG "Kernel panic - not syncing: %s\n", buf);
#ifdef CONFIG_DEBUG_BUGVERBOSE
	dump_stack();
#endif

	/*
	 * If we have crashed and we have a crash kernel loaded let it handle
	 * everything else.
	 * Do we want to call this before we try to display a message?
	 */
	crash_kexec(NULL);

	kmsg_dump(KMSG_DUMP_PANIC);

	/*
	 * Note smp_send_stop is the usual smp shutdown function, which
	 * unfortunately means it may not be hardened to work in a panic
	 * situation.
	 */
	smp_send_stop();

	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

	bust_spinlocks(0);

#ifdef CONFIG_CRASHLOG
	nvram_store_crash();
#endif

	if (!panic_blink)
		panic_blink = no_blink;

	if (panic_timeout > 0) {
		/*
		 * Delay timeout seconds before rebooting the machine.
		 * We can't use the "normal" timers since we just panicked.
		 */
		printk(KERN_EMERG "Rebooting in %d seconds..", panic_timeout);

		for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
			touch_nmi_watchdog();
			if (i >= i_next) {
				i += panic_blink(state ^= 1);
				i_next = i + 3600 / PANIC_BLINK_SPD;
			}
			mdelay(PANIC_TIMER_STEP);
		}
		/*
		 * This will not be a clean reboot, with everything
		 * shutting down.  But if there is a chance of
		 * rebooting the system it will be rebooted.
		 */
		emergency_restart();
	}
#ifdef __sparc__
	{
		extern int stop_a_enabled;
		/* Make sure the user can actually press Stop-A (L1-A) */
		stop_a_enabled = 1;
		printk(KERN_EMERG "Press Stop-A (L1-A) to return to the boot prom\n");
	}
#endif
#if defined(CONFIG_S390)
	{
		unsigned long caller;

		caller = (unsigned long)__builtin_return_address(0);
		disabled_wait(caller);
	}
#endif
	local_irq_enable();
	for (i = 0; ; i += PANIC_TIMER_STEP) {
		touch_softlockup_watchdog();
		if (i >= i_next) {
			i += panic_blink(state ^= 1);
			i_next = i + 3600 / PANIC_BLINK_SPD;
		}
		mdelay(PANIC_TIMER_STEP);
	}
}

EXPORT_SYMBOL(panic);

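/*
 * Maps a taint bit to the character printed when the bit is set
 * ('true') and the character printed when it is clear ('false').
 */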
struct tnt {
	u8	bit;
	char	true;
	char	false;
};

static const struct tnt tnts[] = {
	{ TAINT_PROPRIETARY_MODULE,	'P', 'G' },
	{ TAINT_FORCED_MODULE,		'F', ' ' },
	{ TAINT_UNSAFE_SMP,		'S', ' ' },
	{ TAINT_FORCED_RMMOD,		'R', ' ' },
	{ TAINT_MACHINE_CHECK,		'M', ' ' },
	{ TAINT_BAD_PAGE,		'B', ' ' },
	{ TAINT_USER,			'U', ' ' },
	{ TAINT_DIE,			'D', ' ' },
	{ TAINT_OVERRIDDEN_ACPI_TABLE,	'A', ' ' },
	{ TAINT_WARN,			'W', ' ' },
	{ TAINT_CRAP,			'C', ' ' },
	{ TAINT_FIRMWARE_WORKAROUND,	'I', ' ' },
};

/**
 *	print_tainted - return a string to represent the kernel taint state.
 *
 *  'P' - Proprietary module has been loaded.
 *  'F' - Module has been forcibly loaded.
 *  'S' - SMP with CPUs not designed for SMP.
 *  'R' - User forced a module unload.
 *  'M' - System experienced a machine check exception.
 *  'B' - System has hit bad_page.
 *  'U' - Userspace-defined naughtiness.
 *  'D' - Kernel has oopsed before.
 *  'A' - ACPI table overridden.
 *  'W' - Taint on warning.
 *  'C' - Modules from drivers/staging are loaded.
 *  'I' - Working around severe firmware bug.
 *
 *	The string is overwritten by the next call to print_tainted().
 */
const char *print_tainted(void)
{
	static char buf[ARRAY_SIZE(tnts) + sizeof("Tainted: ") + 1];

	if (tainted_mask) {
		char *s;
		int i;

		s = buf + sprintf(buf, "Tainted: ");
		for (i = 0; i < ARRAY_SIZE(tnts); i++) {
			const struct tnt *t = &tnts[i];
			*s++ = test_bit(t->bit, &tainted_mask) ?
					t->true : t->false;
		}
		*s = 0;
	} else
		snprintf(buf, sizeof(buf), "Not tainted");

	return buf;
}

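/* Return non-zero if the given TAINT_* flag is currently set. */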
int test_taint(unsigned flag)
{
	return test_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(test_taint);

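/* Return the whole taint mask as a bitmask of TAINT_* flags. */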
unsigned long get_taint(void)
{
	return tainted_mask;
}

void add_taint(unsigned flag)
{
	/*
	 * Can't trust the integrity of the kernel anymore.
	 * We don't call debug_locks_off() directly because the issue
	 * is not necessarily serious enough to set oops_in_progress to 1.
	 * Also we want to keep lockdep alive for the staging-driver and
	 * post-warning cases.
	 */
	if (flag != TAINT_CRAP && flag != TAINT_WARN && __debug_locks_off())
		printk(KERN_WARNING "Disabling lock debugging due to kernel taint\n");

	set_bit(flag, &tainted_mask);
}
EXPORT_SYMBOL(add_taint);

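/*
 * Busy-wait for the given number of milliseconds, touching the NMI
 * watchdog every millisecond so it does not fire while we deliberately
 * stall.
 */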
static void spin_msec(int msecs)
{
	int i;

	for (i = 0; i < msecs; i++) {
		touch_nmi_watchdog();
		mdelay(1);
	}
}

/*
 * It just happens that oops_enter() and oops_exit() are identically
 * implemented...
 */
static void do_oops_enter_exit(void)
{
	unsigned long flags;
	static int spin_counter;

	if (!pause_on_oops)
		return;

	spin_lock_irqsave(&pause_on_oops_lock, flags);
	if (pause_on_oops_flag == 0) {
		/* This CPU may now print the oops message */
		pause_on_oops_flag = 1;
	} else {
		/* We need to stall this CPU */
		if (!spin_counter) {
			/* This CPU gets to do the counting */
			spin_counter = pause_on_oops;
			do {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(MSEC_PER_SEC);
				spin_lock(&pause_on_oops_lock);
			} while (--spin_counter);
			pause_on_oops_flag = 0;
		} else {
			/* This CPU waits for a different one */
			while (spin_counter) {
				spin_unlock(&pause_on_oops_lock);
				spin_msec(1);
				spin_lock(&pause_on_oops_lock);
			}
		}
	}
	spin_unlock_irqrestore(&pause_on_oops_lock, flags);
}

/*
 * Return true if the calling CPU is allowed to print oops-related info.
 * This is a bit racy.
 */
int oops_may_print(void)
{
	return pause_on_oops_flag == 0;
}

/*
 * Called when the architecture enters its oops handler, before it prints
 * anything.  If this is the first CPU to oops, and it's oopsing the first
 * time, then let it proceed.
 *
 * This is all enabled by the pause_on_oops kernel boot option.  We do all
 * this to ensure that oopses don't scroll off the screen.  It has the
 * side-effect of preventing later-oopsing CPUs from mucking up the display,
 * too.
 *
 * It turns out that the CPU which is allowed to print ends up pausing for
 * the right duration, whereas all the other CPUs pause for twice as long:
 * once in oops_enter(), once in oops_exit().
 */
void oops_enter(void)
{
	tracing_off();
	/* can't trust the integrity of the kernel anymore: */
	debug_locks_off();
	do_oops_enter_exit();
#ifdef CONFIG_DUMP_PREV_OOPS_MSG
	enable_oopsbuf(1);
#endif
}

/*
 * 64-bit random ID for oopses:
 */
static u64 oops_id;

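/*
 * Initialize oops_id with random bytes the first time it is needed and
 * bump it on every later call, so successive oopses get distinct IDs.
 */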
static int init_oops_id(void)
{
	if (!oops_id)
		get_random_bytes(&oops_id, sizeof(oops_id));
	else
		oops_id++;

	return 0;
}
late_initcall(init_oops_id);

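/* Print the "end trace" line that closes an oops or warning report. */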
void print_oops_end_marker(void)
{
	init_oops_id();
	printk(KERN_WARNING "---[ end trace %016llx ]---\n",
		(unsigned long long)oops_id);
}

/*
 * Called when the architecture exits its oops handler, after printing
 * everything.
 */
void oops_exit(void)
{
	do_oops_enter_exit();
	print_oops_end_marker();
	kmsg_dump(KMSG_DUMP_OOPS);
}

#ifdef WANT_WARN_ON_SLOWPATH
struct slowpath_args {
	const char *fmt;
	va_list args;
};

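/*
 * Common body of the WARN() slow path: print the cut-here banner, the
 * warning location and hardware name, the caller-supplied message (if
 * any), the module list and a stack dump, then taint the kernel.
 */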
static void warn_slowpath_common(const char *file, int line, void *caller,
				 unsigned taint, struct slowpath_args *args)
{
	const char *board;

	printk(KERN_WARNING "------------[ cut here ]------------\n");
	printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
	board = dmi_get_system_info(DMI_PRODUCT_NAME);
	if (board)
		printk(KERN_WARNING "Hardware name: %s\n", board);

	if (args)
		vprintk(args->fmt, args->args);

	print_modules();
	dump_stack();
	print_oops_end_marker();
	add_taint(taint);
}

void warn_slowpath_fmt(const char *file, int line, const char *fmt, ...)
{
	struct slowpath_args args;

	args.fmt = fmt;
	va_start(args.args, fmt);
	warn_slowpath_common(file, line, __builtin_return_address(0),
			     TAINT_WARN, &args);
	va_end(args.args);
}
EXPORT_SYMBOL(warn_slowpath_fmt);

void warn_slowpath_fmt_taint(const char *file, int line,
			     unsigned taint, const char *fmt, ...)
{
	struct slowpath_args args;

	args.fmt = fmt;
	va_start(args.args, fmt);
	warn_slowpath_common(file, line, __builtin_return_address(0),
			     taint, &args);
	va_end(args.args);
}
EXPORT_SYMBOL(warn_slowpath_fmt_taint);

void warn_slowpath_null(const char *file, int line)
{
	warn_slowpath_common(file, line, __builtin_return_address(0),
			     TAINT_WARN, NULL);
}
EXPORT_SYMBOL(warn_slowpath_null);
#endif

#ifdef CONFIG_CC_STACKPROTECTOR

/*
 * Called when gcc's -fstack-protector feature is used, and
 * gcc detects corruption of the on-stack canary value
 */
void __stack_chk_fail(void)
{
	panic("stack-protector: Kernel stack is corrupted in: %p\n",
		__builtin_return_address(0));
}
EXPORT_SYMBOL(__stack_chk_fail);

#endif

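/*
 * Expose "panic" (seconds to wait before rebooting after a panic;
 * values <= 0 mean wait forever) and "pause_on_oops" as core kernel
 * boot parameters.
 */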
core_param(panic, panic_timeout, int, 0644);
core_param(pause_on_oops, pause_on_oops, int, 0644);