/*
 * kernel/lockdep.c
 *
 * Runtime locking correctness validator
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * This code maps all the lock dependencies as they occur in a live kernel
 * and will warn about the following classes of locking bugs:
 *
 * - lock inversion scenarios
 * - circular lock dependencies
 * - hardirq/softirq safe/unsafe locking bugs
 *
 * Bugs are reported even if the current locking scenario does not cause
 * any deadlock at this point.
 *
 * I.e. if at any time in the past two locks were taken in a different
 * order, even if it happened for another task, and even if those were
 * different lock instances (but of the same class as this lock), this
 * code will detect it.
 *
 * Thanks to Arjan van de Ven for coming up with the initial idea of
 * mapping lock dependencies at runtime.
 */
27#include <linux/mutex.h>
28#include <linux/sched.h>
29#include <linux/delay.h>
30#include <linux/module.h>
31#include <linux/proc_fs.h>
32#include <linux/seq_file.h>
33#include <linux/spinlock.h>
34#include <linux/kallsyms.h>
35#include <linux/interrupt.h>
36#include <linux/stacktrace.h>
37#include <linux/debug_locks.h>
38#include <linux/irqflags.h>
39#include <linux/utsname.h>
40
41#include <asm/sections.h>
42
43#include "lockdep_internals.h"
44
45/*
46 * lockdep_lock: protects the lockdep graph, the hashes and the
47 *               class/list/hash allocators.
48 *
 * This is one of the rare exceptions where it's justified
 * to use a raw spinlock - we really don't want the spinlock
 * code to recurse back into the lockdep code...
52 */
53static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
54
55static int graph_lock(void)
56{
57	__raw_spin_lock(&lockdep_lock);
	/*
	 * Make sure that if another CPU detected a bug while
	 * walking the graph we don't change it (while the other
	 * CPU is busy printing out stuff with the graph lock
	 * already dropped)
	 */
64	if (!debug_locks) {
65		__raw_spin_unlock(&lockdep_lock);
66		return 0;
67	}
68	return 1;
69}
70
71static inline int graph_unlock(void)
72{
73	if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
74		return DEBUG_LOCKS_WARN_ON(1);
75
76	__raw_spin_unlock(&lockdep_lock);
77	return 0;
78}
79
80/*
81 * Turn lock debugging off and return with 0 if it was off already,
82 * and also release the graph lock:
83 */
84static inline int debug_locks_off_graph_unlock(void)
85{
86	int ret = debug_locks_off();
87
88	__raw_spin_unlock(&lockdep_lock);
89
90	return ret;
91}
92
93static int lockdep_initialized;
94
95unsigned long nr_list_entries;
96static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
97
/*
 * Allocate a lockdep entry. (Assumes the graph lock is held; returns
 * NULL on failure.)
 */
102static struct lock_list *alloc_list_entry(void)
103{
104	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
105		if (!debug_locks_off_graph_unlock())
106			return NULL;
107
108		printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
109		printk("turning off the locking correctness validator.\n");
110		return NULL;
111	}
112	return list_entries + nr_list_entries++;
113}
114
115/*
116 * All data structures here are protected by the global debug_lock.
117 *
118 * Mutex key structs only get allocated, once during bootup, and never
119 * get freed - this significantly simplifies the debugging code.
120 */
121unsigned long nr_lock_classes;
122static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
123
124/*
125 * We keep a global list of all lock classes. The list only grows,
126 * never shrinks. The list is only accessed with the lockdep
127 * spinlock lock held.
128 */
129LIST_HEAD(all_lock_classes);
130
131/*
132 * The lockdep classes are in a hash-table as well, for fast lookup:
133 */
134#define CLASSHASH_BITS		(MAX_LOCKDEP_KEYS_BITS - 1)
135#define CLASSHASH_SIZE		(1UL << CLASSHASH_BITS)
136#define CLASSHASH_MASK		(CLASSHASH_SIZE - 1)
137#define __classhashfn(key)	((((unsigned long)key >> CLASSHASH_BITS) + (unsigned long)key) & CLASSHASH_MASK)
138#define classhashentry(key)	(classhash_table + __classhashfn((key)))
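/*
 * Note: __classhashfn() hashes the class-key pointer itself; it mixes
 * the upper bits of the (unsigned long) key into its low bits before
 * masking, so keys that sit close together in memory still spread
 * across the hash table.
 */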
139
140static struct list_head classhash_table[CLASSHASH_SIZE];
141
142unsigned long nr_lock_chains;
143static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
144
145/*
146 * We put the lock dependency chains into a hash-table as well, to cache
147 * their existence:
148 */
149#define CHAINHASH_BITS		(MAX_LOCKDEP_CHAINS_BITS-1)
150#define CHAINHASH_SIZE		(1UL << CHAINHASH_BITS)
151#define CHAINHASH_MASK		(CHAINHASH_SIZE - 1)
152#define __chainhashfn(chain) \
153		(((chain >> CHAINHASH_BITS) + chain) & CHAINHASH_MASK)
154#define chainhashentry(chain)	(chainhash_table + __chainhashfn((chain)))
155
156static struct list_head chainhash_table[CHAINHASH_SIZE];
157
158/*
159 * The hash key of the lock dependency chains is a hash itself too:
160 * it's a hash of all locks taken up to that lock, including that lock.
161 * It's a 64-bit hash, because it's important for the keys to be
162 * unique.
163 */
164#define iterate_chain_key(key1, key2) \
165	(((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \
166	((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \
167	(key2))
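/*
 * The chain key is built up incrementally as locks are taken. For
 * example, a context that (starting from an empty stack, chain key 0)
 * takes locks whose class ids are 3 and then 7 ends up with:
 *
 *	iterate_chain_key(iterate_chain_key(0, 3), 7)
 *
 * where a class id is the class's index into lock_classes[] (see
 * check_chain_key() for the from-scratch recomputation).
 */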
168
169void lockdep_off(void)
170{
171	current->lockdep_recursion++;
172}
173
174EXPORT_SYMBOL(lockdep_off);
175
176void lockdep_on(void)
177{
178	current->lockdep_recursion--;
179}
180
181EXPORT_SYMBOL(lockdep_on);
182
183/*
184 * Debugging switches:
185 */
186
187#define VERBOSE			0
188#define VERY_VERBOSE		0
189
190#if VERBOSE
191# define HARDIRQ_VERBOSE	1
192# define SOFTIRQ_VERBOSE	1
193#else
194# define HARDIRQ_VERBOSE	0
195# define SOFTIRQ_VERBOSE	0
196#endif
197
198#if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE
199/*
200 * Quick filtering for interesting events:
201 */
202static int class_filter(struct lock_class *class)
203{
	/* Filter everything else. Returning 1 here would allow everything. */
205	return 0;
206}
207#endif
208
209static int verbose(struct lock_class *class)
210{
211#if VERBOSE
212	return class_filter(class);
213#endif
214	return 0;
215}
216
217#ifdef CONFIG_TRACE_IRQFLAGS
218
219static int hardirq_verbose(struct lock_class *class)
220{
221#if HARDIRQ_VERBOSE
222	return class_filter(class);
223#endif
224	return 0;
225}
226
227static int softirq_verbose(struct lock_class *class)
228{
229#if SOFTIRQ_VERBOSE
230	return class_filter(class);
231#endif
232	return 0;
233}
234
235#endif
236
237/*
238 * Stack-trace: tightly packed array of stack backtrace
239 * addresses. Protected by the graph_lock.
240 */
241unsigned long nr_stack_trace_entries;
242static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
243
244static int save_trace(struct stack_trace *trace)
245{
246	trace->nr_entries = 0;
247	trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries;
248	trace->entries = stack_trace + nr_stack_trace_entries;
249
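	/*
	 * Skip the first few (here 3) stack frames: they are the
	 * stacktrace/lockdep internals, not the locking call we want
	 * the report to point at.
	 */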
250	trace->skip = 3;
251
252	save_stack_trace(trace);
253
254	trace->max_entries = trace->nr_entries;
255
256	nr_stack_trace_entries += trace->nr_entries;
257
258	if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
259		if (!debug_locks_off_graph_unlock())
260			return 0;
261
262		printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
263		printk("turning off the locking correctness validator.\n");
264		dump_stack();
265
266		return 0;
267	}
268
269	return 1;
270}
271
272unsigned int nr_hardirq_chains;
273unsigned int nr_softirq_chains;
274unsigned int nr_process_chains;
275unsigned int max_lockdep_depth;
276unsigned int max_recursion_depth;
277
278#ifdef CONFIG_DEBUG_LOCKDEP
/*
 * We cannot printk in early bootup code. Not even early_printk()
 * is guaranteed to work at that point. So we mark any initialization
 * errors and printk about them later on, in lockdep_info().
 */
284static int lockdep_init_error;
285
286/*
287 * Various lockdep statistics:
288 */
289atomic_t chain_lookup_hits;
290atomic_t chain_lookup_misses;
291atomic_t hardirqs_on_events;
292atomic_t hardirqs_off_events;
293atomic_t redundant_hardirqs_on;
294atomic_t redundant_hardirqs_off;
295atomic_t softirqs_on_events;
296atomic_t softirqs_off_events;
297atomic_t redundant_softirqs_on;
298atomic_t redundant_softirqs_off;
299atomic_t nr_unused_locks;
300atomic_t nr_cyclic_checks;
301atomic_t nr_cyclic_check_recursions;
302atomic_t nr_find_usage_forwards_checks;
303atomic_t nr_find_usage_forwards_recursions;
304atomic_t nr_find_usage_backwards_checks;
305atomic_t nr_find_usage_backwards_recursions;
306# define debug_atomic_inc(ptr)		atomic_inc(ptr)
307# define debug_atomic_dec(ptr)		atomic_dec(ptr)
308# define debug_atomic_read(ptr)		atomic_read(ptr)
309#else
310# define debug_atomic_inc(ptr)		do { } while (0)
311# define debug_atomic_dec(ptr)		do { } while (0)
312# define debug_atomic_read(ptr)		0
313#endif
314
315/*
316 * Locking printouts:
317 */
318
319static const char *usage_str[] =
320{
321	[LOCK_USED] =			"initial-use ",
322	[LOCK_USED_IN_HARDIRQ] =	"in-hardirq-W",
323	[LOCK_USED_IN_SOFTIRQ] =	"in-softirq-W",
324	[LOCK_ENABLED_SOFTIRQS] =	"softirq-on-W",
325	[LOCK_ENABLED_HARDIRQS] =	"hardirq-on-W",
326	[LOCK_USED_IN_HARDIRQ_READ] =	"in-hardirq-R",
327	[LOCK_USED_IN_SOFTIRQ_READ] =	"in-softirq-R",
328	[LOCK_ENABLED_SOFTIRQS_READ] =	"softirq-on-R",
329	[LOCK_ENABLED_HARDIRQS_READ] =	"hardirq-on-R",
330};
331
332const char * __get_key_name(struct lockdep_subclass_key *key, char *str)
333{
334	return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str);
335}
336
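/*
 * Legend for the four usage characters printed after a class name
 * (hardirq, softirq, hardirq-read and softirq-read state):
 *
 *   '+'  the lock has been used in that irq context (irq-safe)
 *   '-'  the lock has been acquired with that irq type enabled
 *        (irq-unsafe)
 *   '?'  both of the above (read-state columns only)
 *   '.'  no such usage recorded yet
 */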
337void
338get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4)
339{
340	*c1 = '.', *c2 = '.', *c3 = '.', *c4 = '.';
341
342	if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
343		*c1 = '+';
344	else
345		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS)
346			*c1 = '-';
347
348	if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
349		*c2 = '+';
350	else
351		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS)
352			*c2 = '-';
353
354	if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
355		*c3 = '-';
356	if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ) {
357		*c3 = '+';
358		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
359			*c3 = '?';
360	}
361
362	if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
363		*c4 = '-';
364	if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ) {
365		*c4 = '+';
366		if (class->usage_mask & LOCKF_ENABLED_SOFTIRQS_READ)
367			*c4 = '?';
368	}
369}
370
371static void print_lock_name(struct lock_class *class)
372{
373	char str[KSYM_NAME_LEN + 1], c1, c2, c3, c4;
374	const char *name;
375
376	get_usage_chars(class, &c1, &c2, &c3, &c4);
377
378	name = class->name;
379	if (!name) {
380		name = __get_key_name(class->key, str);
381		printk(" (%s", name);
382	} else {
383		printk(" (%s", name);
384		if (class->name_version > 1)
385			printk("#%d", class->name_version);
386		if (class->subclass)
387			printk("/%d", class->subclass);
388	}
389	printk("){%c%c%c%c}", c1, c2, c3, c4);
390}
391
392static void print_lockdep_cache(struct lockdep_map *lock)
393{
394	const char *name;
395	char str[KSYM_NAME_LEN + 1];
396
397	name = lock->name;
398	if (!name)
399		name = __get_key_name(lock->key->subkeys, str);
400
401	printk("%s", name);
402}
403
404static void print_lock(struct held_lock *hlock)
405{
406	print_lock_name(hlock->class);
407	printk(", at: ");
408	print_ip_sym(hlock->acquire_ip);
409}
410
411static void lockdep_print_held_locks(struct task_struct *curr)
412{
413	int i, depth = curr->lockdep_depth;
414
415	if (!depth) {
416		printk("no locks held by %s/%d.\n", curr->comm, curr->pid);
417		return;
418	}
419	printk("%d lock%s held by %s/%d:\n",
420		depth, depth > 1 ? "s" : "", curr->comm, curr->pid);
421
422	for (i = 0; i < depth; i++) {
423		printk(" #%d: ", i);
424		print_lock(curr->held_locks + i);
425	}
426}
427
428static void print_lock_class_header(struct lock_class *class, int depth)
429{
430	int bit;
431
432	printk("%*s->", depth, "");
433	print_lock_name(class);
434	printk(" ops: %lu", class->ops);
435	printk(" {\n");
436
437	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
438		if (class->usage_mask & (1 << bit)) {
439			int len = depth;
440
441			len += printk("%*s   %s", depth, "", usage_str[bit]);
442			len += printk(" at:\n");
443			print_stack_trace(class->usage_traces + bit, len);
444		}
445	}
446	printk("%*s }\n", depth, "");
447
448	printk("%*s ... key      at: ",depth,"");
449	print_ip_sym((unsigned long)class->key);
450}
451
/*
 * printk all lock dependencies starting at <class>:
 */
455static void print_lock_dependencies(struct lock_class *class, int depth)
456{
457	struct lock_list *entry;
458
459	if (DEBUG_LOCKS_WARN_ON(depth >= 20))
460		return;
461
462	print_lock_class_header(class, depth);
463
464	list_for_each_entry(entry, &class->locks_after, entry) {
465		if (DEBUG_LOCKS_WARN_ON(!entry->class))
466			return;
467
468		print_lock_dependencies(entry->class, depth + 1);
469
470		printk("%*s ... acquired at:\n",depth,"");
471		print_stack_trace(&entry->trace, 2);
472		printk("\n");
473	}
474}
475
/*
 * Add a new dependency to the tail of the list:
 */
479static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
480			    struct list_head *head, unsigned long ip, int distance)
481{
482	struct lock_list *entry;
483	/*
484	 * Lock not present yet - get a new dependency struct and
485	 * add it to the list:
486	 */
487	entry = alloc_list_entry();
488	if (!entry)
489		return 0;
490
491	entry->class = this;
492	entry->distance = distance;
493	if (!save_trace(&entry->trace))
494		return 0;
495
	/*
	 * Since we never remove from the dependency list, the list can
	 * be walked locklessly by other CPUs; it's only the allocation
	 * itself that must be protected by the graph lock. But this also
	 * means we must make new entries visible only after the writes to
	 * the entry itself have become visible - hence the RCU op:
	 */
503	list_add_tail_rcu(&entry->entry, head);
504
505	return 1;
506}
507
/*
 * Recursive, forwards-direction lock-dependency checking, used both
 * for noncircular checking and for hardirq-unsafe/softirq-unsafe
 * checking.
512 *
513 * (to keep the stackframe of the recursive functions small we
514 *  use these global variables, and we also mark various helper
515 *  functions as noinline.)
516 */
517static struct held_lock *check_source, *check_target;
518
519/*
520 * Print a dependency chain entry (this is only done when a deadlock
521 * has been detected):
522 */
523static noinline int
524print_circular_bug_entry(struct lock_list *target, unsigned int depth)
525{
526	if (debug_locks_silent)
527		return 0;
528	printk("\n-> #%u", depth);
529	print_lock_name(target->class);
530	printk(":\n");
531	print_stack_trace(&target->trace, 6);
532
533	return 0;
534}
535
536static void print_kernel_version(void)
537{
538	printk("%s %.*s\n", init_utsname()->release,
539		(int)strcspn(init_utsname()->version, " "),
540		init_utsname()->version);
541}
542
543/*
544 * When a circular dependency is detected, print the
545 * header first:
546 */
547static noinline int
548print_circular_bug_header(struct lock_list *entry, unsigned int depth)
549{
550	struct task_struct *curr = current;
551
552	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
553		return 0;
554
555	printk("\n=======================================================\n");
556	printk(  "[ INFO: possible circular locking dependency detected ]\n");
557	print_kernel_version();
558	printk(  "-------------------------------------------------------\n");
559	printk("%s/%d is trying to acquire lock:\n",
560		curr->comm, curr->pid);
561	print_lock(check_source);
562	printk("\nbut task is already holding lock:\n");
563	print_lock(check_target);
564	printk("\nwhich lock already depends on the new lock.\n\n");
565	printk("\nthe existing dependency chain (in reverse order) is:\n");
566
567	print_circular_bug_entry(entry, depth);
568
569	return 0;
570}
571
572static noinline int print_circular_bug_tail(void)
573{
574	struct task_struct *curr = current;
575	struct lock_list this;
576
577	if (debug_locks_silent)
578		return 0;
579
580	this.class = check_source->class;
581	if (!save_trace(&this.trace))
582		return 0;
583
584	print_circular_bug_entry(&this, 0);
585
586	printk("\nother info that might help us debug this:\n\n");
587	lockdep_print_held_locks(curr);
588
589	printk("\nstack backtrace:\n");
590	dump_stack();
591
592	return 0;
593}
594
595#define RECURSION_LIMIT 40
596
597static int noinline print_infinite_recursion_bug(void)
598{
599	if (!debug_locks_off_graph_unlock())
600		return 0;
601
602	WARN_ON(1);
603
604	return 0;
605}
606
/*
 * Prove that the dependency graph starting at <source> cannot
 * lead to <target>. Print an error and return 0 if it does.
 */
611static noinline int
612check_noncircular(struct lock_class *source, unsigned int depth)
613{
614	struct lock_list *entry;
615
616	debug_atomic_inc(&nr_cyclic_check_recursions);
617	if (depth > max_recursion_depth)
618		max_recursion_depth = depth;
619	if (depth >= RECURSION_LIMIT)
620		return print_infinite_recursion_bug();
621	/*
622	 * Check this lock's dependency list:
623	 */
624	list_for_each_entry(entry, &source->locks_after, entry) {
625		if (entry->class == check_target->class)
626			return print_circular_bug_header(entry, depth+1);
627		debug_atomic_inc(&nr_cyclic_checks);
628		if (!check_noncircular(entry->class, depth+1))
629			return print_circular_bug_entry(entry, depth+1);
630	}
631	return 1;
632}
633
634static int very_verbose(struct lock_class *class)
635{
636#if VERY_VERBOSE
637	return class_filter(class);
638#endif
639	return 0;
640}
641#ifdef CONFIG_TRACE_IRQFLAGS
642
643/*
644 * Forwards and backwards subgraph searching, for the purposes of
645 * proving that two subgraphs can be connected by a new dependency
646 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
647 */
648static enum lock_usage_bit find_usage_bit;
649static struct lock_class *forwards_match, *backwards_match;
650
651/*
652 * Find a node in the forwards-direction dependency sub-graph starting
653 * at <source> that matches <find_usage_bit>.
654 *
655 * Return 2 if such a node exists in the subgraph, and put that node
656 * into <forwards_match>.
657 *
658 * Return 1 otherwise and keep <forwards_match> unchanged.
659 * Return 0 on error.
660 */
661static noinline int
662find_usage_forwards(struct lock_class *source, unsigned int depth)
663{
664	struct lock_list *entry;
665	int ret;
666
667	if (depth > max_recursion_depth)
668		max_recursion_depth = depth;
669	if (depth >= RECURSION_LIMIT)
670		return print_infinite_recursion_bug();
671
672	debug_atomic_inc(&nr_find_usage_forwards_checks);
673	if (source->usage_mask & (1 << find_usage_bit)) {
674		forwards_match = source;
675		return 2;
676	}
677
678	/*
679	 * Check this lock's dependency list:
680	 */
681	list_for_each_entry(entry, &source->locks_after, entry) {
682		debug_atomic_inc(&nr_find_usage_forwards_recursions);
683		ret = find_usage_forwards(entry->class, depth+1);
684		if (ret == 2 || ret == 0)
685			return ret;
686	}
687	return 1;
688}
689
690/*
691 * Find a node in the backwards-direction dependency sub-graph starting
692 * at <source> that matches <find_usage_bit>.
693 *
694 * Return 2 if such a node exists in the subgraph, and put that node
695 * into <backwards_match>.
696 *
697 * Return 1 otherwise and keep <backwards_match> unchanged.
698 * Return 0 on error.
699 */
700static noinline int
701find_usage_backwards(struct lock_class *source, unsigned int depth)
702{
703	struct lock_list *entry;
704	int ret;
705
706	if (!__raw_spin_is_locked(&lockdep_lock))
707		return DEBUG_LOCKS_WARN_ON(1);
708
709	if (depth > max_recursion_depth)
710		max_recursion_depth = depth;
711	if (depth >= RECURSION_LIMIT)
712		return print_infinite_recursion_bug();
713
714	debug_atomic_inc(&nr_find_usage_backwards_checks);
715	if (source->usage_mask & (1 << find_usage_bit)) {
716		backwards_match = source;
717		return 2;
718	}
719
720	/*
721	 * Check this lock's dependency list:
722	 */
723	list_for_each_entry(entry, &source->locks_before, entry) {
724		debug_atomic_inc(&nr_find_usage_backwards_recursions);
725		ret = find_usage_backwards(entry->class, depth+1);
726		if (ret == 2 || ret == 0)
727			return ret;
728	}
729	return 1;
730}
731
732static int
733print_bad_irq_dependency(struct task_struct *curr,
734			 struct held_lock *prev,
735			 struct held_lock *next,
736			 enum lock_usage_bit bit1,
737			 enum lock_usage_bit bit2,
738			 const char *irqclass)
739{
740	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
741		return 0;
742
743	printk("\n======================================================\n");
744	printk(  "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
745		irqclass, irqclass);
746	print_kernel_version();
747	printk(  "------------------------------------------------------\n");
748	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
749		curr->comm, curr->pid,
750		curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
751		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
752		curr->hardirqs_enabled,
753		curr->softirqs_enabled);
754	print_lock(next);
755
756	printk("\nand this task is already holding:\n");
757	print_lock(prev);
758	printk("which would create a new lock dependency:\n");
759	print_lock_name(prev->class);
760	printk(" ->");
761	print_lock_name(next->class);
762	printk("\n");
763
764	printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
765		irqclass);
766	print_lock_name(backwards_match);
767	printk("\n... which became %s-irq-safe at:\n", irqclass);
768
769	print_stack_trace(backwards_match->usage_traces + bit1, 1);
770
771	printk("\nto a %s-irq-unsafe lock:\n", irqclass);
772	print_lock_name(forwards_match);
773	printk("\n... which became %s-irq-unsafe at:\n", irqclass);
774	printk("...");
775
776	print_stack_trace(forwards_match->usage_traces + bit2, 1);
777
778	printk("\nother info that might help us debug this:\n\n");
779	lockdep_print_held_locks(curr);
780
781	printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
782	print_lock_dependencies(backwards_match, 0);
783
784	printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
785	print_lock_dependencies(forwards_match, 0);
786
787	printk("\nstack backtrace:\n");
788	dump_stack();
789
790	return 0;
791}
792
793static int
794check_usage(struct task_struct *curr, struct held_lock *prev,
795	    struct held_lock *next, enum lock_usage_bit bit_backwards,
796	    enum lock_usage_bit bit_forwards, const char *irqclass)
797{
798	int ret;
799
800	find_usage_bit = bit_backwards;
801	/* fills in <backwards_match> */
802	ret = find_usage_backwards(prev->class, 0);
803	if (!ret || ret == 1)
804		return ret;
805
806	find_usage_bit = bit_forwards;
807	ret = find_usage_forwards(next->class, 0);
808	if (!ret || ret == 1)
809		return ret;
810	/* ret == 2 */
811	return print_bad_irq_dependency(curr, prev, next,
812			bit_backwards, bit_forwards, irqclass);
813}
814
815#endif
816
817static int
818print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
819		   struct held_lock *next)
820{
821	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
822		return 0;
823
824	printk("\n=============================================\n");
825	printk(  "[ INFO: possible recursive locking detected ]\n");
826	print_kernel_version();
827	printk(  "---------------------------------------------\n");
828	printk("%s/%d is trying to acquire lock:\n",
829		curr->comm, curr->pid);
830	print_lock(next);
831	printk("\nbut task is already holding lock:\n");
832	print_lock(prev);
833
834	printk("\nother info that might help us debug this:\n");
835	lockdep_print_held_locks(curr);
836
837	printk("\nstack backtrace:\n");
838	dump_stack();
839
840	return 0;
841}
842
843/*
844 * Check whether we are holding such a class already.
845 *
846 * (Note that this has to be done separately, because the graph cannot
847 * detect such classes of deadlocks.)
848 *
849 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
850 */
851static int
852check_deadlock(struct task_struct *curr, struct held_lock *next,
853	       struct lockdep_map *next_instance, int read)
854{
855	struct held_lock *prev;
856	int i;
857
858	for (i = 0; i < curr->lockdep_depth; i++) {
859		prev = curr->held_locks + i;
860		if (prev->class != next->class)
861			continue;
862		/*
863		 * Allow read-after-read recursion of the same
864		 * lock class (i.e. read_lock(lock)+read_lock(lock)):
865		 */
866		if ((read == 2) && prev->read)
867			return 2;
868		return print_deadlock_bug(curr, prev, next);
869	}
870	return 1;
871}
872
873/*
874 * There was a chain-cache miss, and we are about to add a new dependency
875 * to a previous lock. We recursively validate the following rules:
876 *
877 *  - would the adding of the <prev> -> <next> dependency create a
878 *    circular dependency in the graph? [== circular deadlock]
879 *
880 *  - does the new prev->next dependency connect any hardirq-safe lock
881 *    (in the full backwards-subgraph starting at <prev>) with any
882 *    hardirq-unsafe lock (in the full forwards-subgraph starting at
883 *    <next>)? [== illegal lock inversion with hardirq contexts]
884 *
885 *  - does the new prev->next dependency connect any softirq-safe lock
886 *    (in the full backwards-subgraph starting at <prev>) with any
887 *    softirq-unsafe lock (in the full forwards-subgraph starting at
888 *    <next>)? [== illegal lock inversion with softirq contexts]
889 *
890 * any of these scenarios could lead to a deadlock.
891 *
892 * Then if all the validations pass, we add the forwards and backwards
893 * dependency.
894 */
895static int
896check_prev_add(struct task_struct *curr, struct held_lock *prev,
897	       struct held_lock *next, int distance)
898{
899	struct lock_list *entry;
900	int ret;
901
902	/*
903	 * Prove that the new <prev> -> <next> dependency would not
904	 * create a circular dependency in the graph. (We do this by
905	 * forward-recursing into the graph starting at <next>, and
906	 * checking whether we can reach <prev>.)
907	 *
908	 * We are using global variables to control the recursion, to
909	 * keep the stackframe size of the recursive functions low:
910	 */
911	check_source = next;
912	check_target = prev;
913	if (!(check_noncircular(next->class, 0)))
914		return print_circular_bug_tail();
915
916#ifdef CONFIG_TRACE_IRQFLAGS
917	/*
918	 * Prove that the new dependency does not connect a hardirq-safe
919	 * lock with a hardirq-unsafe lock - to achieve this we search
920	 * the backwards-subgraph starting at <prev>, and the
921	 * forwards-subgraph starting at <next>:
922	 */
923	if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
924					LOCK_ENABLED_HARDIRQS, "hard"))
925		return 0;
926
927	/*
928	 * Prove that the new dependency does not connect a hardirq-safe-read
929	 * lock with a hardirq-unsafe lock - to achieve this we search
930	 * the backwards-subgraph starting at <prev>, and the
931	 * forwards-subgraph starting at <next>:
932	 */
933	if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
934					LOCK_ENABLED_HARDIRQS, "hard-read"))
935		return 0;
936
937	/*
938	 * Prove that the new dependency does not connect a softirq-safe
939	 * lock with a softirq-unsafe lock - to achieve this we search
940	 * the backwards-subgraph starting at <prev>, and the
941	 * forwards-subgraph starting at <next>:
942	 */
943	if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
944					LOCK_ENABLED_SOFTIRQS, "soft"))
945		return 0;
946	/*
947	 * Prove that the new dependency does not connect a softirq-safe-read
948	 * lock with a softirq-unsafe lock - to achieve this we search
949	 * the backwards-subgraph starting at <prev>, and the
950	 * forwards-subgraph starting at <next>:
951	 */
952	if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
					LOCK_ENABLED_SOFTIRQS, "soft-read"))
954		return 0;
955#endif
	/*
	 * For recursive read-locks we do all the dependency checks,
	 * but we don't store read-triggered dependencies (only
	 * write-triggered dependencies). This ensures that only the
	 * write-side dependencies matter, and that if for example a
	 * write-lock never takes any other locks, then the reads are
	 * equivalent to a NOP.
	 */
964	if (next->read == 2 || prev->read == 2)
965		return 1;
966	/*
967	 * Is the <prev> -> <next> dependency already present?
968	 *
969	 * (this may occur even though this is a new chain: consider
970	 *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
971	 *  chains - the second one will be new, but L1 already has
972	 *  L2 added to its dependency list, due to the first chain.)
973	 */
974	list_for_each_entry(entry, &prev->class->locks_after, entry) {
975		if (entry->class == next->class) {
976			if (distance == 1)
977				entry->distance = 1;
978			return 2;
979		}
980	}
981
982	/*
983	 * Ok, all validations passed, add the new lock
984	 * to the previous lock's dependency list:
985	 */
986	ret = add_lock_to_list(prev->class, next->class,
987			       &prev->class->locks_after, next->acquire_ip, distance);
988
989	if (!ret)
990		return 0;
991
992	ret = add_lock_to_list(next->class, prev->class,
993			       &next->class->locks_before, next->acquire_ip, distance);
994	if (!ret)
995		return 0;
996
997	/*
998	 * Debugging printouts:
999	 */
1000	if (verbose(prev->class) || verbose(next->class)) {
1001		graph_unlock();
1002		printk("\n new dependency: ");
1003		print_lock_name(prev->class);
1004		printk(" => ");
1005		print_lock_name(next->class);
1006		printk("\n");
1007		dump_stack();
1008		return graph_lock();
1009	}
1010	return 1;
1011}
1012
1013/*
1014 * Add the dependency to all directly-previous locks that are 'relevant'.
1015 * The ones that are relevant are (in increasing distance from curr):
1016 * all consecutive trylock entries and the final non-trylock entry - or
1017 * the end of this context's lock-chain - whichever comes first.
1018 */
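/*
 * For example (illustrative): with a held stack of L1, T2 (trylock) and
 * T3 (trylock), acquiring a new lock L4 adds the dependencies T3 -> L4,
 * T2 -> L4 and L1 -> L4; the walk stops at L1, the first non-trylock
 * entry.
 */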
1019static int
1020check_prevs_add(struct task_struct *curr, struct held_lock *next)
1021{
1022	int depth = curr->lockdep_depth;
1023	struct held_lock *hlock;
1024
1025	/*
1026	 * Debugging checks.
1027	 *
1028	 * Depth must not be zero for a non-head lock:
1029	 */
1030	if (!depth)
1031		goto out_bug;
1032	/*
1033	 * At least two relevant locks must exist for this
1034	 * to be a head:
1035	 */
1036	if (curr->held_locks[depth].irq_context !=
1037			curr->held_locks[depth-1].irq_context)
1038		goto out_bug;
1039
1040	for (;;) {
1041		int distance = curr->lockdep_depth - depth + 1;
1042		hlock = curr->held_locks + depth-1;
1043		/*
1044		 * Only non-recursive-read entries get new dependencies
1045		 * added:
1046		 */
1047		if (hlock->read != 2) {
1048			if (!check_prev_add(curr, hlock, next, distance))
1049				return 0;
1050			/*
1051			 * Stop after the first non-trylock entry,
1052			 * as non-trylock entries have added their
1053			 * own direct dependencies already, so this
1054			 * lock is connected to them indirectly:
1055			 */
1056			if (!hlock->trylock)
1057				break;
1058		}
1059		depth--;
1060		/*
1061		 * End of lock-stack?
1062		 */
1063		if (!depth)
1064			break;
1065		/*
1066		 * Stop the search if we cross into another context:
1067		 */
1068		if (curr->held_locks[depth].irq_context !=
1069				curr->held_locks[depth-1].irq_context)
1070			break;
1071	}
1072	return 1;
1073out_bug:
1074	if (!debug_locks_off_graph_unlock())
1075		return 0;
1076
1077	WARN_ON(1);
1078
1079	return 0;
1080}
1081
1082
1083/*
1084 * Is this the address of a static object:
1085 */
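/*
 * (e.g. a global spinlock is a static object; a lock embedded in a
 * kmalloc()ed structure is not - such a lock gets its class key from
 * the static lock_class_key passed to its init function instead.)
 */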
1086static int static_obj(void *obj)
1087{
1088	unsigned long start = (unsigned long) &_stext,
1089		      end   = (unsigned long) &_end,
1090		      addr  = (unsigned long) obj;
1091#ifdef CONFIG_SMP
1092	int i;
1093#endif
1094
1095	/*
1096	 * static variable?
1097	 */
1098	if ((addr >= start) && (addr < end))
1099		return 1;
1100
1101#ifdef CONFIG_SMP
1102	/*
1103	 * percpu var?
1104	 */
1105	for_each_possible_cpu(i) {
1106		start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
1107		end   = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
1108					+ per_cpu_offset(i);
1109
1110		if ((addr >= start) && (addr < end))
1111			return 1;
1112	}
1113#endif
1114
1115	/*
1116	 * module var?
1117	 */
1118	return is_module_address(addr);
1119}
1120
1121/*
1122 * To make lock name printouts unique, we calculate a unique
1123 * class->name_version generation counter:
1124 */
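/*
 * For example, if two different keys get registered under the same
 * textual name "sem", the second class is reported as "sem#2" by
 * print_lock_name().
 */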
1125static int count_matching_names(struct lock_class *new_class)
1126{
1127	struct lock_class *class;
1128	int count = 0;
1129
1130	if (!new_class->name)
1131		return 0;
1132
1133	list_for_each_entry(class, &all_lock_classes, lock_entry) {
1134		if (new_class->key - new_class->subclass == class->key)
1135			return class->name_version;
1136		if (class->name && !strcmp(class->name, new_class->name))
1137			count = max(count, class->name_version);
1138	}
1139
1140	return count + 1;
1141}
1142
/*
 * Look up a lock's class in the hash-table. Returns the class if it is
 * already registered, or NULL if not. (Registering a new class, and
 * caching the result in the lock object itself, is done by
 * register_lock_class().)
 */
1148static inline struct lock_class *
1149look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
1150{
1151	struct lockdep_subclass_key *key;
1152	struct list_head *hash_head;
1153	struct lock_class *class;
1154
1155#ifdef CONFIG_DEBUG_LOCKDEP
1156	/*
1157	 * If the architecture calls into lockdep before initializing
1158	 * the hashes then we'll warn about it later. (we cannot printk
1159	 * right now)
1160	 */
1161	if (unlikely(!lockdep_initialized)) {
1162		lockdep_init();
1163		lockdep_init_error = 1;
1164	}
1165#endif
1166
1167	/*
1168	 * Static locks do not have their class-keys yet - for them the key
1169	 * is the lock object itself:
1170	 */
1171	if (unlikely(!lock->key))
1172		lock->key = (void *)lock;
1173
1174	/*
1175	 * NOTE: the class-key must be unique. For dynamic locks, a static
1176	 * lock_class_key variable is passed in through the mutex_init()
1177	 * (or spin_lock_init()) call - which acts as the key. For static
1178	 * locks we use the lock object itself as the key.
1179	 */
1180	BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(struct lock_class));
1181
1182	key = lock->key->subkeys + subclass;
1183
1184	hash_head = classhashentry(key);
1185
1186	/*
1187	 * We can walk the hash lockfree, because the hash only
1188	 * grows, and we are careful when adding entries to the end:
1189	 */
1190	list_for_each_entry(class, hash_head, hash_entry)
1191		if (class->key == key)
1192			return class;
1193
1194	return NULL;
1195}
1196
1197/*
1198 * Register a lock's class in the hash-table, if the class is not present
1199 * yet. Otherwise we look it up. We cache the result in the lock object
1200 * itself, so actual lookup of the hash should be once per lock object.
1201 */
1202static inline struct lock_class *
1203register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
1204{
1205	struct lockdep_subclass_key *key;
1206	struct list_head *hash_head;
1207	struct lock_class *class;
1208	unsigned long flags;
1209
1210	class = look_up_lock_class(lock, subclass);
1211	if (likely(class))
1212		return class;
1213
1214	/*
1215	 * Debug-check: all keys must be persistent!
1216 	 */
1217	if (!static_obj(lock->key)) {
1218		debug_locks_off();
1219		printk("INFO: trying to register non-static key.\n");
1220		printk("the code is fine but needs lockdep annotation.\n");
1221		printk("turning off the locking correctness validator.\n");
1222		dump_stack();
1223
1224		return NULL;
1225	}
1226
1227	key = lock->key->subkeys + subclass;
1228	hash_head = classhashentry(key);
1229
1230	raw_local_irq_save(flags);
1231	if (!graph_lock()) {
1232		raw_local_irq_restore(flags);
1233		return NULL;
1234	}
1235	/*
1236	 * We have to do the hash-walk again, to avoid races
1237	 * with another CPU:
1238	 */
1239	list_for_each_entry(class, hash_head, hash_entry)
1240		if (class->key == key)
1241			goto out_unlock_set;
1242	/*
1243	 * Allocate a new key from the static array, and add it to
1244	 * the hash:
1245	 */
1246	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
1247		if (!debug_locks_off_graph_unlock()) {
1248			raw_local_irq_restore(flags);
1249			return NULL;
1250		}
1251		raw_local_irq_restore(flags);
1252
1253		printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
1254		printk("turning off the locking correctness validator.\n");
1255		return NULL;
1256	}
1257	class = lock_classes + nr_lock_classes++;
1258	debug_atomic_inc(&nr_unused_locks);
1259	class->key = key;
1260	class->name = lock->name;
1261	class->subclass = subclass;
1262	INIT_LIST_HEAD(&class->lock_entry);
1263	INIT_LIST_HEAD(&class->locks_before);
1264	INIT_LIST_HEAD(&class->locks_after);
1265	class->name_version = count_matching_names(class);
1266	/*
1267	 * We use RCU's safe list-add method to make
1268	 * parallel walking of the hash-list safe:
1269	 */
1270	list_add_tail_rcu(&class->hash_entry, hash_head);
1271
1272	if (verbose(class)) {
1273		graph_unlock();
1274		raw_local_irq_restore(flags);
1275
1276		printk("\nnew class %p: %s", class->key, class->name);
1277		if (class->name_version > 1)
1278			printk("#%d", class->name_version);
1279		printk("\n");
1280		dump_stack();
1281
1282		raw_local_irq_save(flags);
1283		if (!graph_lock()) {
1284			raw_local_irq_restore(flags);
1285			return NULL;
1286		}
1287	}
1288out_unlock_set:
1289	graph_unlock();
1290	raw_local_irq_restore(flags);
1291
1292	if (!subclass || force)
1293		lock->class_cache = class;
1294
1295	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
1296		return NULL;
1297
1298	return class;
1299}
1300
/*
 * Look up a dependency chain. If the key is not present yet then
 * add it and return 1 - in this case the caller must go on to validate
 * the new dependency chain. If the key is already hashed, return 0.
 * (On a return value of 1 the graph_lock is held.)
 */
1307static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
1308{
1309	struct list_head *hash_head = chainhashentry(chain_key);
1310	struct lock_chain *chain;
1311
1312	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1313		return 0;
1314	/*
1315	 * We can walk it lock-free, because entries only get added
1316	 * to the hash:
1317	 */
1318	list_for_each_entry(chain, hash_head, entry) {
1319		if (chain->chain_key == chain_key) {
1320cache_hit:
1321			debug_atomic_inc(&chain_lookup_hits);
1322			if (very_verbose(class))
1323				printk("\nhash chain already cached, key: "
1324					"%016Lx tail class: [%p] %s\n",
1325					(unsigned long long)chain_key,
1326					class->key, class->name);
1327			return 0;
1328		}
1329	}
1330	if (very_verbose(class))
1331		printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
1332			(unsigned long long)chain_key, class->key, class->name);
1333	/*
1334	 * Allocate a new chain entry from the static array, and add
1335	 * it to the hash:
1336	 */
1337	if (!graph_lock())
1338		return 0;
1339	/*
1340	 * We have to walk the chain again locked - to avoid duplicates:
1341	 */
1342	list_for_each_entry(chain, hash_head, entry) {
1343		if (chain->chain_key == chain_key) {
1344			graph_unlock();
1345			goto cache_hit;
1346		}
1347	}
1348	if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
1349		if (!debug_locks_off_graph_unlock())
1350			return 0;
1351
1352		printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
1353		printk("turning off the locking correctness validator.\n");
1354		return 0;
1355	}
1356	chain = lock_chains + nr_lock_chains++;
1357	chain->chain_key = chain_key;
1358	list_add_tail_rcu(&chain->entry, hash_head);
1359	debug_atomic_inc(&chain_lookup_misses);
1360#ifdef CONFIG_TRACE_IRQFLAGS
1361	if (current->hardirq_context)
1362		nr_hardirq_chains++;
1363	else {
1364		if (current->softirq_context)
1365			nr_softirq_chains++;
1366		else
1367			nr_process_chains++;
1368	}
1369#else
1370	nr_process_chains++;
1371#endif
1372
1373	return 1;
1374}
1375
1376/*
1377 * We are building curr_chain_key incrementally, so double-check
1378 * it from scratch, to make sure that it's done correctly:
1379 */
1380static void check_chain_key(struct task_struct *curr)
1381{
1382#ifdef CONFIG_DEBUG_LOCKDEP
1383	struct held_lock *hlock, *prev_hlock = NULL;
1384	unsigned int i, id;
1385	u64 chain_key = 0;
1386
1387	for (i = 0; i < curr->lockdep_depth; i++) {
1388		hlock = curr->held_locks + i;
1389		if (chain_key != hlock->prev_chain_key) {
1390			debug_locks_off();
1391			printk("hm#1, depth: %u [%u], %016Lx != %016Lx\n",
1392				curr->lockdep_depth, i,
1393				(unsigned long long)chain_key,
1394				(unsigned long long)hlock->prev_chain_key);
1395			WARN_ON(1);
1396			return;
1397		}
1398		id = hlock->class - lock_classes;
1399		if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
1400			return;
1401
1402		if (prev_hlock && (prev_hlock->irq_context !=
1403							hlock->irq_context))
1404			chain_key = 0;
1405		chain_key = iterate_chain_key(chain_key, id);
1406		prev_hlock = hlock;
1407	}
1408	if (chain_key != curr->curr_chain_key) {
1409		debug_locks_off();
1410		printk("hm#2, depth: %u [%u], %016Lx != %016Lx\n",
1411			curr->lockdep_depth, i,
1412			(unsigned long long)chain_key,
1413			(unsigned long long)curr->curr_chain_key);
1414		WARN_ON(1);
1415	}
1416#endif
1417}
1418
1419#ifdef CONFIG_TRACE_IRQFLAGS
1420
1421/*
1422 * print irq inversion bug:
1423 */
1424static int
1425print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
1426			struct held_lock *this, int forwards,
1427			const char *irqclass)
1428{
1429	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1430		return 0;
1431
1432	printk("\n=========================================================\n");
1433	printk(  "[ INFO: possible irq lock inversion dependency detected ]\n");
1434	print_kernel_version();
1435	printk(  "---------------------------------------------------------\n");
1436	printk("%s/%d just changed the state of lock:\n",
1437		curr->comm, curr->pid);
1438	print_lock(this);
1439	if (forwards)
1440		printk("but this lock took another, %s-irq-unsafe lock in the past:\n", irqclass);
1441	else
1442		printk("but this lock was taken by another, %s-irq-safe lock in the past:\n", irqclass);
1443	print_lock_name(other);
1444	printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
1445
1446	printk("\nother info that might help us debug this:\n");
1447	lockdep_print_held_locks(curr);
1448
1449	printk("\nthe first lock's dependencies:\n");
1450	print_lock_dependencies(this->class, 0);
1451
1452	printk("\nthe second lock's dependencies:\n");
1453	print_lock_dependencies(other, 0);
1454
1455	printk("\nstack backtrace:\n");
1456	dump_stack();
1457
1458	return 0;
1459}
1460
1461/*
1462 * Prove that in the forwards-direction subgraph starting at <this>
1463 * there is no lock matching <mask>:
1464 */
1465static int
1466check_usage_forwards(struct task_struct *curr, struct held_lock *this,
1467		     enum lock_usage_bit bit, const char *irqclass)
1468{
1469	int ret;
1470
1471	find_usage_bit = bit;
1472	/* fills in <forwards_match> */
1473	ret = find_usage_forwards(this->class, 0);
1474	if (!ret || ret == 1)
1475		return ret;
1476
1477	return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
1478}
1479
1480/*
1481 * Prove that in the backwards-direction subgraph starting at <this>
1482 * there is no lock matching <mask>:
1483 */
1484static int
1485check_usage_backwards(struct task_struct *curr, struct held_lock *this,
1486		      enum lock_usage_bit bit, const char *irqclass)
1487{
1488	int ret;
1489
1490	find_usage_bit = bit;
1491	/* fills in <backwards_match> */
1492	ret = find_usage_backwards(this->class, 0);
1493	if (!ret || ret == 1)
1494		return ret;
1495
1496	return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
1497}
1498
1499void print_irqtrace_events(struct task_struct *curr)
1500{
1501	printk("irq event stamp: %u\n", curr->irq_events);
1502	printk("hardirqs last  enabled at (%u): ", curr->hardirq_enable_event);
1503	print_ip_sym(curr->hardirq_enable_ip);
1504	printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
1505	print_ip_sym(curr->hardirq_disable_ip);
1506	printk("softirqs last  enabled at (%u): ", curr->softirq_enable_event);
1507	print_ip_sym(curr->softirq_enable_ip);
1508	printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
1509	print_ip_sym(curr->softirq_disable_ip);
1510}
1511
1512#endif
1513
1514static int
1515print_usage_bug(struct task_struct *curr, struct held_lock *this,
1516		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
1517{
1518	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1519		return 0;
1520
1521	printk("\n=================================\n");
1522	printk(  "[ INFO: inconsistent lock state ]\n");
1523	print_kernel_version();
1524	printk(  "---------------------------------\n");
1525
1526	printk("inconsistent {%s} -> {%s} usage.\n",
1527		usage_str[prev_bit], usage_str[new_bit]);
1528
1529	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
1530		curr->comm, curr->pid,
1531		trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
1532		trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
1533		trace_hardirqs_enabled(curr),
1534		trace_softirqs_enabled(curr));
1535	print_lock(this);
1536
1537	printk("{%s} state was registered at:\n", usage_str[prev_bit]);
1538	print_stack_trace(this->class->usage_traces + prev_bit, 1);
1539
1540	print_irqtrace_events(curr);
1541	printk("\nother info that might help us debug this:\n");
1542	lockdep_print_held_locks(curr);
1543
1544	printk("\nstack backtrace:\n");
1545	dump_stack();
1546
1547	return 0;
1548}
1549
1550/*
1551 * Print out an error if an invalid bit is set:
1552 */
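/*
 * For example, when marking a lock hardirq-unsafe (LOCK_ENABLED_HARDIRQS),
 * the bad bit checked is LOCK_USED_IN_HARDIRQ: a lock that has been used
 * in hardirq context must never also be acquired with hardirqs enabled.
 */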
1553static inline int
1554valid_state(struct task_struct *curr, struct held_lock *this,
1555	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
1556{
1557	if (unlikely(this->class->usage_mask & (1 << bad_bit)))
1558		return print_usage_bug(curr, this, bad_bit, new_bit);
1559	return 1;
1560}
1561
1562#define STRICT_READ_CHECKS	1
1563
1564/*
1565 * Mark a lock with a usage bit, and validate the state transition:
1566 */
1567static int mark_lock(struct task_struct *curr, struct held_lock *this,
1568		     enum lock_usage_bit new_bit)
1569{
1570	unsigned int new_mask = 1 << new_bit, ret = 1;
1571
1572	/*
1573	 * If already set then do not dirty the cacheline,
1574	 * nor do any checks:
1575	 */
1576	if (likely(this->class->usage_mask & new_mask))
1577		return 1;
1578
1579	if (!graph_lock())
1580		return 0;
	/*
	 * Make sure we didn't race:
	 */
1584	if (unlikely(this->class->usage_mask & new_mask)) {
1585		graph_unlock();
1586		return 1;
1587	}
1588
1589	this->class->usage_mask |= new_mask;
1590
1591	if (!save_trace(this->class->usage_traces + new_bit))
1592		return 0;
1593
1594	switch (new_bit) {
1595#ifdef CONFIG_TRACE_IRQFLAGS
1596	case LOCK_USED_IN_HARDIRQ:
1597		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
1598			return 0;
1599		if (!valid_state(curr, this, new_bit,
1600				 LOCK_ENABLED_HARDIRQS_READ))
1601			return 0;
1602		/*
1603		 * just marked it hardirq-safe, check that this lock
1604		 * took no hardirq-unsafe lock in the past:
1605		 */
1606		if (!check_usage_forwards(curr, this,
1607					  LOCK_ENABLED_HARDIRQS, "hard"))
1608			return 0;
1609#if STRICT_READ_CHECKS
1610		/*
1611		 * just marked it hardirq-safe, check that this lock
1612		 * took no hardirq-unsafe-read lock in the past:
1613		 */
1614		if (!check_usage_forwards(curr, this,
1615				LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
1616			return 0;
1617#endif
1618		if (hardirq_verbose(this->class))
1619			ret = 2;
1620		break;
1621	case LOCK_USED_IN_SOFTIRQ:
1622		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
1623			return 0;
1624		if (!valid_state(curr, this, new_bit,
1625				 LOCK_ENABLED_SOFTIRQS_READ))
1626			return 0;
1627		/*
1628		 * just marked it softirq-safe, check that this lock
1629		 * took no softirq-unsafe lock in the past:
1630		 */
1631		if (!check_usage_forwards(curr, this,
1632					  LOCK_ENABLED_SOFTIRQS, "soft"))
1633			return 0;
1634#if STRICT_READ_CHECKS
1635		/*
1636		 * just marked it softirq-safe, check that this lock
1637		 * took no softirq-unsafe-read lock in the past:
1638		 */
1639		if (!check_usage_forwards(curr, this,
1640				LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
1641			return 0;
1642#endif
1643		if (softirq_verbose(this->class))
1644			ret = 2;
1645		break;
1646	case LOCK_USED_IN_HARDIRQ_READ:
1647		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS))
1648			return 0;
1649		/*
1650		 * just marked it hardirq-read-safe, check that this lock
1651		 * took no hardirq-unsafe lock in the past:
1652		 */
1653		if (!check_usage_forwards(curr, this,
1654					  LOCK_ENABLED_HARDIRQS, "hard"))
1655			return 0;
1656		if (hardirq_verbose(this->class))
1657			ret = 2;
1658		break;
1659	case LOCK_USED_IN_SOFTIRQ_READ:
1660		if (!valid_state(curr, this, new_bit, LOCK_ENABLED_SOFTIRQS))
1661			return 0;
1662		/*
1663		 * just marked it softirq-read-safe, check that this lock
1664		 * took no softirq-unsafe lock in the past:
1665		 */
1666		if (!check_usage_forwards(curr, this,
1667					  LOCK_ENABLED_SOFTIRQS, "soft"))
1668			return 0;
1669		if (softirq_verbose(this->class))
1670			ret = 2;
1671		break;
1672	case LOCK_ENABLED_HARDIRQS:
1673		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
1674			return 0;
1675		if (!valid_state(curr, this, new_bit,
1676				 LOCK_USED_IN_HARDIRQ_READ))
1677			return 0;
1678		/*
1679		 * just marked it hardirq-unsafe, check that no hardirq-safe
1680		 * lock in the system ever took it in the past:
1681		 */
1682		if (!check_usage_backwards(curr, this,
1683					   LOCK_USED_IN_HARDIRQ, "hard"))
1684			return 0;
1685#if STRICT_READ_CHECKS
1686		/*
1687		 * just marked it hardirq-unsafe, check that no
1688		 * hardirq-safe-read lock in the system ever took
1689		 * it in the past:
1690		 */
1691		if (!check_usage_backwards(curr, this,
1692				   LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
1693			return 0;
1694#endif
1695		if (hardirq_verbose(this->class))
1696			ret = 2;
1697		break;
1698	case LOCK_ENABLED_SOFTIRQS:
1699		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
1700			return 0;
1701		if (!valid_state(curr, this, new_bit,
1702				 LOCK_USED_IN_SOFTIRQ_READ))
1703			return 0;
1704		/*
1705		 * just marked it softirq-unsafe, check that no softirq-safe
1706		 * lock in the system ever took it in the past:
1707		 */
1708		if (!check_usage_backwards(curr, this,
1709					   LOCK_USED_IN_SOFTIRQ, "soft"))
1710			return 0;
1711#if STRICT_READ_CHECKS
1712		/*
1713		 * just marked it softirq-unsafe, check that no
1714		 * softirq-safe-read lock in the system ever took
1715		 * it in the past:
1716		 */
1717		if (!check_usage_backwards(curr, this,
1718				   LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
1719			return 0;
1720#endif
1721		if (softirq_verbose(this->class))
1722			ret = 2;
1723		break;
1724	case LOCK_ENABLED_HARDIRQS_READ:
1725		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_HARDIRQ))
1726			return 0;
1727#if STRICT_READ_CHECKS
1728		/*
1729		 * just marked it hardirq-read-unsafe, check that no
1730		 * hardirq-safe lock in the system ever took it in the past:
1731		 */
1732		if (!check_usage_backwards(curr, this,
1733					   LOCK_USED_IN_HARDIRQ, "hard"))
1734			return 0;
1735#endif
1736		if (hardirq_verbose(this->class))
1737			ret = 2;
1738		break;
1739	case LOCK_ENABLED_SOFTIRQS_READ:
1740		if (!valid_state(curr, this, new_bit, LOCK_USED_IN_SOFTIRQ))
1741			return 0;
1742#if STRICT_READ_CHECKS
1743		/*
1744		 * just marked it softirq-read-unsafe, check that no
1745		 * softirq-safe lock in the system ever took it in the past:
1746		 */
1747		if (!check_usage_backwards(curr, this,
1748					   LOCK_USED_IN_SOFTIRQ, "soft"))
1749			return 0;
1750#endif
1751		if (softirq_verbose(this->class))
1752			ret = 2;
1753		break;
1754#endif
1755	case LOCK_USED:
1756		/*
1757		 * Add it to the global list of classes:
1758		 */
1759		list_add_tail_rcu(&this->class->lock_entry, &all_lock_classes);
1760		debug_atomic_dec(&nr_unused_locks);
1761		break;
1762	default:
1763		if (!debug_locks_off_graph_unlock())
1764			return 0;
1765		WARN_ON(1);
1766		return 0;
1767	}
1768
1769	graph_unlock();
1770
1771	/*
1772	 * We must printk outside of the graph_lock:
1773	 */
1774	if (ret == 2) {
1775		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
1776		print_lock(this);
1777		print_irqtrace_events(curr);
1778		dump_stack();
1779	}
1780
1781	return ret;
1782}
1783
1784#ifdef CONFIG_TRACE_IRQFLAGS
1785/*
1786 * Mark all held locks with a usage bit:
1787 */
1788static int
1789mark_held_locks(struct task_struct *curr, int hardirq)
1790{
1791	enum lock_usage_bit usage_bit;
1792	struct held_lock *hlock;
1793	int i;
1794
1795	for (i = 0; i < curr->lockdep_depth; i++) {
1796		hlock = curr->held_locks + i;
1797
1798		if (hardirq) {
1799			if (hlock->read)
1800				usage_bit = LOCK_ENABLED_HARDIRQS_READ;
1801			else
1802				usage_bit = LOCK_ENABLED_HARDIRQS;
1803		} else {
1804			if (hlock->read)
1805				usage_bit = LOCK_ENABLED_SOFTIRQS_READ;
1806			else
1807				usage_bit = LOCK_ENABLED_SOFTIRQS;
1808		}
1809		if (!mark_lock(curr, hlock, usage_bit))
1810			return 0;
1811	}
1812
1813	return 1;
1814}
1815
1816/*
1817 * Debugging helper: via this flag we know that we are in
1818 * 'early bootup code', and will warn about any invalid irqs-on event:
1819 */
1820static int early_boot_irqs_enabled;
1821
1822void early_boot_irqs_off(void)
1823{
1824	early_boot_irqs_enabled = 0;
1825}
1826
1827void early_boot_irqs_on(void)
1828{
1829	early_boot_irqs_enabled = 1;
1830}
1831
1832/*
1833 * Hardirqs will be enabled:
1834 */
1835void trace_hardirqs_on(void)
1836{
1837	struct task_struct *curr = current;
1838	unsigned long ip;
1839
1840	if (unlikely(!debug_locks || current->lockdep_recursion))
1841		return;
1842
1843	if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
1844		return;
1845
1846	if (unlikely(curr->hardirqs_enabled)) {
1847		debug_atomic_inc(&redundant_hardirqs_on);
1848		return;
1849	}
1850	/* we'll do an OFF -> ON transition: */
1851	curr->hardirqs_enabled = 1;
1852	ip = (unsigned long) __builtin_return_address(0);
1853
1854	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1855		return;
1856	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
1857		return;
1858	/*
1859	 * We are going to turn hardirqs on, so set the
1860	 * usage bit for all held locks:
1861	 */
1862	if (!mark_held_locks(curr, 1))
1863		return;
1864	/*
1865	 * If we have softirqs enabled, then set the usage
1866	 * bit for all held locks. (disabled hardirqs prevented
1867	 * this bit from being set before)
1868	 */
1869	if (curr->softirqs_enabled)
1870		if (!mark_held_locks(curr, 0))
1871			return;
1872
1873	curr->hardirq_enable_ip = ip;
1874	curr->hardirq_enable_event = ++curr->irq_events;
1875	debug_atomic_inc(&hardirqs_on_events);
1876}
1877
1878EXPORT_SYMBOL(trace_hardirqs_on);
1879
1880/*
1881 * Hardirqs were disabled:
1882 */
1883void trace_hardirqs_off(void)
1884{
1885	struct task_struct *curr = current;
1886
1887	if (unlikely(!debug_locks || current->lockdep_recursion))
1888		return;
1889
1890	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1891		return;
1892
1893	if (curr->hardirqs_enabled) {
1894		/*
1895		 * We have done an ON -> OFF transition:
1896		 */
1897		curr->hardirqs_enabled = 0;
1898		curr->hardirq_disable_ip = _RET_IP_;
1899		curr->hardirq_disable_event = ++curr->irq_events;
1900		debug_atomic_inc(&hardirqs_off_events);
1901	} else
1902		debug_atomic_inc(&redundant_hardirqs_off);
1903}
1904
1905EXPORT_SYMBOL(trace_hardirqs_off);
1906
1907/*
1908 * Softirqs will be enabled:
1909 */
1910void trace_softirqs_on(unsigned long ip)
1911{
1912	struct task_struct *curr = current;
1913
1914	if (unlikely(!debug_locks))
1915		return;
1916
1917	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1918		return;
1919
1920	if (curr->softirqs_enabled) {
1921		debug_atomic_inc(&redundant_softirqs_on);
1922		return;
1923	}
1924
1925	/*
1926	 * We'll do an OFF -> ON transition:
1927	 */
1928	curr->softirqs_enabled = 1;
1929	curr->softirq_enable_ip = ip;
1930	curr->softirq_enable_event = ++curr->irq_events;
1931	debug_atomic_inc(&softirqs_on_events);
1932	/*
1933	 * We are going to turn softirqs on, so set the
1934	 * usage bit for all held locks, if hardirqs are
1935	 * enabled too:
1936	 */
1937	if (curr->hardirqs_enabled)
1938		mark_held_locks(curr, 0);
1939}
1940
1941/*
1942 * Softirqs were disabled:
1943 */
1944void trace_softirqs_off(unsigned long ip)
1945{
1946	struct task_struct *curr = current;
1947
1948	if (unlikely(!debug_locks))
1949		return;
1950
1951	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
1952		return;
1953
1954	if (curr->softirqs_enabled) {
1955		/*
1956		 * We have done an ON -> OFF transition:
1957		 */
1958		curr->softirqs_enabled = 0;
1959		curr->softirq_disable_ip = ip;
1960		curr->softirq_disable_event = ++curr->irq_events;
1961		debug_atomic_inc(&softirqs_off_events);
1962		DEBUG_LOCKS_WARN_ON(!softirq_count());
1963	} else
1964		debug_atomic_inc(&redundant_softirqs_off);
1965}
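
/*
 * The softirq counterparts above are driven from the BH-disable path:
 * local_bh_disable()/local_bh_enable() in kernel/softirq.c call
 * trace_softirqs_off()/trace_softirqs_on() on the outermost
 * disable/enable, so curr->softirqs_enabled mirrors whether softirq
 * processing is currently allowed.
 */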
1966
1967#endif
1968
1969/*
1970 * Initialize a lock instance's lock-class mapping info:
1971 */
1972void lockdep_init_map(struct lockdep_map *lock, const char *name,
1973		      struct lock_class_key *key, int subclass)
1974{
1975	if (unlikely(!debug_locks))
1976		return;
1977
1978	if (DEBUG_LOCKS_WARN_ON(!key))
1979		return;
1980	if (DEBUG_LOCKS_WARN_ON(!name))
1981		return;
1982	/*
1983	 * Sanity check, the lock-class key must be persistent:
1984	 */
1985	if (!static_obj(key)) {
1986		printk("BUG: key %p not in .data!\n", key);
1987		DEBUG_LOCKS_WARN_ON(1);
1988		return;
1989	}
1990	lock->name = name;
1991	lock->key = key;
1992	lock->class_cache = NULL;
1993	if (subclass)
1994		register_lock_class(lock, subclass, 1);
1995}
1996
1997EXPORT_SYMBOL_GPL(lockdep_init_map);
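
/*
 * Callers rarely use lockdep_init_map() directly - the lock init
 * macros do it behind the scenes, roughly along these lines
 * (illustration, see e.g. spin_lock_init() in <linux/spinlock.h>):
 *
 *	#define spin_lock_init(lock)				\
 *	do {							\
 *		static struct lock_class_key __key;		\
 *								\
 *		__spin_lock_init((lock), #lock, &__key);	\
 *	} while (0)
 *
 * The static __key is what satisfies the static_obj() check above:
 * one key per lock-initialization site, hence one class per site.
 */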
1998
1999/*
2000 * This gets called for every mutex_lock*()/spin_lock*() operation.
2001 * We maintain the dependency maps and validate the locking attempt:
2002 */
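/*
 * About the arguments: "read" is 0 for an exclusive (write) acquire,
 * 1 for a read acquire and 2 for a recursive read acquire (see the
 * hlock->read == 2 handling below); "check" selects how much
 * validation is done - only check == 2 performs the irq-usage
 * marking and the chain validation below.
 */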
2003static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2004			  int trylock, int read, int check, int hardirqs_off,
2005			  unsigned long ip)
2006{
2007	struct task_struct *curr = current;
2008	struct lock_class *class = NULL;
2009	struct held_lock *hlock;
2010	unsigned int depth, id;
2011	int chain_head = 0;
2012	u64 chain_key;
2013
2014	if (unlikely(!debug_locks))
2015		return 0;
2016
2017	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2018		return 0;
2019
2020	if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
2021		debug_locks_off();
2022		printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n");
2023		printk("turning off the locking correctness validator.\n");
2024		return 0;
2025	}
2026
2027	if (!subclass)
2028		class = lock->class_cache;
2029	/*
2030	 * Not cached yet, or a subclass? (only subclass 0 is cached)
2031	 */
2032	if (unlikely(!class)) {
2033		class = register_lock_class(lock, subclass, 0);
2034		if (!class)
2035			return 0;
2036	}
2037	debug_atomic_inc((atomic_t *)&class->ops);
2038	if (very_verbose(class)) {
2039		printk("\nacquire class [%p] %s", class->key, class->name);
2040		if (class->name_version > 1)
2041			printk("#%d", class->name_version);
2042		printk("\n");
2043		dump_stack();
2044	}
2045
2046	/*
2047	 * Add the lock to the list of currently held locks.
2048	 * (we dont increase the depth just yet, up until the
2049	 * dependency checks are done)
2050	 */
2051	depth = curr->lockdep_depth;
2052	if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
2053		return 0;
2054
2055	hlock = curr->held_locks + depth;
2056
2057	hlock->class = class;
2058	hlock->acquire_ip = ip;
2059	hlock->instance = lock;
2060	hlock->trylock = trylock;
2061	hlock->read = read;
2062	hlock->check = check;
2063	hlock->hardirqs_off = hardirqs_off;
2064
2065	if (check != 2)
2066		goto out_calc_hash;
2067#ifdef CONFIG_TRACE_IRQFLAGS
2068	/*
2069	 * If this is a non-trylock acquire in a hardirq or softirq
2070	 * context, then mark the lock as used in that context:
2071	 */
2072	if (!trylock) {
2073		if (read) {
2074			if (curr->hardirq_context)
2075				if (!mark_lock(curr, hlock,
2076						LOCK_USED_IN_HARDIRQ_READ))
2077					return 0;
2078			if (curr->softirq_context)
2079				if (!mark_lock(curr, hlock,
2080						LOCK_USED_IN_SOFTIRQ_READ))
2081					return 0;
2082		} else {
2083			if (curr->hardirq_context)
2084				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2085					return 0;
2086			if (curr->softirq_context)
2087				if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2088					return 0;
2089		}
2090	}
2091	if (!hardirqs_off) {
2092		if (read) {
2093			if (!mark_lock(curr, hlock,
2094					LOCK_ENABLED_HARDIRQS_READ))
2095				return 0;
2096			if (curr->softirqs_enabled)
2097				if (!mark_lock(curr, hlock,
2098						LOCK_ENABLED_SOFTIRQS_READ))
2099					return 0;
2100		} else {
2101			if (!mark_lock(curr, hlock,
2102					LOCK_ENABLED_HARDIRQS))
2103				return 0;
2104			if (curr->softirqs_enabled)
2105				if (!mark_lock(curr, hlock,
2106						LOCK_ENABLED_SOFTIRQS))
2107					return 0;
2108		}
2109	}
2110#endif
2111	/* mark it as used: */
2112	if (!mark_lock(curr, hlock, LOCK_USED))
2113		return 0;
2114out_calc_hash:
2115	/*
2116	 * Calculate the chain hash: it's the combined hash of all the
2117	 * lock keys along the dependency chain. We save the hash value
2118	 * at every step so that we can get the current hash easily
2119	 * after unlock. The chain hash is then used to cache dependency
2120	 * results.
2121	 *
2122	 * The 'key ID' (the index of the class in lock_classes[]) is the
2123	 * most compact key value to drive the hash with, not class->key.
2124	 */
2125	id = class - lock_classes;
2126	if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
2127		return 0;
2128
2129	chain_key = curr->curr_chain_key;
2130	if (!depth) {
2131		if (DEBUG_LOCKS_WARN_ON(chain_key != 0))
2132			return 0;
2133		chain_head = 1;
2134	}
2135
2136	hlock->prev_chain_key = chain_key;
2137
2138#ifdef CONFIG_TRACE_IRQFLAGS
2139	/*
2140	 * Keep track of points where we cross into an interrupt context:
2141	 */
2142	hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2143				curr->softirq_context;
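	/*
	 * (So irq_context is 0 in process context, 1 in softirq context
	 *  and 2 or 3 in hardirq context.)
	 */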
2144	if (depth) {
2145		struct held_lock *prev_hlock;
2146
2147		prev_hlock = curr->held_locks + depth-1;
2148		/*
2149		 * If we cross into another context, reset the
2150		 * hash key (this also prevents the checking and the
2151		 * adding of the dependency to 'prev'):
2152		 */
2153		if (prev_hlock->irq_context != hlock->irq_context) {
2154			chain_key = 0;
2155			chain_head = 1;
2156		}
2157	}
2158#endif
2159	chain_key = iterate_chain_key(chain_key, id);
2160	curr->curr_chain_key = chain_key;
2161
2162	/*
2163	 * Trylock needs to maintain the stack of held locks, but it
2164	 * does not add new dependencies, because trylock can be done
2165	 * in any order.
2166	 *
2167	 * We look up the chain_key and do the O(N^2) check and update of
2168	 * the dependencies only if this is a new dependency chain.
2169	 * (If lookup_chain_cache() returns with 1 it acquires
2170	 * graph_lock for us)
2171	 */
2172	if (!trylock && (check == 2) && lookup_chain_cache(chain_key, class)) {
2173		/*
2174		 * Check whether the last held lock:
2175		 *
2176		 * - is irq-safe, if this lock is irq-unsafe
2177		 * - is softirq-safe, if this lock is hardirq-unsafe
2178		 *
2179		 * And check whether the new lock's dependency graph
2180		 * could lead back to the previous lock.
2181		 *
2182		 * Any of these scenarios could lead to a deadlock. If all
2183		 * validations pass, we add the new lock to the dependency graph.
2184		 */
2185		int ret = check_deadlock(curr, hlock, lock, read);
2186
2187		if (!ret)
2188			return 0;
2189		/*
2190		 * Mark recursive read, as we jump over it when
2191		 * building dependencies (just like we jump over
2192		 * trylock entries):
2193		 */
2194		if (ret == 2)
2195			hlock->read = 2;
2196		/*
2197		 * Add dependency only if this lock is not the head
2198		 * of the chain, and if it's not a secondary read-lock:
2199		 */
2200		if (!chain_head && ret != 2)
2201			if (!check_prevs_add(curr, hlock))
2202				return 0;
2203		graph_unlock();
2204	} else
2205		/* after lookup_chain_cache(): */
2206		if (unlikely(!debug_locks))
2207			return 0;
2208
2209	curr->lockdep_depth++;
2210	check_chain_key(curr);
2211#ifdef CONFIG_DEBUG_LOCKDEP
2212	if (unlikely(!debug_locks))
2213		return 0;
2214#endif
2215	if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
2216		debug_locks_off();
2217		printk("BUG: MAX_LOCK_DEPTH too low!\n");
2218		printk("turning off the locking correctness validator.\n");
2219		return 0;
2220	}
2221
2222	if (unlikely(curr->lockdep_depth > max_lockdep_depth))
2223		max_lockdep_depth = curr->lockdep_depth;
2224
2225	return 1;
2226}
2227
2228static int
2229print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
2230			   unsigned long ip)
2231{
2232	if (!debug_locks_off())
2233		return 0;
2234	if (debug_locks_silent)
2235		return 0;
2236
2237	printk("\n=====================================\n");
2238	printk(  "[ BUG: bad unlock balance detected! ]\n");
2239	printk(  "-------------------------------------\n");
2240	printk("%s/%d is trying to release lock (",
2241		curr->comm, curr->pid);
2242	print_lockdep_cache(lock);
2243	printk(") at:\n");
2244	print_ip_sym(ip);
2245	printk("but there are no more locks to release!\n");
2246	printk("\nother info that might help us debug this:\n");
2247	lockdep_print_held_locks(curr);
2248
2249	printk("\nstack backtrace:\n");
2250	dump_stack();
2251
2252	return 0;
2253}
2254
2255/*
2256 * Common debugging checks for both nested and non-nested unlock:
2257 */
2258static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
2259			unsigned long ip)
2260{
2261	if (unlikely(!debug_locks))
2262		return 0;
2263	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
2264		return 0;
2265
2266	if (curr->lockdep_depth <= 0)
2267		return print_unlock_imbalance_bug(curr, lock, ip);
2268
2269	return 1;
2270}
2271
2272/*
2273 * Remove the lock to the list of currently held locks in a
2274 * potentially non-nested (out of order) manner. This is a
2275 * relatively rare operation, as all the unlock APIs default
2276 * to nested mode (which uses lock_release()):
2277 */
2278static int
2279lock_release_non_nested(struct task_struct *curr,
2280			struct lockdep_map *lock, unsigned long ip)
2281{
2282	struct held_lock *hlock, *prev_hlock;
2283	unsigned int depth;
2284	int i;
2285
2286	/*
2287	 * Check whether the lock exists in the current stack
2288	 * of held locks:
2289	 */
2290	depth = curr->lockdep_depth;
2291	if (DEBUG_LOCKS_WARN_ON(!depth))
2292		return 0;
2293
2294	prev_hlock = NULL;
2295	for (i = depth-1; i >= 0; i--) {
2296		hlock = curr->held_locks + i;
2297		/*
2298		 * We must not cross into another context:
2299		 */
2300		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2301			break;
2302		if (hlock->instance == lock)
2303			goto found_it;
2304		prev_hlock = hlock;
2305	}
2306	return print_unlock_imbalance_bug(curr, lock, ip);
2307
2308found_it:
2309	/*
2310	 * We have the right lock to unlock, 'hlock' points to it.
2311	 * Now we remove it from the stack, and add back the other
2312	 * entries (if any), recalculating the hash along the way:
2313	 */
2314	curr->lockdep_depth = i;
2315	curr->curr_chain_key = hlock->prev_chain_key;
2316
2317	for (i++; i < depth; i++) {
2318		hlock = curr->held_locks + i;
2319		if (!__lock_acquire(hlock->instance,
2320			hlock->class->subclass, hlock->trylock,
2321				hlock->read, hlock->check, hlock->hardirqs_off,
2322				hlock->acquire_ip))
2323			return 0;
2324	}
2325
2326	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
2327		return 0;
2328	return 1;
2329}
2330
2331/*
2332 * Remove the lock from the list of currently held locks - this gets
2333 * called on mutex_unlock()/spin_unlock*() (or on a failed
2334 * mutex_lock_interruptible()). This is done for unlocks that nest
2335 * perfectly. (i.e. the current top of the lock-stack is unlocked)
2336 */
2337static int lock_release_nested(struct task_struct *curr,
2338			       struct lockdep_map *lock, unsigned long ip)
2339{
2340	struct held_lock *hlock;
2341	unsigned int depth;
2342
2343	/*
2344	 * Pop off the top of the lock stack:
2345	 */
2346	depth = curr->lockdep_depth - 1;
2347	hlock = curr->held_locks + depth;
2348
2349	/*
2350	 * Is the unlock non-nested:
2351	 */
2352	if (hlock->instance != lock)
2353		return lock_release_non_nested(curr, lock, ip);
2354	curr->lockdep_depth--;
2355
2356	if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0)))
2357		return 0;
2358
2359	curr->curr_chain_key = hlock->prev_chain_key;
2360
2361#ifdef CONFIG_DEBUG_LOCKDEP
2362	hlock->prev_chain_key = 0;
2363	hlock->class = NULL;
2364	hlock->acquire_ip = 0;
2365	hlock->irq_context = 0;
2366#endif
2367	return 1;
2368}
2369
2370/*
2371 * Remove the lock from the list of currently held locks - this gets
2372 * called on mutex_unlock()/spin_unlock*() (or on a failed
2373 * mutex_lock_interruptible()). The unlock can nest perfectly or be
2374 * done out of order - we dispatch to the appropriate helper above.
2375 */
2376static void
2377__lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
2378{
2379	struct task_struct *curr = current;
2380
2381	if (!check_unlock(curr, lock, ip))
2382		return;
2383
2384	if (nested) {
2385		if (!lock_release_nested(curr, lock, ip))
2386			return;
2387	} else {
2388		if (!lock_release_non_nested(curr, lock, ip))
2389			return;
2390	}
2391
2392	check_chain_key(curr);
2393}
2394
2395/*
2396 * Check whether we follow the irq-flags state precisely:
2397 */
2398static void check_flags(unsigned long flags)
2399{
2400#if defined(CONFIG_DEBUG_LOCKDEP) && defined(CONFIG_TRACE_IRQFLAGS)
2401	if (!debug_locks)
2402		return;
2403
2404	if (irqs_disabled_flags(flags))
2405		DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled);
2406	else
2407		DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled);
2408
2409	/*
2410	 * We dont accurately track softirq state in e.g.
2411	 * hardirq contexts (such as on 4KSTACKS), so only
2412	 * check if not in hardirq contexts:
2413	 */
2414	if (!hardirq_count()) {
2415		if (softirq_count())
2416			DEBUG_LOCKS_WARN_ON(current->softirqs_enabled);
2417		else
2418			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
2419	}
2420
2421	if (!debug_locks)
2422		print_irqtrace_events(current);
2423#endif
2424}
2425
2426/*
2427 * We are not always called with irqs disabled - do that here,
2428 * and also avoid lockdep recursion:
2429 */
2430void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2431		  int trylock, int read, int check, unsigned long ip)
2432{
2433	unsigned long flags;
2434
2435	if (unlikely(current->lockdep_recursion))
2436		return;
2437
2438	raw_local_irq_save(flags);
2439	check_flags(flags);
2440
2441	current->lockdep_recursion = 1;
2442	__lock_acquire(lock, subclass, trylock, read, check,
2443		       irqs_disabled_flags(flags), ip);
2444	current->lockdep_recursion = 0;
2445	raw_local_irq_restore(flags);
2446}
2447
2448EXPORT_SYMBOL_GPL(lock_acquire);
2449
2450void lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
2451{
2452	unsigned long flags;
2453
2454	if (unlikely(current->lockdep_recursion))
2455		return;
2456
2457	raw_local_irq_save(flags);
2458	check_flags(flags);
2459	current->lockdep_recursion = 1;
2460	__lock_release(lock, nested, ip);
2461	current->lockdep_recursion = 0;
2462	raw_local_irq_restore(flags);
2463}
2464
2465EXPORT_SYMBOL_GPL(lock_release);
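
/*
 * The various lock wrappers funnel into the two entry points above via
 * the *_acquire()/*_release() macros in <linux/lockdep.h>, e.g. with
 * CONFIG_PROVE_LOCKING, roughly:
 *
 *	# define spin_acquire(l, s, t, i)	lock_acquire(l, s, t, 0, 2, i)
 *	# define spin_release(l, n, i)		lock_release(l, n, i)
 *
 * i.e. spinlocks are exclusive (read == 0), fully checked (check == 2)
 * locks; the rwlock/rwsem read-side variants pass a nonzero 'read'
 * argument instead.
 */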
2466
2467/*
2468 * Used by the testsuite (lib/locking-selftest.c) to sanitize the
2469 * validator state after a simulated failure:
2470 */
2471
2472void lockdep_reset(void)
2473{
2474	unsigned long flags;
2475	int i;
2476
2477	raw_local_irq_save(flags);
2478	current->curr_chain_key = 0;
2479	current->lockdep_depth = 0;
2480	current->lockdep_recursion = 0;
2481	memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
2482	nr_hardirq_chains = 0;
2483	nr_softirq_chains = 0;
2484	nr_process_chains = 0;
2485	debug_locks = 1;
2486	for (i = 0; i < CHAINHASH_SIZE; i++)
2487		INIT_LIST_HEAD(chainhash_table + i);
2488	raw_local_irq_restore(flags);
2489}
2490
2491static void zap_class(struct lock_class *class)
2492{
2493	int i;
2494
2495	/*
2496	 * Remove all dependencies this lock is
2497	 * involved in:
2498	 */
2499	for (i = 0; i < nr_list_entries; i++) {
2500		if (list_entries[i].class == class)
2501			list_del_rcu(&list_entries[i].entry);
2502	}
2503	/*
2504	 * Unhash the class and remove it from the all_lock_classes list:
2505	 */
2506	list_del_rcu(&class->hash_entry);
2507	list_del_rcu(&class->lock_entry);
2508
2509}
2510
2511static inline int within(void *addr, void *start, unsigned long size)
2512{
2513	return addr >= start && addr < start + size;
2514}
2515
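/*
 * Called from the module unload path (kernel/module.c) with the
 * module's address ranges, so that classes whose keys live in
 * soon-to-be-freed module memory are removed from the hashes before
 * that memory goes away:
 */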
2516void lockdep_free_key_range(void *start, unsigned long size)
2517{
2518	struct lock_class *class, *next;
2519	struct list_head *head;
2520	unsigned long flags;
2521	int i;
2522
2523	raw_local_irq_save(flags);
2524	graph_lock();
2525
2526	/*
2527	 * Unhash all classes that were created by this module:
2528	 */
2529	for (i = 0; i < CLASSHASH_SIZE; i++) {
2530		head = classhash_table + i;
2531		if (list_empty(head))
2532			continue;
2533		list_for_each_entry_safe(class, next, head, hash_entry)
2534			if (within(class->key, start, size))
2535				zap_class(class);
2536	}
2537
2538	graph_unlock();
2539	raw_local_irq_restore(flags);
2540}
2541
2542void lockdep_reset_lock(struct lockdep_map *lock)
2543{
2544	struct lock_class *class, *next;
2545	struct list_head *head;
2546	unsigned long flags;
2547	int i, j;
2548
2549	raw_local_irq_save(flags);
2550
2551	/*
2552	 * Remove all classes this lock might have:
2553	 */
2554	for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
2555		/*
2556		 * Look the class up; if it exists, zap it:
2557		 */
2558		class = look_up_lock_class(lock, j);
2559		if (class)
2560			zap_class(class);
2561	}
2562	/*
2563	 * Debug check: in the end all mapped classes should
2564	 * be gone.
2565	 */
2566	graph_lock();
2567	for (i = 0; i < CLASSHASH_SIZE; i++) {
2568		head = classhash_table + i;
2569		if (list_empty(head))
2570			continue;
2571		list_for_each_entry_safe(class, next, head, hash_entry) {
2572			if (unlikely(class == lock->class_cache)) {
2573				if (debug_locks_off_graph_unlock())
2574					WARN_ON(1);
2575				goto out_restore;
2576			}
2577		}
2578	}
2579	graph_unlock();
2580
2581out_restore:
2582	raw_local_irq_restore(flags);
2583}
2584
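/*
 * Needs to run before the first lock class is looked up, i.e. before
 * the very first spinlock is taken - which is why start_kernel() (and
 * the early arch setup code mentioned below) calls this as early as
 * possible:
 */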
2585void lockdep_init(void)
2586{
2587	int i;
2588
2589	/*
2590	 * Some architectures have their own start_kernel()
2591	 * code which calls lockdep_init(), while we also
2592	 * call lockdep_init() from start_kernel() itself,
2593	 * and we want to initialize the hashes only once:
2594	 */
2595	if (lockdep_initialized)
2596		return;
2597
2598	for (i = 0; i < CLASSHASH_SIZE; i++)
2599		INIT_LIST_HEAD(classhash_table + i);
2600
2601	for (i = 0; i < CHAINHASH_SIZE; i++)
2602		INIT_LIST_HEAD(chainhash_table + i);
2603
2604	lockdep_initialized = 1;
2605}
2606
2607void __init lockdep_info(void)
2608{
2609	printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n");
2610
2611	printk("... MAX_LOCKDEP_SUBCLASSES:  %lu\n", MAX_LOCKDEP_SUBCLASSES);
2612	printk("... MAX_LOCK_DEPTH:          %lu\n", MAX_LOCK_DEPTH);
2613	printk("... MAX_LOCKDEP_KEYS:        %lu\n", MAX_LOCKDEP_KEYS);
2614	printk("... CLASSHASH_SIZE:          %lu\n", CLASSHASH_SIZE);
2615	printk("... MAX_LOCKDEP_ENTRIES:     %lu\n", MAX_LOCKDEP_ENTRIES);
2616	printk("... MAX_LOCKDEP_CHAINS:      %lu\n", MAX_LOCKDEP_CHAINS);
2617	printk("... CHAINHASH_SIZE:          %lu\n", CHAINHASH_SIZE);
2618
2619	printk(" memory used by lock dependency info: %lu kB\n",
2620		(sizeof(struct lock_class) * MAX_LOCKDEP_KEYS +
2621		sizeof(struct list_head) * CLASSHASH_SIZE +
2622		sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
2623		sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
2624		sizeof(struct list_head) * CHAINHASH_SIZE) / 1024);
2625
2626	printk(" per task-struct memory footprint: %lu bytes\n",
2627		sizeof(struct held_lock) * MAX_LOCK_DEPTH);
2628
2629#ifdef CONFIG_DEBUG_LOCKDEP
2630	if (lockdep_init_error)
2631		printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n");
2632#endif
2633}
2634
2635static inline int in_range(const void *start, const void *addr, const void *end)
2636{
2637	return addr >= start && addr <= end;
2638}
2639
2640static void
2641print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
2642		     const void *mem_to, struct held_lock *hlock)
2643{
2644	if (!debug_locks_off())
2645		return;
2646	if (debug_locks_silent)
2647		return;
2648
2649	printk("\n=========================\n");
2650	printk(  "[ BUG: held lock freed! ]\n");
2651	printk(  "-------------------------\n");
2652	printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
2653		curr->comm, curr->pid, mem_from, mem_to-1);
2654	print_lock(hlock);
2655	lockdep_print_held_locks(curr);
2656
2657	printk("\nstack backtrace:\n");
2658	dump_stack();
2659}
2660
2661/*
2662 * Called when kernel memory is freed (or unmapped), or if a lock
2663 * is destroyed or reinitialized - this code checks whether there is
2664 * any held lock in the memory range of <from> to <to>:
2665 */
2666void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
2667{
2668	const void *mem_to = mem_from + mem_len, *lock_from, *lock_to;
2669	struct task_struct *curr = current;
2670	struct held_lock *hlock;
2671	unsigned long flags;
2672	int i;
2673
2674	if (unlikely(!debug_locks))
2675		return;
2676
2677	local_irq_save(flags);
2678	for (i = 0; i < curr->lockdep_depth; i++) {
2679		hlock = curr->held_locks + i;
2680
2681		lock_from = (void *)hlock->instance;
2682		lock_to = (void *)(hlock->instance + 1);
2683
2684		if (!in_range(mem_from, lock_from, mem_to) &&
2685					!in_range(mem_from, lock_to, mem_to))
2686			continue;
2687
2688		print_freed_lock_bug(curr, mem_from, mem_to, hlock);
2689		break;
2690	}
2691	local_irq_restore(flags);
2692}
2693EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
2694
2695static void print_held_locks_bug(struct task_struct *curr)
2696{
2697	if (!debug_locks_off())
2698		return;
2699	if (debug_locks_silent)
2700		return;
2701
2702	printk("\n=====================================\n");
2703	printk(  "[ BUG: lock held at task exit time! ]\n");
2704	printk(  "-------------------------------------\n");
2705	printk("%s/%d is exiting with locks still held!\n",
2706		curr->comm, curr->pid);
2707	lockdep_print_held_locks(curr);
2708
2709	printk("\nstack backtrace:\n");
2710	dump_stack();
2711}
2712
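/*
 * Called from the exit path (do_exit()) to catch tasks that exit with
 * locks still held:
 */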
2713void debug_check_no_locks_held(struct task_struct *task)
2714{
2715	if (unlikely(task->lockdep_depth > 0))
2716		print_held_locks_bug(task);
2717}
2718
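/*
 * Full system-wide dump, used from debugging aids such as the magic
 * SysRq 'd' (show-all-locks) handler:
 */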
2719void debug_show_all_locks(void)
2720{
2721	struct task_struct *g, *p;
2722	int count = 10;
2723	int unlock = 1;
2724
2725	if (unlikely(!debug_locks)) {
2726		printk("INFO: lockdep is turned off.\n");
2727		return;
2728	}
2729	printk("\nShowing all locks held in the system:\n");
2730
2731	/*
2732	 * Here we try to get the tasklist_lock as hard as possible;
2733	 * if we are not successful after 2 seconds we ignore it (but
2734	 * keep trying). This is to enable a debug printout even if a
2735	 * tasklist_lock-holding task deadlocks or crashes.
2736	 */
2737retry:
2738	if (!read_trylock(&tasklist_lock)) {
2739		if (count == 10)
2740			printk("hm, tasklist_lock locked, retrying... ");
2741		if (count) {
2742			count--;
2743			printk(" #%d", 10-count);
2744			mdelay(200);
2745			goto retry;
2746		}
2747		printk(" ignoring it.\n");
2748		unlock = 0;
2749	}
2750	if (count != 10)
2751		printk(" locked it.\n");
2752
2753	do_each_thread(g, p) {
2754		if (p->lockdep_depth)
2755			lockdep_print_held_locks(p);
2756		if (!unlock)
2757			if (read_trylock(&tasklist_lock))
2758				unlock = 1;
2759	} while_each_thread(g, p);
2760
2761	printk("\n");
2762	printk("=============================================\n\n");
2763
2764	if (unlock)
2765		read_unlock(&tasklist_lock);
2766}
2767
2768EXPORT_SYMBOL_GPL(debug_show_all_locks);
2769
2770void debug_show_held_locks(struct task_struct *task)
2771{
2772	if (unlikely(!debug_locks)) {
2773		printk("INFO: lockdep is turned off.\n");
2774		return;
2775	}
2776	lockdep_print_held_locks(task);
2777}
2778
2779EXPORT_SYMBOL_GPL(debug_show_held_locks);
2780