/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * see Documentation/lockdep-design.txt for more details.
 */
#ifndef __LINUX_LOCKDEP_H
#define __LINUX_LOCKDEP_H

struct task_struct;
struct lockdep_map;

/* for sysctl */
extern int prove_locking;
extern int lock_stat;

#ifdef CONFIG_LOCKDEP

#include <linux/linkage.h>
#include <linux/list.h>
#include <linux/debug_locks.h>
#include <linux/stacktrace.h>

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 */
#define XXX_LOCK_USAGE_STATES		(1+3*4)

#define MAX_LOCKDEP_SUBCLASSES		8UL

/*
 * Lock-classes are keyed via unique addresses, by embedding the
 * lockclass-key into the kernel (or module) .data section. (For
 * static locks we use the lock address itself as the key.)
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

struct lock_class_key {
	struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
};
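
/*
 * Illustrative sketch (not part of the original header, hypothetical
 * names): a subsystem that wants every instance of its lock to share
 * one class declares a key in .data and passes it at init time:
 *
 *	static struct lock_class_key my_driver_lock_key;
 *
 * Only the key's *address* identifies the class, which is why a single
 * byte per possible subclass is enough storage.
 */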

extern struct lock_class_key __lockdep_no_validate__;

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself:
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct list_head		hash_entry;

	/*
	 * global list of all lock-classes:
	 */
	struct list_head		lock_entry;

	struct lockdep_subclass_key	*key;
	unsigned int			subclass;
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	struct stack_trace		usage_traces[XXX_LOCK_USAGE_STATES];

	/*
	 * These fields represent a directed graph of lock dependencies;
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			version;

	/*
	 * Statistics counter:
	 */
	unsigned long			ops;

	const char			*name;
	int				name_version;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
};

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache;
	const char			*name;
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};

/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
struct lock_list {
	struct list_head		entry;
	struct lock_class		*class;
	struct stack_trace		trace;
	int				distance;

	/*
	 * The parent field is used to implement breadth-first search; bit 0
	 * is reused to indicate whether the lock has been visited during BFS.
	 */
	struct lock_list		*parent;
};

/*
 * We record lock dependency chains, so that we can cache them:
 */
struct lock_chain {
	u8				irq_context;
	u8				depth;
	u16				base;
	struct list_head		entry;
	u64				chain_key;
};

#define MAX_LOCKDEP_KEYS_BITS		13
/*
 * Subtract one because we offset hlock->class_idx by 1 in order
 * to make 0 mean no class. This avoids overflowing the class_idx
 * bitfield and hitting the BUG in hlock_class().
 */
#define MAX_LOCKDEP_KEYS		((1UL << MAX_LOCKDEP_KEYS_BITS) - 1)

struct held_lock {
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * it is absolutely critical for 100% coverage of the validator
	 * to have a unique key value for every unique dependency path
	 * that can occur in the system, to make a unique hash value
	 * as likely as possible - hence the 64-bit width.
	 *
	 * The task struct holds the current hash value (initialized
	 * with zero), here we store the previous hash value:
	 */
	u64				prev_chain_key;
	unsigned long			acquire_ip;
	struct lockdep_map		*instance;
	struct lockdep_map		*nest_lock;
#ifdef CONFIG_LOCK_STAT
	u64				waittime_stamp;
	u64				holdtime_stamp;
#endif
	unsigned int			class_idx:MAX_LOCKDEP_KEYS_BITS;
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway, and we'd just unnecessarily increase the number of
	 * dependencies otherwise. [Note: hardirq and softirq contexts
	 * are separated from each other too.]
	 *
	 * The following field is used to detect when we cross into an
	 * interrupt context:
	 */
	unsigned int irq_context:2; /* bit 0 - soft, bit 1 - hard */
	unsigned int trylock:1;						/* 16 bits */

	unsigned int read:2;        /* see lock_acquire() comment */
	unsigned int check:2;       /* see lock_acquire() comment */
	unsigned int hardirqs_off:1;
	unsigned int references:11;					/* 32 bits */
};

/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_init(void);
extern void lockdep_info(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
extern void lockdep_sys_exit(void);

extern void lockdep_off(void);
extern void lockdep_on(void);

/*
 * These methods are used by specific locking variants (spinlocks,
 * rwlocks, mutexes and rwsems) to pass init/acquire/release events
 * to lockdep:
 */

extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
			     struct lock_class_key *key, int subclass);

/*
 * To initialize a lockdep_map statically use this macro.
 * Note that _name must not be NULL.
 */
#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
	{ .name = (_name), .key = (void *)(_key), }
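
/*
 * Illustrative use (hypothetical names): a statically allocated map,
 * keyed by its own key object:
 *
 *	static struct lock_class_key my_map_key;
 *	static struct lockdep_map my_map =
 *		STATIC_LOCKDEP_MAP_INIT("my_map", &my_map_key);
 */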

/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key) \
		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
#define lockdep_set_class_and_name(lock, key, name) \
		lockdep_init_map(&(lock)->dep_map, name, key, 0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
#define lockdep_set_subclass(lock, sub)	\
		lockdep_init_map(&(lock)->dep_map, #lock, \
				 (lock)->dep_map.key, sub)

#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class(lock, &__lockdep_no_validate__)
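
/*
 * Illustrative sketch (hypothetical names): re-key a per-object lock
 * whose dependencies differ by object kind, so the two kinds do not
 * share one (too broad) class:
 *
 *	static struct lock_class_key dir_lock_key;
 *
 *	if (S_ISDIR(inode->i_mode))
 *		lockdep_set_class(&inode->lock, &dir_lock_key);
 */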
/*
 * Compare locking classes
 */
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}
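
/*
 * Illustrative use (hypothetical names): check that a lock was
 * initialized with the class key we expect:
 *
 *	WARN_ON(!lockdep_match_class(&foo->lock, &foo_lock_key));
 */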

/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for check:
 *
 *   0: disabled
 *   1: simple checks (freeing, held-at-exit-time, etc.)
 *   2: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, int nested,
			 unsigned long ip);
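
/*
 * Illustrative sketch of the pattern a locking primitive follows: an
 * exclusive (read=0), fully validated (check=2), non-trylock acquire,
 * paired with a non-nested release:
 *
 *	lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, _RET_IP_);
 *	... actually take the lock ...
 *	lock_release(&lock->dep_map, 0, _RET_IP_);
 */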

#define lockdep_is_held(lock)	lock_is_held(&(lock)->dep_map)

extern int lock_is_held(struct lockdep_map *lock);

extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
		unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);

# define INIT_LOCKDEP				.lockdep_recursion = 0, .lockdep_reclaim_gfp = 0,

#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)

#define lockdep_assert_held(l)	WARN_ON(debug_locks && !lockdep_is_held(l))
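
/*
 * Illustrative use (hypothetical names): document and verify a "caller
 * must hold the lock" precondition:
 *
 *	static void counter_inc(struct foo *foo)
 *	{
 *		lockdep_assert_held(&foo->lock);
 *		foo->counter++;
 *	}
 */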

#else /* !LOCKDEP */

static inline void lockdep_off(void)
{
}

static inline void lockdep_on(void)
{
}

# define lock_acquire(l, s, t, r, c, n, i)	do { } while (0)
# define lock_release(l, n, i)			do { } while (0)
# define lock_set_class(l, n, k, s, i)		do { } while (0)
# define lock_set_subclass(l, s, i)		do { } while (0)
# define lockdep_set_current_reclaim_state(g)	do { } while (0)
# define lockdep_clear_current_reclaim_state()	do { } while (0)
# define lockdep_trace_alloc(g)			do { } while (0)
# define lockdep_init()				do { } while (0)
# define lockdep_info()				do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
# define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
# define lockdep_set_subclass(lock, sub)	do { } while (0)

# define lockdep_set_novalidate_class(lock)	do { } while (0)

/*
 * We don't define lockdep_match_class() and lockdep_match_key() for !LOCKDEP
 * case since the result is not well defined and the caller should rather
 * #ifdef the call himself.
 */

# define INIT_LOCKDEP
# define lockdep_reset()		do { debug_locks = 1; } while (0)
# define lockdep_free_key_range(start, size)	do { } while (0)
# define lockdep_sys_exit()			do { } while (0)
/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

#define lockdep_depth(tsk)	(0)

#define lockdep_assert_held(l)			do { } while (0)

#endif /* !LOCKDEP */

#ifdef CONFIG_LOCK_STAT

extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);

#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)
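
/*
 * Illustrative sketch (hypothetical names): a lock implementation
 * plugs its trylock fast path and blocking slow path in, so the wait
 * is recorded only when the fast path fails:
 *
 *	LOCK_CONTENDED(lock, my_trylock, my_lock_slowpath);
 */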

#else /* CONFIG_LOCK_STAT */

#define lock_contended(lockdep_map, ip) do {} while (0)
#define lock_acquired(lockdep_map, ip) do {} while (0)

#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#endif /* CONFIG_LOCK_STAT */

#ifdef CONFIG_LOCKDEP

/*
 * On lockdep we don't want the hand-coded irq-enable of
 * _raw_*_lock_flags() code, because lockdep assumes
 * that interrupts are not re-enabled during lock-acquire:
 */
#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	LOCK_CONTENDED((_lock), (try), (lock))

#else /* CONFIG_LOCKDEP */

#define LOCK_CONTENDED_FLAGS(_lock, try, lock, lockfl, flags) \
	lockfl((_lock), (flags))

#endif /* CONFIG_LOCKDEP */

#ifdef CONFIG_GENERIC_HARDIRQS
extern void early_init_irq_lock_class(void);
#else
static inline void early_init_irq_lock_class(void)
{
}
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
extern void early_boot_irqs_off(void);
extern void early_boot_irqs_on(void);
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void early_boot_irqs_off(void)
{
}
static inline void early_boot_irqs_on(void)
{
}
static inline void print_irqtrace_events(struct task_struct *curr)
{
}
#endif

/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
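
/*
 * Illustrative use (hypothetical names): take two locks of the same
 * class in a fixed parent->child order without a false self-deadlock
 * report:
 *
 *	spin_lock(&parent->lock);
 *	spin_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);
 */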

/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 2, n, i)
# else
#  define spin_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define spin_acquire_nest(l, s, t, n, i)	lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define spin_release(l, n, i)			lock_release(l, n, i)
#else
# define spin_acquire(l, s, t, i)		do { } while (0)
# define spin_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 2, NULL, i)
# else
#  define rwlock_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwlock_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 2, 1, NULL, i)
# endif
# define rwlock_release(l, n, i)		lock_release(l, n, i)
#else
# define rwlock_acquire(l, s, t, i)		do { } while (0)
# define rwlock_acquire_read(l, s, t, i)	do { } while (0)
# define rwlock_release(l, n, i)		do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
# else
#  define mutex_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
# endif
# define mutex_release(l, n, i)			lock_release(l, n, i)
#else
# define mutex_acquire(l, s, t, i)		do { } while (0)
# define mutex_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 2, NULL, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 2, NULL, i)
# else
#  define rwsem_acquire(l, s, t, i)		lock_acquire(l, s, t, 0, 1, NULL, i)
#  define rwsem_acquire_read(l, s, t, i)	lock_acquire(l, s, t, 1, 1, NULL, i)
# endif
# define rwsem_release(l, n, i)			lock_release(l, n, i)
#else
# define rwsem_acquire(l, s, t, i)		do { } while (0)
# define rwsem_acquire_read(l, s, t, i)		do { } while (0)
# define rwsem_release(l, n, i)			do { } while (0)
#endif

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# ifdef CONFIG_PROVE_LOCKING
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
# else
#  define lock_map_acquire(l)		lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
# endif
# define lock_map_release(l)			lock_release(l, 1, _THIS_IP_)
#else
# define lock_map_acquire(l)			do { } while (0)
# define lock_map_release(l)			do { } while (0)
#endif

#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 2, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, 0, _THIS_IP_);			\
} while (0)
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
#endif
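
/*
 * Illustrative use (hypothetical names): annotate a function that only
 * sometimes takes a lock, so the dependency is recorded even on calls
 * that never leave the fast path:
 *
 *	static void *pool_alloc(struct pool *pool)
 *	{
 *		might_lock(&pool->lock);
 *		...
 *	}
 */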

#ifdef CONFIG_PROVE_RCU
extern void lockdep_rcu_dereference(const char *file, const int line);
#endif

#endif /* __LINUX_LOCKDEP_H */