Lines matching refs:curr in /netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/kernel/ (the lockdep validator):

524 static void lockdep_print_held_locks(struct task_struct *curr)
526 int i, depth = curr->lockdep_depth;
529 printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr));
533 depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr));
537 print_lock(curr->held_locks + i);
1044 struct task_struct *curr = current;
1054 curr->comm, task_pid_nr(curr));
1076 struct task_struct *curr = current;
1098 lockdep_print_held_locks(curr);
1308 print_bad_irq_dependency(struct task_struct *curr,
1328 curr->comm, task_pid_nr(curr),
1329 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
1330 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
1331 curr->hardirqs_enabled,
1332 curr->softirqs_enabled);
1358 lockdep_print_held_locks(curr);
1379 check_usage(struct task_struct *curr, struct held_lock *prev,
1405 return print_bad_irq_dependency(curr, &this, &that,
1452 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
1461 if (!check_usage(curr, prev, next, bit,
1473 if (!check_usage(curr, prev, next, bit,
1481 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1485 if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \
1508 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1522 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
1533 curr->comm, task_pid_nr(curr));
1539 lockdep_print_held_locks(curr);
1556 check_deadlock(struct task_struct *curr, struct held_lock *next,
1563 for (i = 0; i < curr->lockdep_depth; i++) {
1564 prev = curr->held_locks + i;
1586 return print_deadlock_bug(curr, prev, next);
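
The print_deadlock_bug()/check_deadlock() hits above are the same-class recursion check: check_deadlock() walks curr->held_locks for a lock of the class about to be taken. A minimal, hypothetical sketch of the pattern it reports (list_lock, inner() and outer() are invented names; spin_lock()/spin_unlock() are the ordinary kernel primitives):

/*
 * Hypothetical illustration only.  Taking a lock whose class is already
 * in curr->held_locks is what check_deadlock() detects; the report comes
 * out of print_deadlock_bug() ("possible recursive locking detected").
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(list_lock);

static void inner(void)
{
        spin_lock(&list_lock);          /* same class already held: reported */
        /* ... */
        spin_unlock(&list_lock);
}

static void outer(void)
{
        spin_lock(&list_lock);
        inner();                        /* would self-deadlock at runtime */
        spin_unlock(&list_lock);
}
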
1614 check_prev_add(struct task_struct *curr, struct held_lock *prev,
1647 if (!check_prev_add_irq(curr, prev, next))
1714 * The ones that are relevant are (in increasing distance from curr):
1719 check_prevs_add(struct task_struct *curr, struct held_lock *next)
1721 int depth = curr->lockdep_depth;
1736 if (curr->held_locks[depth].irq_context !=
1737 curr->held_locks[depth-1].irq_context)
1741 int distance = curr->lockdep_depth - depth + 1;
1742 hlock = curr->held_locks + depth-1;
1748 if (!check_prev_add(curr, hlock, next,
1769 if (curr->held_locks[depth].irq_context !=
1770 curr->held_locks[depth-1].irq_context)
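
check_prev_add()/check_prevs_add() above record the ordering between the lock being taken and the locks already in curr->held_locks, which is what catches classic AB-BA inversions. A hedged sketch of such an inversion (lock_a, lock_b, path_one() and path_two() are invented for illustration):

/*
 * Hypothetical illustration only.  check_prev_add() records the
 * dependency A -> B when path_one() runs; when path_two() later takes
 * B then A, the reverse edge closes a cycle and lockdep reports a
 * possible circular locking dependency.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);

static void path_one(void)
{
        spin_lock(&lock_a);
        spin_lock(&lock_b);             /* records A -> B */
        spin_unlock(&lock_b);
        spin_unlock(&lock_a);
}

static void path_two(void)
{
        spin_lock(&lock_b);
        spin_lock(&lock_a);             /* B -> A: cycle, reported */
        spin_unlock(&lock_a);
        spin_unlock(&lock_b);
}
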
1800 static inline int lookup_chain_cache(struct task_struct *curr,
1860 for (i = curr->lockdep_depth - 1; i >= 0; i--) {
1861 hlock_curr = curr->held_locks + i;
1867 chain->depth = curr->lockdep_depth + 1 - i;
1878 int lock_id = curr->held_locks[i].class_idx - 1;
1890 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
1904 lookup_chain_cache(curr, hlock, chain_key)) {
1917 int ret = check_deadlock(curr, hlock, lock, hlock->read);
1933 if (!check_prevs_add(curr, hlock))
1944 static inline int validate_chain(struct task_struct *curr,
1956 static void check_chain_key(struct task_struct *curr)
1963 for (i = 0; i < curr->lockdep_depth; i++) {
1964 hlock = curr->held_locks + i;
1968 curr->lockdep_depth, i,
1983 if (chain_key != curr->curr_chain_key) {
1986 curr->lockdep_depth, i,
1988 (unsigned long long)curr->curr_chain_key);
1994 print_usage_bug(struct task_struct *curr, struct held_lock *this,
2009 curr->comm, task_pid_nr(curr),
2010 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
2011 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
2012 trace_hardirqs_enabled(curr),
2013 trace_softirqs_enabled(curr));
2019 print_irqtrace_events(curr);
2021 lockdep_print_held_locks(curr);
2033 valid_state(struct task_struct *curr, struct held_lock *this,
2037 return print_usage_bug(curr, this, bad_bit, new_bit);
2041 static int mark_lock(struct task_struct *curr, struct held_lock *this,
2050 print_irq_inversion_bug(struct task_struct *curr,
2063 curr->comm, task_pid_nr(curr));
2073 lockdep_print_held_locks(curr);
2091 check_usage_forwards(struct task_struct *curr, struct held_lock *this,
2106 return print_irq_inversion_bug(curr, &root, target_entry,
2115 check_usage_backwards(struct task_struct *curr, struct held_lock *this,
2130 return print_irq_inversion_bug(curr, &root, target_entry,
2134 void print_irqtrace_events(struct task_struct *curr)
2136 printk("irq event stamp: %u\n", curr->irq_events);
2137 printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event);
2138 print_ip_sym(curr->hardirq_enable_ip);
2139 printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
2140 print_ip_sym(curr->hardirq_disable_ip);
2141 printk("softirqs last enabled at (%u): ", curr->softirq_enable_event);
2142 print_ip_sym(curr->softirq_enable_ip);
2143 printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
2144 print_ip_sym(curr->softirq_disable_ip);
2190 mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2211 if (!valid_state(curr, this, new_bit, excl_bit))
2219 !usage(curr, this, excl_bit, state_name(new_bit & ~1)))
2226 if (!valid_state(curr, this, new_bit, excl_bit + 1))
2230 !usage(curr, this, excl_bit + 1,
2251 mark_held_locks(struct task_struct *curr, enum mark_type mark)
2257 for (i = 0; i < curr->lockdep_depth; i++) {
2258 hlock = curr->held_locks + i;
2266 if (!mark_lock(curr, hlock, usage_bit))
2294 struct task_struct *curr = current;
2304 if (unlikely(curr->hardirqs_enabled)) {
2314 curr->hardirqs_enabled = 1;
2324 if (!mark_held_locks(curr, HARDIRQ))
2331 if (curr->softirqs_enabled)
2332 if (!mark_held_locks(curr, SOFTIRQ))
2335 curr->hardirq_enable_ip = ip;
2336 curr->hardirq_enable_event = ++curr->irq_events;
2352 struct task_struct *curr = current;
2362 if (curr->hardirqs_enabled) {
2366 curr->hardirqs_enabled = 0;
2367 curr->hardirq_disable_ip = ip;
2368 curr->hardirq_disable_event = ++curr->irq_events;
2386 struct task_struct *curr = current;
2394 if (curr->softirqs_enabled) {
2402 curr->softirqs_enabled = 1;
2403 curr->softirq_enable_ip = ip;
2404 curr->softirq_enable_event = ++curr->irq_events;
2411 if (curr->hardirqs_enabled)
2412 mark_held_locks(curr, SOFTIRQ);
2420 struct task_struct *curr = current;
2428 if (curr->softirqs_enabled) {
2432 curr->softirqs_enabled = 0;
2433 curr->softirq_disable_ip = ip;
2434 curr->softirq_disable_event = ++curr->irq_events;
2443 struct task_struct *curr = current;
2453 if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
2463 mark_held_locks(curr, RECLAIM_FS);
2483 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2491 if (curr->hardirq_context)
2492 if (!mark_lock(curr, hlock,
2495 if (curr->softirq_context)
2496 if (!mark_lock(curr, hlock,
2500 if (curr->hardirq_context)
2501 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2503 if (curr->softirq_context)
2504 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2510 if (!mark_lock(curr, hlock,
2513 if (curr->softirqs_enabled)
2514 if (!mark_lock(curr, hlock,
2518 if (!mark_lock(curr, hlock,
2521 if (curr->softirqs_enabled)
2522 if (!mark_lock(curr, hlock,
2534 if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
2536 if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
2539 if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
2547 static int separate_irq_context(struct task_struct *curr,
2550 unsigned int depth = curr->lockdep_depth;
2555 hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2556 curr->softirq_context;
2560 prev_hlock = curr->held_locks + depth-1;
2575 int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2582 static inline int mark_irqflags(struct task_struct *curr,
2588 static inline int separate_irq_context(struct task_struct *curr,
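
mark_irqflags() above consults curr->hardirq_context/softirq_context and curr->hardirqs_enabled/softirqs_enabled to tag each held lock as used-in-irq or enabled-irq, and mark_lock_irq() reports conflicting uses. A hypothetical sketch of the inconsistency it flags (dev_lock, dev_irq_handler() and dev_ioctl_path() are made-up names; spin_lock_irqsave() in the process-context path would be the fix):

/*
 * Hypothetical illustration only.  The same lock is used from hardirq
 * context (LOCK_USED_IN_HARDIRQ) and also taken with interrupts enabled
 * in process context (LOCK_ENABLED_HARDIRQ); mark_lock_irq() flags the
 * combination as a possible irq-lock inversion.
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(dev_lock);

static irqreturn_t dev_irq_handler(int irq, void *dev_id)
{
        spin_lock(&dev_lock);           /* marked used-in-hardirq */
        /* ... */
        spin_unlock(&dev_lock);
        return IRQ_HANDLED;
}

static void dev_ioctl_path(void)
{
        spin_lock(&dev_lock);           /* irqs still enabled: flagged */
        /* ... */
        spin_unlock(&dev_lock);
}
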
2603 static int mark_lock(struct task_struct *curr, struct held_lock *this,
2638 ret = mark_lock_irq(curr, this, new_bit);
2660 print_irqtrace_events(curr);
2716 struct task_struct *curr = current;
2768 depth = curr->lockdep_depth;
2775 hlock = curr->held_locks + depth - 1;
2786 hlock = curr->held_locks + depth;
2803 if (check == 2 && !mark_irqflags(curr, hlock))
2807 if (!mark_lock(curr, hlock, LOCK_USED))
2824 chain_key = curr->curr_chain_key;
2832 if (separate_irq_context(curr, hlock)) {
2838 if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
2841 curr->curr_chain_key = chain_key;
2842 curr->lockdep_depth++;
2843 check_chain_key(curr);
2848 if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
2856 if (unlikely(curr->lockdep_depth > max_lockdep_depth))
2857 max_lockdep_depth = curr->lockdep_depth;
2863 print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
2875 curr->comm, task_pid_nr(curr));
2881 lockdep_print_held_locks(curr);
2892 static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
2900 if (curr->lockdep_depth <= 0)
2901 return print_unlock_inbalance_bug(curr, lock, ip);
2935 struct task_struct *curr = current;
2941 depth = curr->lockdep_depth;
2947 hlock = curr->held_locks + i;
2957 return print_unlock_inbalance_bug(curr, lock, ip);
2964 curr->lockdep_depth = i;
2965 curr->curr_chain_key = hlock->prev_chain_key;
2968 hlock = curr->held_locks + i;
2977 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
2989 lock_release_non_nested(struct task_struct *curr,
3000 depth = curr->lockdep_depth;
3006 hlock = curr->held_locks + i;
3016 return print_unlock_inbalance_bug(curr, lock, ip);
3040 curr->lockdep_depth = i;
3041 curr->curr_chain_key = hlock->prev_chain_key;
3044 hlock = curr->held_locks + i;
3053 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1))
3064 static int lock_release_nested(struct task_struct *curr,
3073 depth = curr->lockdep_depth - 1;
3074 hlock = curr->held_locks + depth;
3080 return lock_release_non_nested(curr, lock, ip);
3081 curr->lockdep_depth--;
3086 curr->curr_chain_key = hlock->prev_chain_key;
3108 struct task_struct *curr = current;
3110 if (!check_unlock(curr, lock, ip))
3114 if (!lock_release_nested(curr, lock, ip))
3117 if (!lock_release_non_nested(curr, lock, ip))
3121 check_chain_key(curr);
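
The nested/non-nested split above exists because locks need not be released in reverse acquisition order: lock_release_nested() only handles the top of curr->held_locks and otherwise hands off to lock_release_non_nested() (line 3080), which removes the entry from the middle of the stack and re-validates the rest. A small sketch, with invented lock names, of a legal out-of-order release:

/*
 * Hypothetical illustration only.  Unlocking "a" here does not match the
 * top of curr->held_locks, so the release takes the non-nested path,
 * which drops the entry from the middle of the stack and re-checks the
 * remaining chain.  This is legal and produces no report.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(a);
static DEFINE_SPINLOCK(b);

static void shuffle(void)
{
        spin_lock(&a);
        spin_lock(&b);
        spin_unlock(&a);                /* out-of-order release: allowed */
        spin_unlock(&b);
}
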
3126 struct task_struct *curr = current;
3129 for (i = 0; i < curr->lockdep_depth; i++) {
3130 struct held_lock *hlock = curr->held_locks + i;
3270 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
3282 curr->comm, task_pid_nr(curr));
3288 lockdep_print_held_locks(curr);
3299 struct task_struct *curr = current;
3305 depth = curr->lockdep_depth;
3311 hlock = curr->held_locks + i;
3321 print_lock_contention_bug(curr, lock, ip);
3347 struct task_struct *curr = current;
3354 depth = curr->lockdep_depth;
3360 hlock = curr->held_locks + i;
3370 print_lock_contention_bug(curr, lock, _RET_IP_);
3626 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
3638 curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
3640 lockdep_print_held_locks(curr);
3660 struct task_struct *curr = current;
3669 for (i = 0; i < curr->lockdep_depth; i++) {
3670 hlock = curr->held_locks + i;
3676 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
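
The loop above compares each entry in curr->held_locks against the memory range being freed and reports any overlap via print_freed_lock_bug(); it is reached from the allocator free paths (debug_check_no_locks_freed() in this kernel, if memory serves). A hypothetical sketch of the bug it catches (struct widget and the helpers are invented; kmalloc()/kfree()/spin_lock_init() are the ordinary kernel APIs):

/*
 * Hypothetical illustration only.  Freeing memory that still contains a
 * held lock overlaps curr->held_locks, so the free path's check ends in
 * print_freed_lock_bug().
 */
#include <linux/slab.h>
#include <linux/spinlock.h>

struct widget {
        spinlock_t lock;
        int refs;
};

static struct widget *widget_create(void)
{
        struct widget *w = kmalloc(sizeof(*w), GFP_KERNEL);

        if (w)
                spin_lock_init(&w->lock);
        return w;
}

static void widget_destroy(struct widget *w)
{
        spin_lock(&w->lock);
        kfree(w);                       /* freed while w->lock is held: reported */
        /* the matching spin_unlock() can no longer be done safely */
}
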
3683 static void print_held_locks_bug(struct task_struct *curr)
3694 curr->comm, task_pid_nr(curr));
3695 lockdep_print_held_locks(curr);
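
print_held_locks_bug() above backs the exit-time check (debug_check_no_locks_held() in this kernel, if memory serves) that complains when a task terminates with entries still in curr->held_locks. A hypothetical sketch (state_lock and leaky_thread_fn() are invented names):

/*
 * Hypothetical illustration only.  A kernel thread that returns while
 * still holding a lock reaches the exit path with curr->lockdep_depth
 * nonzero, and print_held_locks_bug() lists the leaked lock.
 */
#include <linux/kthread.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(state_lock);

static int leaky_thread_fn(void *data)
{
        spin_lock(&state_lock);
        /* missing spin_unlock(&state_lock) */
        return 0;                       /* task exits with the lock held */
}

Starting such a thread, e.g. with kthread_run(leaky_thread_fn, NULL, "leaky"), would trigger the report once the thread exits.
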
3787 struct task_struct *curr = current;
3789 if (unlikely(curr->lockdep_depth)) {
3796 curr->comm, curr->pid);
3797 lockdep_print_held_locks(curr);
3803 struct task_struct *curr = current;
3817 lockdep_print_held_locks(curr);