Lines Matching refs:lock

17  * Debug aware fast / slowpath lock, trylock, unlock
22 static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
30 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, _RET_IP_);
31 ret = __rt_mutex_lock(&lock->rtmutex, state);
33 mutex_release(&lock->dep_map, _RET_IP_);
45 * rt_mutex_lock_nested - lock a rt_mutex
47 * @lock: the rt_mutex to be locked
50 void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
52 __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass);
56 void __sched _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock)
58 __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, nest_lock, 0);
65 * rt_mutex_lock - lock a rt_mutex
67 * @lock: the rt_mutex to be locked
69 void __sched rt_mutex_lock(struct rt_mutex *lock)
71 __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, 0);
77  * rt_mutex_lock_interruptible - lock a rt_mutex, interruptible by signals
79 * @lock: the rt_mutex to be locked
85 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
87 return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, NULL, 0);
92  * rt_mutex_lock_killable - lock a rt_mutex, killable by a fatal signal
94 * @lock: the rt_mutex to be locked
100 int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
102 return __rt_mutex_lock_common(lock, TASK_KILLABLE, NULL, 0);
107 * rt_mutex_trylock - try to lock a rt_mutex
109 * @lock: the rt_mutex to be locked
118 int __sched rt_mutex_trylock(struct rt_mutex *lock)
125 ret = __rt_mutex_trylock(&lock->rtmutex);
127 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
136 * @lock: the rt_mutex to be unlocked
138 void __sched rt_mutex_unlock(struct rt_mutex *lock)
140 mutex_release(&lock->dep_map, _RET_IP_);
141 __rt_mutex_unlock(&lock->rtmutex);
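The excerpts above are the public lock/trylock/unlock entry points. A minimal usage sketch, not taken from this file (the lock name example_lock and the two helpers are hypothetical), assuming the standard rtmutex API:

	#include <linux/rtmutex.h>

	static DEFINE_RT_MUTEX(example_lock);		/* hypothetical static lock */

	static void example_update(void)
	{
		rt_mutex_lock(&example_lock);		/* sleeps uninterruptibly until acquired */
		/* ... touch shared state ... */
		rt_mutex_unlock(&example_lock);
	}

	static bool example_try_update(void)
	{
		if (!rt_mutex_trylock(&example_lock))	/* returns 1 on success, 0 if contended */
			return false;
		/* ... touch shared state ... */
		rt_mutex_unlock(&example_lock);
		return true;
	}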
148 int __sched rt_mutex_futex_trylock(struct rt_mutex_base *lock)
150 return rt_mutex_slowtrylock(lock);
153 int __sched __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
155 return __rt_mutex_slowtrylock(lock);
162 * @lock: The rt_mutex to be unlocked
163 * @wqh: The wake queue head from which to get the next lock waiter
165 bool __sched __rt_mutex_futex_unlock(struct rt_mutex_base *lock,
168 lockdep_assert_held(&lock->wait_lock);
170 debug_rt_mutex_unlock(lock);
172 if (!rt_mutex_has_waiters(lock)) {
173 lock->owner = NULL;
183 mark_wakeup_next_waiter(wqh, lock);
188 void __sched rt_mutex_futex_unlock(struct rt_mutex_base *lock)
194 raw_spin_lock_irqsave(&lock->wait_lock, flags);
195 postunlock = __rt_mutex_futex_unlock(lock, &wqh);
196 raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
205 * @lock: The rt_mutex to be initialized
206 * @name: The lock name used for debugging
207 * @key: The lock class key used for debugging
213 void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name,
216 debug_check_no_locks_freed((void *)lock, sizeof(*lock));
217 __rt_mutex_base_init(&lock->rtmutex);
218 lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
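Callers normally reach __rt_mutex_init() through the rt_mutex_init() wrapper macro, which supplies a per-call-site lock_class_key for lockdep. A sketch built around a hypothetical example_dev structure (not from this file):

	#include <linux/rtmutex.h>
	#include <linux/slab.h>

	struct example_dev {
		struct rt_mutex lock;		/* protects @state */
		int state;
	};

	static struct example_dev *example_dev_alloc(void)
	{
		struct example_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

		if (!dev)
			return NULL;
		rt_mutex_init(&dev->lock);	/* ends up in __rt_mutex_init() */
		return dev;
	}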
223 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
226 * @lock: the rt_mutex to be locked
236 void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
241 __rt_mutex_base_init(lock);
245 * some of the futex functions invoke spin_unlock(&hb->lock) with
248 * the spinlock is based, which makes lockdep notice a lock
251 lockdep_set_class(&lock->wait_lock, &pi_futex_key);
252 rt_mutex_set_owner(lock, proxy_owner);
256 * rt_mutex_proxy_unlock - release a lock on behalf of owner
258 * @lock: the rt_mutex to be unlocked
267 void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
269 debug_rt_mutex_proxy_unlock(lock);
270 rt_mutex_clear_owner(lock);
274 * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
275 * @lock: the rt_mutex to take
286 * 0 - task blocked on lock
287 * 1 - acquired the lock for task, caller should wake it up
292 int __sched __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
298 lockdep_assert_held(&lock->wait_lock);
300 if (try_to_take_rt_mutex(lock, task, NULL))
304 ret = task_blocks_on_rt_mutex(lock, waiter, task, NULL,
307 if (ret && !rt_mutex_owner(lock)) {
311 * released the lock while we were walking the
321 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
322 * @lock: the rt_mutex to take
333 * 0 - task blocked on lock
334 * 1 - acquired the lock for task, caller should wake it up
339 int __sched rt_mutex_start_proxy_lock(struct rt_mutex_base *lock,
345 raw_spin_lock_irq(&lock->wait_lock);
346 ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
348 remove_waiter(lock, waiter);
349 raw_spin_unlock_irq(&lock->wait_lock);
355 * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
356 * @lock: the rt_mutex we were woken on
361 * Wait for the lock acquisition started on our behalf by
371 int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
377 raw_spin_lock_irq(&lock->wait_lock);
380 ret = rt_mutex_slowlock_block(lock, NULL, TASK_INTERRUPTIBLE, to, waiter);
385 fixup_rt_mutex_waiters(lock, true);
386 raw_spin_unlock_irq(&lock->wait_lock);
392 * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
393 * @lock: the rt_mutex we were woken on
399 * Unless we acquired the lock, we're still enqueued on the wait-list and can
406 * false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
411 bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
416 raw_spin_lock_irq(&lock->wait_lock);
418 * Do an unconditional try-lock; this deals with the lock stealing
424 * we will own the lock and it will have removed the waiter. If we
428 try_to_take_rt_mutex(lock, current, waiter);
433 if (rt_mutex_owner(lock) != current) {
434 remove_waiter(lock, waiter);
441 fixup_rt_mutex_waiters(lock, false);
443 raw_spin_unlock_irq(&lock->wait_lock);
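The three proxy operations are meant to be used together; they are used by the futex requeue-PI code. A condensed, hedged sketch of that sequence (hash-bucket locking and most error handling elided; the two halves run in different tasks, and the illustrative wake call stands in for the caller's own wake-up path):

	struct rt_mutex_waiter rt_waiter;
	int ret;

	rt_mutex_init_waiter(&rt_waiter);

	/* Requeueing task: block @task on @lock, or acquire it on its behalf. */
	ret = rt_mutex_start_proxy_lock(lock, &rt_waiter, task);
	if (ret == 1)
		wake_up_process(task);	/* illustrative; caller must wake @task */

	/* The woken task later finishes (or abandons) the acquisition: */
	ret = rt_mutex_wait_proxy_lock(lock, timeout, &rt_waiter);
	if (ret && !rt_mutex_cleanup_proxy_lock(lock, &rt_waiter))
		ret = 0;		/* the lock was acquired after all */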
466 next_lock = waiter->lock;
502 static __always_inline int __mutex_lock_common(struct mutex *lock,
511 mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);
512 ret = __rt_mutex_lock(&lock->rtmutex, state);
514 mutex_release(&lock->dep_map, ip);
516 lock_acquired(&lock->dep_map, ip);
521 void __sched mutex_lock_nested(struct mutex *lock, unsigned int subclass)
523 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
527 void __sched _mutex_lock_nest_lock(struct mutex *lock,
530 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest_lock, _RET_IP_);
534 int __sched mutex_lock_interruptible_nested(struct mutex *lock,
537 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, NULL, _RET_IP_);
541 int __sched mutex_lock_killable_nested(struct mutex *lock,
544 return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
548 void __sched mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
555 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
562 void __sched mutex_lock(struct mutex *lock)
564 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
568 int __sched mutex_lock_interruptible(struct mutex *lock)
570 return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
574 int __sched mutex_lock_killable(struct mutex *lock)
576 return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
580 void __sched mutex_lock_io(struct mutex *lock)
584 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
590 int __sched mutex_trylock(struct mutex *lock)
597 ret = __rt_mutex_trylock(&lock->rtmutex);
599 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
605 void __sched mutex_unlock(struct mutex *lock)
607 mutex_release(&lock->dep_map, _RET_IP_);
608 __rt_mutex_unlock(&lock->rtmutex);
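On PREEMPT_RT these mutex_*() entry points provide the regular mutex API on top of an rtmutex, so existing callers are unchanged. A generic usage sketch with a hypothetical example_mutex:

	#include <linux/mutex.h>

	static DEFINE_MUTEX(example_mutex);

	static int example_op(void)
	{
		int ret = mutex_lock_interruptible(&example_mutex);

		if (ret)
			return ret;		/* -EINTR if interrupted by a signal */
		/* ... critical section ... */
		mutex_unlock(&example_mutex);
		return 0;
	}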