/linux-master/include/asm-generic/
qrwlock_types.h
   26  arch_spinlock_t wait_lock;  (member in struct:qrwlock)
   31  .wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \

qrwlock.h
  132  return arch_spin_is_locked(&lock->wait_lock);

/linux-master/kernel/locking/
rwbase_rt.c
   19   * 2) Take rtmutex::wait_lock, which protects the writelocked flag
   75  raw_spin_lock_irq(&rtm->wait_lock);
   78   * Call into the slow lock path with the rtmutex->wait_lock
   87   * unlock(m->wait_lock)
   90   * lock(m->wait_lock)
   92   * unlock(m->wait_lock)
  120   * rtmutex->wait_lock has to be unlocked in any case of course.
  124  raw_spin_unlock_irq(&rtm->wait_lock);
  151  raw_spin_lock_irq(&rtm->wait_lock);
  155   * clean up rwb->readers it needs to acquire rtm->wait_lock
  [all...]

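The rwbase_rt.c hits above describe taking rtmutex::wait_lock, with interrupts disabled, around the transition into the write-locked state. A minimal sketch of that idea, using a hypothetical toy_rwbase type and reader encoding rather than the real rwbase/rtmutex structures:

#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct toy_rwbase {
	atomic_t	readers;	/* 0: unlocked, >0: readers, bias: writer */
	raw_spinlock_t	wait_lock;	/* stands in for rtmutex::wait_lock */
};

#define TOY_WRITER_BIAS	(1 << 30)	/* toy "write locked" marker */

static bool toy_rwbase_write_trylock(struct toy_rwbase *rwb)
{
	bool locked = false;

	/* The write-locked flag only changes under wait_lock, IRQs off. */
	raw_spin_lock_irq(&rwb->wait_lock);
	if (atomic_read(&rwb->readers) == 0) {
		atomic_set(&rwb->readers, TOY_WRITER_BIAS);
		locked = true;
	}
	raw_spin_unlock_irq(&rwb->wait_lock);

	return locked;
}

Readers that fail their fast path would likewise take the same wait_lock before waiting, which is what serializes them against this transition.
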
qrwlock.c
   43  arch_spin_lock(&lock->wait_lock);
   56  arch_spin_unlock(&lock->wait_lock);
   73  arch_spin_lock(&lock->wait_lock);
   88  arch_spin_unlock(&lock->wait_lock);

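The qrwlock.c hits show that both the read and write slowpaths bracket their work with arch_spin_lock()/arch_spin_unlock() on the embedded wait_lock, so contending CPUs take the lock word in FIFO order. A rough sketch of that queuing idea, with a hypothetical toy_qrwlock and a deliberately simplified count encoding (not the real qrwlock code):

#include <linux/atomic.h>
#include <linux/processor.h>
#include <linux/spinlock.h>

struct toy_qrwlock {
	atomic_t	cnts;		/* reader count plus a writer bit */
	arch_spinlock_t	wait_lock;	/* serializes contending lockers */
};

#define TOY_WRITER	0x100		/* toy writer bit, above the reader count */

static void toy_read_lock_slowpath(struct toy_qrwlock *lock)
{
	/* Queue behind earlier waiters (compare line 43 above). */
	arch_spin_lock(&lock->wait_lock);

	/* The wait_lock holder only has to wait for the current writer. */
	atomic_add(1, &lock->cnts);
	while (atomic_read(&lock->cnts) & TOY_WRITER)
		cpu_relax();

	/* Hand the queue over to the next waiter. */
	arch_spin_unlock(&lock->wait_lock);
}
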
rtmutex.c
   82   * with ->wait_lock is held. To prevent any fast path cmpxchg to the lock,
  107   * lock->wait_lock is held but explicit acquire semantics are needed
  115  /* lock->wait_lock is held so the unlock provides release semantics. */
  234   * Callers must hold the ->wait_lock -- which is the whole purpose as we force
  259   * 2) Drop lock->wait_lock
  264  __releases(lock->wait_lock)
  269  raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
  274   * unlock(wait_lock);
  275   * lock(wait_lock);
  281   * unlock(wait_lock);
  [all...]

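The hits at lines 259 and 269-281 point at the wait_lock discipline in the rtmutex slowpath: the wait list is only touched under raw_spin_lock_irqsave(&lock->wait_lock, ...), and the lock is dropped before sleeping, then retaken. A hedged sketch of that shape with made-up toy_rtmutex/toy_waiter types; the wake-up side, which clears ->owner and wakes the first waiter under the same wait_lock, is elided:

#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct toy_rtmutex {
	raw_spinlock_t		wait_lock;	/* protects wait_list and owner */
	struct list_head	wait_list;
	struct task_struct	*owner;
};

struct toy_waiter {
	struct list_head	entry;
	struct task_struct	*task;
};

static void toy_rtmutex_lock_slow(struct toy_rtmutex *lock)
{
	struct toy_waiter waiter = { .task = current };
	unsigned long flags;

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	list_add_tail(&waiter.entry, &lock->wait_list);

	while (lock->owner) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		/* Never sleep with wait_lock held ... */
		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
		schedule();
		/* ... and retake it before touching the wait list again. */
		raw_spin_lock_irqsave(&lock->wait_lock, flags);
	}

	lock->owner = current;
	list_del(&waiter.entry);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	__set_current_state(TASK_RUNNING);
}
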
rtmutex_api.c
  168  lockdep_assert_held(&lock->wait_lock);
  179   * retain preempt_disabled when we drop the wait_lock, to
  194  raw_spin_lock_irqsave(&lock->wait_lock, flags);
  196  raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
  246   * the wait_lock of the rtmutex associated to the pi_futex held.
  247   * spin_unlock() in turn takes wait_lock of the rtmutex on which
  249   * recursion. Give the futex/rtmutex wait_lock a separate key.
  251  lockdep_set_class(&lock->wait_lock, &pi_futex_key);
  298  lockdep_assert_held(&lock->wait_lock);
  345  raw_spin_lock_irq(&lock->wait_lock);
  [all...]

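The hits at lines 246-251 explain why the futex/rtmutex wait_lock is put into its own lockdep class: it can legitimately be taken while another wait_lock of the same type is held, which lockdep would otherwise flag as recursion. A short sketch of that reclassing step, with a hypothetical toy_pi_futex_key standing in for the real pi_futex_key:

#include <linux/lockdep.h>
#include <linux/rtmutex.h>

static struct lock_class_key toy_pi_futex_key;

static void toy_reclass_pi_wait_lock(struct rt_mutex_base *lock)
{
	/*
	 * Move this wait_lock into its own class so that taking it while
	 * another rtmutex wait_lock is held is not reported as
	 * same-class recursion.  Normal init of the rtmutex is elided.
	 */
	lockdep_set_class(&lock->wait_lock, &toy_pi_futex_key);
}
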
rwsem.c
  113   * For all the above cases, wait_lock will be held. A writer must also
  324  raw_spin_lock_init(&sem->wait_lock);
  372  lockdep_assert_held(&sem->wait_lock);
  388  lockdep_assert_held(&sem->wait_lock);
  402   * - the wait_lock must be held by the caller
  405   * preferably when the wait_lock is released
  419  lockdep_assert_held(&sem->wait_lock);
  573   * writer slowpaths with wait_lock held. It releases the wait_lock and
  579  __releases(&sem->wait_lock)
  [all...]

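The rwsem.c hits show the convention that helpers walking the wait list state "the wait_lock must be held by the caller" and enforce it with lockdep_assert_held(). A simplified illustration of that convention using hypothetical toy_rwsem types, not the real rwsem_mark_wake():

#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct toy_rwsem_waiter {
	struct list_head	list;
	struct task_struct	*task;
};

struct toy_rwsem {
	raw_spinlock_t		wait_lock;	/* protects wait_list */
	struct list_head	wait_list;
};

/* Caller must hold sem->wait_lock, as the comment at line 402 above requires. */
static void toy_rwsem_wake_first(struct toy_rwsem *sem)
{
	struct toy_rwsem_waiter *waiter;

	lockdep_assert_held(&sem->wait_lock);

	waiter = list_first_entry_or_null(&sem->wait_list,
					  struct toy_rwsem_waiter, list);
	if (waiter)
		wake_up_process(waiter->task);
}
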
mutex-debug.c
   26   * Must be called with lock->wait_lock held.
   38  lockdep_assert_held(&lock->wait_lock);
   53  lockdep_assert_held(&lock->wait_lock);

ww_mutex.h
   75  raw_spin_lock(&lock->wait_lock);
   80  raw_spin_unlock(&lock->wait_lock);
   85  lockdep_assert_held(&lock->wait_lock);
  149  raw_spin_lock(&lock->rtmutex.wait_lock);
  154  raw_spin_unlock(&lock->rtmutex.wait_lock);
  159  lockdep_assert_held(&lock->rtmutex.wait_lock);
  319   * wait_lock.
  374   * After acquiring lock with fastpath, where we do not hold wait_lock, set ctx
  386   * and keep spinning, or it will acquire wait_lock, add itself

mutex.c
   49  raw_spin_lock_init(&lock->wait_lock);
  313   * by acquiring wait_lock there is a guarantee that
  622  raw_spin_lock(&lock->wait_lock);
  624   * After waiting to acquire the wait_lock, try again.
  659   * Once we hold wait_lock, we're serialized against
  669   * wait_lock. This ensures the lock cancellation is ordered
  683  raw_spin_unlock(&lock->wait_lock);
  704  raw_spin_lock(&lock->wait_lock);
  706  raw_spin_lock(&lock->wait_lock);
  732  raw_spin_unlock(&lock->wait_lock);
  [all...]

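The hit at line 624 ("After waiting to acquire the wait_lock, try again") names a common slowpath step: once wait_lock is held, retry the lock word before queueing a waiter, because the owner may have released the mutex while we waited for wait_lock. A sketch of that step with a hypothetical toy_mutex and a simplified owner encoding, not the real __mutex_lock_common():

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct toy_mutex {
	atomic_long_t		owner;		/* task pointer, 0 when unlocked */
	raw_spinlock_t		wait_lock;	/* protects wait_list */
	struct list_head	wait_list;
};

static bool toy_mutex_trylock(struct toy_mutex *lock)
{
	return atomic_long_cmpxchg(&lock->owner, 0, (long)current) == 0;
}

static void toy_mutex_lock_slow(struct toy_mutex *lock)
{
	raw_spin_lock(&lock->wait_lock);

	/* The mutex may have been released while we waited for wait_lock. */
	if (toy_mutex_trylock(lock)) {
		raw_spin_unlock(&lock->wait_lock);
		return;
	}

	/* ... queue a waiter on lock->wait_list and sleep (elided) ... */
	raw_spin_unlock(&lock->wait_lock);
}
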
rtmutex_common.h
   49   * @tree is ordered by @lock->wait_lock
  133  lockdep_assert_held(&lock->wait_lock);
  181  raw_spin_lock_init(&lock->wait_lock);

/linux-master/drivers/gpu/drm/omapdrm/
omap_irq.c
   18  /* call with wait_lock and dispc runtime held */
   25  assert_spin_locked(&priv->wait_lock);
   52  spin_lock_irqsave(&priv->wait_lock, flags);
   55  spin_unlock_irqrestore(&priv->wait_lock, flags);
   69  spin_lock_irqsave(&priv->wait_lock, flags);
   72  spin_unlock_irqrestore(&priv->wait_lock, flags);
   90  spin_lock_irqsave(&priv->wait_lock, flags);
   96  spin_unlock_irqrestore(&priv->wait_lock, flags);
  122  spin_lock_irqsave(&priv->wait_lock, flags);
  126  spin_unlock_irqrestore(&priv->wait_lock, flags);
  [all...]

omap_drv.h
   94  spinlock_t wait_lock; /* protects the wait_list */  (member in struct:omap_drm_private)

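Both omapdrm hits describe the same driver pattern: a plain spinlock_t named wait_lock protects a list of IRQ waiters reached from process context and from the IRQ handler, so every access uses spin_lock_irqsave(), and helpers that expect the lock to already be held check it with assert_spin_locked() (line 25). A sketch of that pattern with hypothetical toy_* types, not the actual omapdrm structures:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

struct toy_irq_wait {
	struct list_head	node;
	wait_queue_head_t	wq;
	u32			irqmask;
};

struct toy_drm_private {
	spinlock_t		wait_lock;	/* protects wait_list */
	struct list_head	wait_list;	/* toy_irq_wait entries */
};

/* Call with priv->wait_lock held (compare assert_spin_locked at line 25). */
static void toy_irq_update(struct toy_drm_private *priv)
{
	assert_spin_locked(&priv->wait_lock);
	/* recompute and program the combined IRQ mask here */
}

static void toy_irq_register_wait(struct toy_drm_private *priv,
				  struct toy_irq_wait *wait)
{
	unsigned long flags;

	spin_lock_irqsave(&priv->wait_lock, flags);
	list_add_tail(&wait->node, &priv->wait_list);
	toy_irq_update(priv);
	spin_unlock_irqrestore(&priv->wait_lock, flags);
}

The tidss entries below follow the same scheme, with the comment on the struct member ("protects the irq masks") naming what the lock covers.
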
/linux-master/drivers/gpu/drm/tidss/
tidss_irq.c
   18  /* call with wait_lock and dispc runtime held */
   21  assert_spin_locked(&tidss->wait_lock);
   34  spin_lock_irqsave(&tidss->wait_lock, flags);
   38  spin_unlock_irqrestore(&tidss->wait_lock, flags);
   49  spin_lock_irqsave(&tidss->wait_lock, flags);
   53  spin_unlock_irqrestore(&tidss->wait_lock, flags);
   91  spin_lock_irqsave(&tidss->wait_lock, flags);
   93  spin_unlock_irqrestore(&tidss->wait_lock, flags);

tidss_drv.h
   32  spinlock_t wait_lock; /* protects the irq masks */  (member in struct:tidss_device)

/linux-master/drivers/tty/
tty_ldsem.c
   69  raw_spin_lock_init(&sem->wait_lock);
  147  raw_spin_lock_irqsave(&sem->wait_lock, flags);
  149  raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
  162  raw_spin_lock_irq(&sem->wait_lock);
  175  raw_spin_unlock_irq(&sem->wait_lock);
  190  raw_spin_unlock_irq(&sem->wait_lock);
  211  raw_spin_lock_irq(&sem->wait_lock);
  216  raw_spin_unlock_irq(&sem->wait_lock);
  220  raw_spin_unlock_irq(&sem->wait_lock);
  237  raw_spin_lock_irq(&sem->wait_lock);
  [all...]

/linux-master/include/linux/
rtmutex.h
   24  raw_spinlock_t wait_lock;  (member in struct:rt_mutex_base)
   31  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(rtbasename.wait_lock), \
   52   * @wait_lock: spinlock to protect the structure

mutex_types.h
   43  raw_spinlock_t wait_lock;  (member in struct:mutex)

mutex.h
   68  , .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(lockname.wait_lock) \

rwsem.h
   59  raw_spinlock_t wait_lock;  (member in struct:rw_semaphore)
  106  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),\

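The rtmutex.h, mutex.h and rwsem.h hits share one initializer idiom: the embedded wait_lock is statically initialized with __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), so with lockdep enabled the spinlock's class is named after the containing lock. A small illustration with a made-up toy_sleeping_lock type and TOY_LOCK_INITIALIZER macro, not any of the kernel's real definitions:

#include <linux/list.h>
#include <linux/spinlock.h>

struct toy_sleeping_lock {
	raw_spinlock_t		wait_lock;
	struct list_head	wait_list;
};

#define TOY_LOCK_INITIALIZER(name) {					\
	.wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),		\
	.wait_list = LIST_HEAD_INIT(name.wait_list),			\
}

/* Compile-time initialization, mirroring DEFINE_MUTEX()-style macros. */
static struct toy_sleeping_lock toy_lock = TOY_LOCK_INITIALIZER(toy_lock);
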
tty_ldisc.h
   19  raw_spinlock_t wait_lock;  (member in struct:ld_semaphore)

/linux-master/kernel/futex/
pi.c
   51  lockdep_assert_held(&pi_state->pi_mutex.wait_lock);
   93  raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags);
   96  raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags);
  174   * pi_mutex->wait_lock:
  193   * pi_mutex->wait_lock
  232   * Now that we have a pi_state, we can acquire wait_lock
  235  raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
  238   * Since {uval, pi_state} is serialized by wait_lock, and our current
  300  raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
  317  raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
  [all...]

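The pi.c hits (51, 93-96, 235-238) show the futex-PI rule that the pi_state and the user-space futex value are only examined or changed while holding pi_state->pi_mutex.wait_lock, taken with interrupts disabled. A sketch of that rule with simplified, hypothetical toy_pi_state types, not the real futex_pi_state handling:

#include <linux/sched.h>
#include <linux/spinlock.h>

struct toy_pi_mutex {
	raw_spinlock_t		wait_lock;
	struct task_struct	*owner;
};

struct toy_pi_state {
	struct toy_pi_mutex	pi_mutex;
	struct task_struct	*owner;
};

static void toy_pi_state_update_owner(struct toy_pi_state *pi_state,
				       struct task_struct *new_owner)
{
	unsigned long flags;

	/* Owner changes are only valid while wait_lock is held. */
	raw_spin_lock_irqsave(&pi_state->pi_mutex.wait_lock, flags);
	pi_state->pi_mutex.owner = new_owner;
	pi_state->owner = new_owner;
	raw_spin_unlock_irqrestore(&pi_state->pi_mutex.wait_lock, flags);
}
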
/linux-master/fs/bcachefs/
six.c
  150   * Since we may be called under wait_lock (and by the wakeup code
  215  raw_spin_lock(&lock->wait_lock);
  250  raw_spin_unlock(&lock->wait_lock);
  424  raw_spin_lock(&lock->wait_lock);
  445  raw_spin_unlock(&lock->wait_lock);
  482  raw_spin_lock(&lock->wait_lock);
  486  raw_spin_unlock(&lock->wait_lock);
  762  raw_spin_lock(&lock->wait_lock);
  765  raw_spin_unlock(&lock->wait_lock);
  843  raw_spin_lock_init(&lock->wait_lock);
  [all...]

six.h
  142  raw_spinlock_t wait_lock;  (member in struct:six_lock)

/linux-master/lib/
test_lockup.c
  490  offsetof(spinlock_t, lock.wait_lock.magic),
  493  offsetof(rwlock_t, rwbase.rtmutex.wait_lock.magic),
  496  offsetof(struct mutex, rtmutex.wait_lock.magic),
  499  offsetof(struct rw_semaphore, rwbase.rtmutex.wait_lock.magic),
  510  offsetof(struct mutex, wait_lock.magic),
  513  offsetof(struct rw_semaphore, wait_lock.magic),

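The test_lockup.c hits compute offsetof(..., wait_lock.magic) so the module can sanity-check a lock address passed in by the user: with spinlock debugging enabled, the raw spinlock embedded in a mutex or rwsem carries a ->magic field at a known offset (lines 490-499 cover the PREEMPT_RT layouts, 510-513 the non-RT ones). A hedged illustration of one such check, not the real module's table:

#include <linux/mutex.h>
#include <linux/stddef.h>
#include <linux/types.h>

static bool toy_mutex_wait_lock_offset_ok(size_t offset)
{
#if defined(CONFIG_DEBUG_SPINLOCK) && !defined(CONFIG_PREEMPT_RT)
	/* struct mutex embeds a raw_spinlock_t wait_lock with a debug magic. */
	return offset == offsetof(struct mutex, wait_lock.magic);
#else
	return false;	/* no wait_lock.magic to check in this configuration */
#endif
}
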