/linux-master/arch/powerpc/include/asm/
simple_spinlock.h
      6: * Simple spin lock operations.
     35: static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
     37: 	return lock.slock == 0;
     40: static inline int arch_spin_is_locked(arch_spinlock_t *lock)
     42: 	return !arch_spin_value_unlocked(READ_ONCE(*lock));
     46: * This returns the old value in the lock, so we succeeded
     47: * in getting the lock if the return value is 0.
     49: static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock)
     64: 	: "r" (token), "r" (&lock->slock), [eh] "n" (eh)
     70: static inline int arch_spin_trylock(arch_spinlock_t *lock)
     94: splpar_spin_yield(arch_spinlock_t *lock)
     95: splpar_rw_yield(arch_rwlock_t *lock)
     98: spin_yield(arch_spinlock_t *lock)
    106: rw_yield(arch_rwlock_t *lock)
    114: arch_spin_lock(arch_spinlock_t *lock)
    128: arch_spin_unlock(arch_spinlock_t *lock)
    [all...]
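The trylock convention documented at lines 46-47 (the asm returns the previous lock word, so 0 means the caller took the lock) is easy to miss in the larx/stwcx. listing. Below is a minimal portable sketch of the same convention, assuming C11 atomics; the toy_* names are illustrative, not kernel code:

    #include <stdatomic.h>

    typedef struct { atomic_ulong slock; } toy_spinlock_t;

    /* Mimics __arch_spin_trylock(): returns the previous lock word,
     * so 0 means success and our token is now stored in the lock. */
    static unsigned long toy_trylock(toy_spinlock_t *lock, unsigned long token)
    {
        unsigned long old = 0;

        if (atomic_compare_exchange_strong_explicit(&lock->slock, &old, token,
                                                    memory_order_acquire,
                                                    memory_order_relaxed))
            return 0;       /* old value was 0: lock acquired */
        return old;         /* non-zero old value: lock was already held */
    }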
qspinlock.h
     11: * Use the EH=1 hint for accesses that result in the lock being acquired.
     12: * The hardware is supposed to optimise this pattern by holding the lock
     31: * Put a speculation barrier after testing the lock/node and finding it
     42: * Execute a miso instruction after passing the MCS lock ownership to the
     55: * This executes miso after an unlock of the lock word, having ownership
     67: * the lock field.
     71: static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
     73: 	return READ_ONCE(lock->val);
     76: static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
     78: 	return !lock.val;
     81: queued_spin_is_contended(struct qspinlock *lock)
     92: __queued_spin_trylock_nosteal(struct qspinlock *lock)
    114: __queued_spin_trylock_steal(struct qspinlock *lock)
    138: queued_spin_trylock(struct qspinlock *lock)
    148: queued_spin_lock(struct qspinlock *lock)
    154: queued_spin_unlock(struct qspinlock *lock)
    [all...]
/linux-master/arch/sh/include/asm/
spinlock-cas.h
     26: #define arch_spin_is_locked(x)	((x)->lock <= 0)
     28: static inline void arch_spin_lock(arch_spinlock_t *lock)
     30: 	while (!__sl_cas(&lock->lock, 1, 0));
     33: static inline void arch_spin_unlock(arch_spinlock_t *lock)
     35: 	__sl_cas(&lock->lock, 0, 1);
     38: static inline int arch_spin_trylock(arch_spinlock_t *lock)
     40: 	return __sl_cas(&lock->lock, 1, 0);
    [all...]
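Note the inverted encoding here: 1 means unlocked, 0 means locked, and __sl_cas(addr, old, new) returns the value it found at addr. A sketch of the same protocol in C11 atomics (the toy_* names are hypothetical, not the SH primitives):

    #include <stdatomic.h>

    /* CAS that returns the observed value, like __sl_cas() */
    static unsigned toy_cas(atomic_uint *p, unsigned old, unsigned new)
    {
        atomic_compare_exchange_strong(p, &old, new);
        return old;     /* updated to the observed value on failure */
    }

    static void toy_lock(atomic_uint *lock)
    {
        /* matches "while (!__sl_cas(&lock->lock, 1, 0));" above:
         * only a return of 1 (was free) means we now hold it */
        while (toy_cas(lock, 1, 0) != 1)
            ;
    }

    static void toy_unlock(atomic_uint *lock)
    {
        toy_cas(lock, 0, 1);    /* restore the "free" value */
    }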
spinlock_types.h
     10: 	volatile unsigned int lock;	/* member of arch_spinlock_t */
     16: 	volatile unsigned int lock;	/* member of arch_rwlock_t */
spinlock-llsc.h
     18: #define arch_spin_is_locked(x)	((x)->lock <= 0)
     21: * Simple spin lock operations. There are two variants, one clears IRQ's
     26: static inline void arch_spin_lock(arch_spinlock_t *lock)
     41: 	: "r" (&lock->lock)
     46: static inline void arch_spin_unlock(arch_spinlock_t *lock)
     56: 	: "r" (&lock->lock)
     61: static inline int arch_spin_trylock(arch_spinlock_t *lock)
     74: 	: "r" (&lock->lock)
    [all...]
/linux-master/kernel/locking/ |
rtmutex_api.c
     17: * Debug aware fast / slowpath lock,trylock,unlock
     22: static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
     30: 	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, _RET_IP_);
     31: 	ret = __rt_mutex_lock(&lock->rtmutex, state);
     33: 	mutex_release(&lock->dep_map, _RET_IP_);
     45: * rt_mutex_lock_nested - lock a rt_mutex
     47: * @lock: the rt_mutex to be locked
     50: void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
     52: 	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass);
     56: void __sched _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock)
     69: rt_mutex_lock(struct rt_mutex *lock)
     85: rt_mutex_lock_interruptible(struct rt_mutex *lock)
    100: rt_mutex_lock_killable(struct rt_mutex *lock)
    118: rt_mutex_trylock(struct rt_mutex *lock)
    138: rt_mutex_unlock(struct rt_mutex *lock)
    148: rt_mutex_futex_trylock(struct rt_mutex_base *lock)
    153: __rt_mutex_futex_trylock(struct rt_mutex_base *lock)
    165: __rt_mutex_futex_unlock(struct rt_mutex_base *lock, struct rt_wake_q_head *wqh)
    188: rt_mutex_futex_unlock(struct rt_mutex_base *lock)
    213: __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key)
    236: rt_mutex_init_proxy_locked(struct rt_mutex_base *lock, struct task_struct *proxy_owner)
    267: rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
    292: __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter, struct task_struct *task)
    339: rt_mutex_start_proxy_lock(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter, struct task_struct *task)
    371: rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock, struct hrtimer_sleeper *to, struct rt_mutex_waiter *waiter)
    411: rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
    502: __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass, struct lockdep_map *nest_lock, unsigned long ip)
    521: mutex_lock_nested(struct mutex *lock, unsigned int subclass)
    527: _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock)
    534: mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
    541: mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
    548: mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
    562: mutex_lock(struct mutex *lock)
    568: mutex_lock_interruptible(struct mutex *lock)
    574: mutex_lock_killable(struct mutex *lock)
    580: mutex_lock_io(struct mutex *lock)
    590: mutex_trylock(struct mutex *lock)
    605: mutex_unlock(struct mutex *lock)
    [all...]
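The exported rt_mutex_* functions indexed above follow the familiar mutex calling conventions, with priority inheritance underneath. A usage sketch (kernel context assumed; foo_lock and foo_update() are hypothetical):

    static DEFINE_RT_MUTEX(foo_lock);

    static int foo_update(void)
    {
        int ret;

        ret = rt_mutex_lock_interruptible(&foo_lock);
        if (ret)
            return ret;     /* -EINTR: a signal arrived while sleeping */

        /* ... critical section; a blocked higher-priority task
         * boosts our priority until we release the lock ... */

        rt_mutex_unlock(&foo_lock);
        return 0;
    }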
mutex.h
     24: extern void debug_mutex_lock_common(struct mutex *lock,
     26: extern void debug_mutex_wake_waiter(struct mutex *lock,
     29: extern void debug_mutex_add_waiter(struct mutex *lock,
     32: extern void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
     34: extern void debug_mutex_unlock(struct mutex *lock);
     35: extern void debug_mutex_init(struct mutex *lock, const char *name,
     38: # define debug_mutex_lock_common(lock, waiter)		do { } while (0)
     39: # define debug_mutex_wake_waiter(lock, waiter)		do { } while (0)
     41: # define debug_mutex_add_waiter(lock, waiter, ti)	do { } while (0)
     42: # define debug_mutex_remove_waiter(lock, waiter, ti)	do { } while (0)
    [all...]
mutex.c
     34: #include <trace/events/lock.h>
     46: __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
     48: 	atomic_long_set(&lock->owner, 0);
     49: 	raw_spin_lock_init(&lock->wait_lock);
     50: 	INIT_LIST_HEAD(&lock->wait_list);
     52: 	osq_lock_init(&lock->osq);
     55: 	debug_mutex_init(lock, name, key);
     60: * @owner: contains: 'struct task_struct *' to the current lock owner,
     65: * Bit1 indicates unlock needs to hand the lock to the top-waiter
     79: static inline struct task_struct *__mutex_owner(struct mutex *lock)
     89: mutex_is_locked(struct mutex *lock)
    103: __mutex_trylock_common(struct mutex *lock, bool handoff)
    142: __mutex_trylock_or_handoff(struct mutex *lock, bool handoff)
    150: __mutex_trylock(struct mutex *lock)
    166: __mutex_trylock_fast(struct mutex *lock)
    177: __mutex_unlock_fast(struct mutex *lock)
    185: __mutex_set_flag(struct mutex *lock, unsigned long flag)
    190: __mutex_clear_flag(struct mutex *lock, unsigned long flag)
    195: __mutex_waiter_is_first(struct mutex *lock, struct mutex_waiter *waiter)
    205: __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, struct list_head *list)
    216: __mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter)
    231: __mutex_handoff(struct mutex *lock, struct task_struct *task)
    281: mutex_lock(struct mutex *lock)
    298: __mutex_trylock_or_owner(struct mutex *lock)
    304: ww_mutex_spin_on_owner(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
    352: mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner, struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
    392: mutex_can_spin_on_owner(struct mutex *lock)
    441: mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
    517: mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx, struct mutex_waiter *waiter)
    542: mutex_unlock(struct mutex *lock)
    563: ww_mutex_unlock(struct ww_mutex *lock)
    574: __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclass, struct lockdep_map *nest_lock, unsigned long ip, struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
    749: __mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass, struct lockdep_map *nest_lock, unsigned long ip)
    756: __ww_mutex_lock(struct mutex *lock, unsigned int state, unsigned int subclass, unsigned long ip, struct ww_acquire_ctx *ww_ctx)
    802: mutex_lock_nested(struct mutex *lock, unsigned int subclass)
    810: _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
    817: mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
    824: mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
    831: mutex_lock_io_nested(struct mutex *lock, unsigned int subclass)
    845: ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
    871: ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
    886: ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
    906: __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
    982: mutex_lock_interruptible(struct mutex *lock)
   1006: mutex_lock_killable(struct mutex *lock)
   1027: mutex_lock_io(struct mutex *lock)
   1038: __mutex_lock_slowpath(struct mutex *lock)
   1044: __mutex_lock_killable_slowpath(struct mutex *lock)
   1050: __mutex_lock_interruptible_slowpath(struct mutex *lock)
   1056: __ww_mutex_lock_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
   1063: __ww_mutex_lock_interruptible_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
   1086: mutex_trylock(struct mutex *lock)
   1102: ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
   1117: ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
   1144: atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
    [all...]
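The @owner word described at lines 60-65 packs a task_struct pointer together with state flags in its three low bits, which are free because task structures are at least 8-byte aligned. A sketch of that encoding; the constants mirror the kernel's MUTEX_FLAG_* values but the helpers themselves are illustrative only:

    #include <stdint.h>

    #define TOY_FLAG_WAITERS 0x01UL  /* bit0: wait list is non-empty */
    #define TOY_FLAG_HANDOFF 0x02UL  /* bit1: unlock must hand off to the top waiter */
    #define TOY_FLAG_PICKUP  0x04UL  /* bit2: handoff done, waiter may take the lock */
    #define TOY_FLAGS        0x07UL

    struct toy_task;                 /* stand-in for struct task_struct */

    static struct toy_task *toy_owner(uintptr_t owner_word)
    {
        return (struct toy_task *)(owner_word & ~TOY_FLAGS);
    }

    static uintptr_t toy_encode_owner(struct toy_task *task, unsigned long flags)
    {
        return (uintptr_t)task | (flags & TOY_FLAGS);
    }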
mutex-debug.c
      8: * lock debugging, locking tree, deadlock detection started by:
     26: * Must be called with lock->wait_lock held.
     28: void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
     36: void debug_mutex_wake_waiter(struct mutex *lock, struct mutex_waiter *waiter)
     38: 	lockdep_assert_held(&lock->wait_lock);
     39: 	DEBUG_LOCKS_WARN_ON(list_empty(&lock->wait_list));
     50: void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
     53: 	lockdep_assert_held(&lock->wait_lock);
     55: 	/* Mark the current thread as blocked on the lock: */
     59: void debug_mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
     71: debug_mutex_unlock(struct mutex *lock)
     79: debug_mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
    100: mutex_destroy(struct mutex *lock)
    [all...]
ww_mutex.h
      9: __ww_waiter_first(struct mutex *lock)
     13: 	w = list_first_entry(&lock->wait_list, struct mutex_waiter, list);
     14: 	if (list_entry_is_head(w, &lock->wait_list, list))
     21: __ww_waiter_next(struct mutex *lock, struct mutex_waiter *w)
     24: 	if (list_entry_is_head(w, &lock->wait_list, list))
     31: __ww_waiter_prev(struct mutex *lock, struct mutex_waiter *w)
     34: 	if (list_entry_is_head(w, &lock->wait_list, list))
     41: __ww_waiter_last(struct mutex *lock)
     45: 	w = list_last_entry(&lock->wait_list, struct mutex_waiter, list);
     46: 	if (list_entry_is_head(w, &lock->wait_list, list))
     53: __ww_waiter_add(struct mutex *lock, struct mutex_waiter *waiter, struct mutex_waiter *pos)
     62: __ww_mutex_owner(struct mutex *lock)
     68: __ww_mutex_has_waiters(struct mutex *lock)
     73: lock_wait_lock(struct mutex *lock)
     78: unlock_wait_lock(struct mutex *lock)
     83: lockdep_assert_wait_lock_held(struct mutex *lock)
     94: __ww_waiter_first(struct rt_mutex *lock)
    103: __ww_waiter_next(struct rt_mutex *lock, struct rt_mutex_waiter *w)
    112: __ww_waiter_prev(struct rt_mutex *lock, struct rt_mutex_waiter *w)
    121: __ww_waiter_last(struct rt_mutex *lock)
    130: __ww_waiter_add(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct rt_mutex_waiter *pos)
    136: __ww_mutex_owner(struct rt_mutex *lock)
    142: __ww_mutex_has_waiters(struct rt_mutex *lock)
    147: lock_wait_lock(struct rt_mutex *lock)
    152: unlock_wait_lock(struct rt_mutex *lock)
    157: lockdep_assert_wait_lock_held(struct rt_mutex *lock)
    277: __ww_mutex_die(struct MUTEX *lock, struct MUTEX_WAITER *waiter, struct ww_acquire_ctx *ww_ctx)
    300: __ww_mutex_wound(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx, struct ww_acquire_ctx *hold_ctx)
    355: __ww_mutex_check_waiters(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
    378: ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
    413: __ww_mutex_kill(struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
    441: __ww_mutex_check_kill(struct MUTEX *lock, struct MUTEX_WAITER *waiter, struct ww_acquire_ctx *ctx)
    489: __ww_mutex_add_waiter(struct MUTEX_WAITER *waiter, struct MUTEX *lock, struct ww_acquire_ctx *ww_ctx)
    559: __ww_mutex_unlock(struct ww_mutex *lock)
    [all...]
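The die/wound/kill helpers indexed above implement the backoff side of the wound/wait protocol; callers see it as a -EDEADLK return that demands dropping all held locks and retrying. A sketch of the documented acquisition loop for two objects (toy_ww_class and the surrounding function are hypothetical):

    static DEFINE_WW_CLASS(toy_ww_class);

    static int toy_lock_pair(struct ww_mutex *m1, struct ww_mutex *m2)
    {
        struct ww_acquire_ctx ctx;
        int ret;

        ww_acquire_init(&ctx, &toy_ww_class);
    retry:
        ret = ww_mutex_lock(m1, &ctx);
        if (!ret) {
            ret = ww_mutex_lock(m2, &ctx);
            if (ret == -EDEADLK) {
                /* We lost the tie-break: back off and take the
                 * contended lock first on the next pass. */
                ww_mutex_unlock(m1);
                swap(m1, m2);
                goto retry;
            }
        }
        if (ret)
            goto out_fini;

        ww_acquire_done(&ctx);
        /* ... both objects are held here ... */
        ww_mutex_unlock(m1);
        ww_mutex_unlock(m2);
    out_fini:
        ww_acquire_fini(&ctx);
        return ret;
    }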
/linux-master/lib/ |
dec_and_lock.c
     13: *	spin_lock(&lock);
     18: * because the spin-lock and the decrement must be
     21: int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
     28: 	spin_lock(lock);
     31: 	spin_unlock(lock);
     37: int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
     45: 	spin_lock_irqsave(lock, *flags);
     48: 	spin_unlock_irqrestore(lock, *flags);
     53: int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock)
     60: 	raw_spin_lock(lock);
     68: _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock, unsigned long *flags)
    [all...]
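The point of these helpers is that the decrement and the lock acquisition behave as a single atomic step: if the count hits zero, the caller already holds the lock, so no concurrent lookup can resurrect the object in between. The classic caller is a final-put path; struct foo and its list are hypothetical:

    struct foo {
        atomic_t refcount;
        struct list_head node;
    };

    static LIST_HEAD(foo_list);
    static DEFINE_SPINLOCK(foo_list_lock);

    static void foo_put(struct foo *f)
    {
        /* Returns true, with foo_list_lock held, only when the
         * count dropped to zero. */
        if (atomic_dec_and_lock(&f->refcount, &foo_list_lock)) {
            list_del(&f->node);
            spin_unlock(&foo_list_lock);
            kfree(f);
        }
    }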
/linux-master/tools/testing/radix-tree/linux/ |
local_lock.h
      5: static inline void local_lock(local_lock_t *lock) { }
      6: static inline void local_unlock(local_lock_t *lock) { }
/linux-master/arch/s390/include/asm/ |
spinlock_types.h
     10: 	int lock;	/* member of arch_spinlock_t */
     13: #define __ARCH_SPIN_LOCK_UNLOCKED { .lock = 0, }
/linux-master/fs/bcachefs/ |
six.h
     14: * write lock without deadlocking, so an operation that updates multiple nodes
     23: *   six_lock_read(&foo->lock);
     24: *   six_unlock_read(&foo->lock);
     26: * An intent lock must be held before taking a write lock:
     27: *   six_lock_intent(&foo->lock);
     28: *   six_lock_write(&foo->lock);
     29: *   six_unlock_write(&foo->lock);
     30: *   six_unlock_intent(&foo->lock);
     40: * There are also interfaces that take the lock type as an enum:
    191: six_lock_seq(const struct six_lock *lock)
    205: six_trylock_type(struct six_lock *lock, enum six_lock_type type)
    229: six_lock_waiter(struct six_lock *lock, enum six_lock_type type, struct six_lock_waiter *wait, six_lock_should_sleep_fn should_sleep_fn, void *p)
    247: six_lock_ip(struct six_lock *lock, enum six_lock_type type, six_lock_should_sleep_fn should_sleep_fn, void *p, unsigned long ip)
    266: six_lock_type(struct six_lock *lock, enum six_lock_type type, six_lock_should_sleep_fn should_sleep_fn, void *p)
    286: six_relock_type(struct six_lock *lock, enum six_lock_type type, unsigned seq)
    308: six_unlock_type(struct six_lock *lock, enum six_lock_type type)
    [all...]
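Spelling out the header's own example through the enum-based interface indexed above (six_lock_type() and six_unlock_type() are listed there; struct toy_node is hypothetical, and the NULL arguments stand for the optional should_sleep_fn callback and its payload):

    struct toy_node {
        struct six_lock lock;
        /* ... node payload ... */
    };

    static void toy_update_node(struct toy_node *n)
    {
        /* Intent excludes other would-be writers but still admits
         * readers, so setup work can overlap with reads. */
        six_lock_type(&n->lock, SIX_LOCK_intent, NULL, NULL);

        /* Write excludes readers too: keep this section brief. */
        six_lock_type(&n->lock, SIX_LOCK_write, NULL, NULL);
        /* ... modify the node ... */
        six_unlock_type(&n->lock, SIX_LOCK_write);

        six_unlock_type(&n->lock, SIX_LOCK_intent);
    }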
six.c
     14: #include <trace/events/lock.h>
     27: static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type);
     38: /* Value we add to the lock in order to take the lock: */
     41: /* If the lock has this value (used as a mask), taking the lock fails: */
     44: /* Mask that indicates lock is held for this type: */
     47: /* Waitlist we wakeup when releasing the lock: */
     72: static inline void six_set_bitmask(struct six_lock *lock, u32 mask)
     74: 	if ((atomic_read(&lock->state) & mask) != mask)
     78: six_clear_bitmask(struct six_lock *lock, u32 mask)
     84: six_set_owner(struct six_lock *lock, enum six_lock_type type, u32 old, struct task_struct *owner)
     98: pcpu_read_count(struct six_lock *lock)
    117: __do_six_trylock(struct six_lock *lock, enum six_lock_type type, struct task_struct *task, bool try)
    206: __six_lock_wakeup(struct six_lock *lock, enum six_lock_type lock_type)
    259: six_lock_wakeup(struct six_lock *lock, u32 state, enum six_lock_type lock_type)
    272: do_six_trylock(struct six_lock *lock, enum six_lock_type type, bool try)
    291: six_trylock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long ip)
    312: six_relock_ip(struct six_lock *lock, enum six_lock_type type, unsigned seq, unsigned long ip)
    329: six_owner_running(struct six_lock *lock)
    344: six_optimistic_spin(struct six_lock *lock, struct six_lock_waiter *wait, enum six_lock_type type)
    394: six_optimistic_spin(struct six_lock *lock, struct six_lock_waiter *wait, enum six_lock_type type)
    404: six_lock_slowpath(struct six_lock *lock, enum six_lock_type type, struct six_lock_waiter *wait, six_lock_should_sleep_fn should_sleep_fn, void *p, unsigned long ip)
    537: six_lock_ip_waiter(struct six_lock *lock, enum six_lock_type type, struct six_lock_waiter *wait, six_lock_should_sleep_fn should_sleep_fn, void *p, unsigned long ip)
    562: do_six_unlock_type(struct six_lock *lock, enum six_lock_type type)
    603: six_unlock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long ip)
    632: six_lock_downgrade(struct six_lock *lock)
    648: six_lock_tryupgrade(struct six_lock *lock)
    686: six_trylock_convert(struct six_lock *lock, enum six_lock_type from, enum six_lock_type to)
    715: six_lock_increment(struct six_lock *lock, enum six_lock_type type)
    753: six_lock_wakeup_all(struct six_lock *lock)
    775: six_lock_counts(struct six_lock *lock)
    810: six_lock_readers_add(struct six_lock *lock, int nr)
    829: six_lock_exit(struct six_lock *lock)
    839: __six_lock_init(struct six_lock *lock, const char *name, struct lock_class_key *key, enum six_lock_init_flags flags)
    [all...]
/linux-master/arch/sparc/include/asm/ |
spinlock_32.h
     16: #define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0)
     18: static inline void arch_spin_lock(arch_spinlock_t *lock)
     34: 	: "r" (lock)
     38: static inline int arch_spin_trylock(arch_spinlock_t *lock)
     43: 	: "r" (lock)
     48: static inline void arch_spin_unlock(arch_spinlock_t *lock)
     50: 	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
     59: * irq-safe write-lock, but readers can get non-irqsafe
     74: * but counter is non-zero, he has to release the lock and ...
    133: arch_write_unlock(arch_rwlock_t *lock)
    [all...]
/linux-master/include/asm-generic/ |
qrwlock.h
      3: * Queue read/write lock
     28: #define	_QW_LOCKED	0x0ff	/* A writer holds the lock */
     36: extern void queued_read_lock_slowpath(struct qrwlock *lock);
     37: extern void queued_write_lock_slowpath(struct qrwlock *lock);
     40: * queued_read_trylock - try to acquire read lock of a queued rwlock
     41: * @lock : Pointer to queued rwlock structure
     42: * Return: 1 if lock acquired, 0 if failed
     44: static inline int queued_read_trylock(struct qrwlock *lock)
     48: 	cnts = atomic_read(&lock->cnts);
     50: 	cnts = (u32)atomic_add_return_acquire(_QR_BIAS, &lock->cnts);
     63: queued_write_trylock(struct qrwlock *lock)
     78: queued_read_lock(struct qrwlock *lock)
     94: queued_write_lock(struct qrwlock *lock)
    108: queued_read_unlock(struct qrwlock *lock)
    120: queued_write_unlock(struct qrwlock *lock)
    130: queued_rwlock_is_contended(struct qrwlock *lock)
    [all...]
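The reader path visible at lines 44-50 speculatively adds a reader bias to the count word and backs it out if the writer bits are set. A portable C11 rendering of that idea; the toy_* constants mirror the kernel's _QR_BIAS/_QW_WMASK layout but this is not the kernel code:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define TOY_QW_MASK 0x1ffu       /* writer locked + waiting bits */
    #define TOY_QR_BIAS (1u << 9)    /* one reader */

    static bool toy_read_trylock(atomic_uint *cnts)
    {
        unsigned c = atomic_fetch_add_explicit(cnts, TOY_QR_BIAS,
                                               memory_order_acquire) + TOY_QR_BIAS;
        if (!(c & TOY_QW_MASK))
            return true;             /* no writer active or queued */

        /* undo our bias and report failure */
        atomic_fetch_sub_explicit(cnts, TOY_QR_BIAS, memory_order_relaxed);
        return false;
    }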
qspinlock.h
      7: * ticket-lock.h and only come looking here when you've considered all the
     48: * @lock: Pointer to queued spinlock structure
     51: static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
     57: 	return atomic_read(&lock->val);
     63: * @lock: queued spinlock structure
     66: * N.B. Whenever there are tasks waiting for the lock, it is considered
     67: * locked wrt the lockref code to avoid lock stealing by the lockref
     68: * code and change things underneath the lock. This also allows some
     71: static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
     73: 	return !lock.val.counter;
     81: queued_spin_is_contended(struct qspinlock *lock)
     90: queued_spin_trylock(struct qspinlock *lock)
    107: queued_spin_lock(struct qspinlock *lock)
    123: queued_spin_unlock(struct qspinlock *lock)
    133: virt_spin_lock(struct qspinlock *lock)
    [all...]
spinlock.h
      4: * 'Generic' ticket-lock implementation.
      8: * to a test-and-set lock.
     33: static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
     35: 	u32 val = atomic_fetch_add(1<<16, lock);
     49: 	atomic_cond_read_acquire(lock, ticket == (u16)VAL);
     53: static __always_inline bool arch_spin_trylock(arch_spinlock_t *lock)
     55: 	u32 old = atomic_read(lock);
     60: 	return atomic_try_cmpxchg(lock, &old, old + (1<<16)); /* SC, for RCsc */
     63: static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
     65: 	u16 *ptr = (u16 *)lock + IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
     71: arch_spin_value_unlocked(arch_spinlock_t lock)
     78: arch_spin_is_locked(arch_spinlock_t *lock)
     85: arch_spin_is_contended(arch_spinlock_t *lock)
    [all...]
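The lock word here packs two 16-bit halves: the high half is the next ticket to hand out (bumped by the fetch_add at line 35), the low half is the ticket currently being served. A standalone C11 sketch of the scheme; the kernel version layers the RCsc ordering noted in its comments on top of this:

    #include <stdatomic.h>
    #include <stdint.h>

    static void toy_ticket_lock(_Atomic uint32_t *lock)
    {
        uint32_t val = atomic_fetch_add_explicit(lock, 1u << 16,
                                                 memory_order_acquire);
        uint16_t ticket = (uint16_t)(val >> 16);   /* our place in line */

        while ((uint16_t)atomic_load_explicit(lock, memory_order_acquire) != ticket)
            ;                                      /* spin until served */
    }

    static void toy_ticket_unlock(_Atomic uint32_t *lock)
    {
        /* Mirror of the kernel's "(u16 *)lock" trick at line 65: store only
         * the owner half, so concurrent fetch_adds to the next-ticket half
         * are never clobbered. Assumes little endian; strictly speaking this
         * aliasing steps outside ISO C11. */
        _Atomic uint16_t *owner = (_Atomic uint16_t *)lock;
        uint16_t cur = atomic_load_explicit(owner, memory_order_relaxed);

        atomic_store_explicit(owner, (uint16_t)(cur + 1), memory_order_release);
    }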
/linux-master/tools/include/linux/ |
rwsem.h
      8: 	pthread_rwlock_t lock;	/* member of struct rw_semaphore */
     13: 	return pthread_rwlock_init(&sem->lock, NULL);
     18: 	return pthread_rwlock_destroy(&sem->lock);
     23: 	return pthread_rwlock_rdlock(&sem->lock);
     28: 	return pthread_rwlock_unlock(&sem->lock);
     33: 	return pthread_rwlock_wrlock(&sem->lock);
     38: 	return pthread_rwlock_unlock(&sem->lock);
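This shim lets kernel-style code run unmodified in userspace selftests; judging from the bodies above, the wrappers map init_rwsem()/down_read()/up_read()/down_write()/up_write() straight onto pthreads. A hypothetical test snippet (call init_rwsem() once in setup first):

    static struct rw_semaphore map_sem;

    static void map_reader(void)
    {
        down_read(&map_sem);     /* pthread_rwlock_rdlock() underneath */
        /* ... multiple readers may run here concurrently ... */
        up_read(&map_sem);
    }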
/linux-master/arch/arm/include/asm/ |
spinlock.h
     51: * A memory barrier is required after we get a lock, and before we
     56: static inline void arch_spin_lock(arch_spinlock_t *lock)
     62: 	prefetchw(&lock->slock);
     70: 	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
     75: 		lockval.tickets.owner = READ_ONCE(lock->tickets.owner);
     81: static inline int arch_spin_trylock(arch_spinlock_t *lock)
     86: 	prefetchw(&lock->slock);
     95: 	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
    107: static inline void arch_spin_unlock(arch_spinlock_t *lock)
    110: 	lock->tickets.owner++;
    114: arch_spin_value_unlocked(arch_spinlock_t lock)
    119: arch_spin_is_locked(arch_spinlock_t *lock)
    124: arch_spin_is_contended(arch_spinlock_t *lock)
    [all...]
/linux-master/include/trace/events/ |
lock.h
      3: #define TRACE_SYSTEM lock
     11: /* flags for lock:contention_begin */
     26: 	TP_PROTO(struct lockdep_map *lock, unsigned int subclass,
     30: 	TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip),
     34: 		__string(name, lock->name)
     40: 		__assign_str(name, lock->name);
     41: 		__entry->lockdep_addr = lock;
     50: DECLARE_EVENT_CLASS(lock,
     52: 	TP_PROTO(struct lockdep_map *lock, unsigned long ip),
     54: 	TP_ARGS(lock, ip),
    [all...]
/linux-master/arch/x86/include/asm/ |
qspinlock.h
     14: static __always_inline u32 queued_fetch_set_pending_acquire(struct qspinlock *lock)
     23: 	val = GEN_BINARY_RMWcc(LOCK_PREFIX "btsl", lock->val.counter, c,
     25: 	val |= atomic_read(&lock->val) & ~_Q_PENDING_MASK;
     31: extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
     33: extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
     34: extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
     40: * @lock : Pointer to queued spinlock structure
     44: static inline void native_queued_spin_unlock(struct qspinlock *lock)
     46: 	smp_store_release(&lock->locked, 0);
     49: static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
     54: queued_spin_unlock(struct qspinlock *lock)
     86: virt_spin_lock(struct qspinlock *lock)
    [all...]
/linux-master/include/linux/ |
local_lock_internal.h
     54: #define __local_lock_init(lock)					\
     58: 		debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
     59: 		lockdep_init_map_type(&(lock)->dep_map, #lock, &__key,	\
     62: 		local_lock_debug_init(lock);				\
     65: #define __local_lock(lock)					\
     68: 		local_lock_acquire(this_cpu_ptr(lock));		\
     71: #define __local_lock_irq(lock)					\
     74: 		local_lock_acquire(this_cpu_ptr(lock));		\
    [all...]
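These macros back the local_lock() API: a lock embedded in per-CPU data that compiles down to preempt_disable() plus lockdep annotations on !PREEMPT_RT kernels, and to a real per-CPU spinlock on PREEMPT_RT. A typical caller (the stats structure is hypothetical):

    struct toy_stats {
        local_lock_t lock;
        u64 events;
    };

    static DEFINE_PER_CPU(struct toy_stats, toy_stats) = {
        .lock = INIT_LOCAL_LOCK(lock),
    };

    static void toy_count_event(void)
    {
        local_lock(&toy_stats.lock);      /* serializes this CPU's copy */
        __this_cpu_add(toy_stats.events, 1);
        local_unlock(&toy_stats.lock);
    }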
mutex.h
     40: extern void mutex_destroy(struct mutex *lock);
     46: static inline void mutex_destroy(struct mutex *lock) {}
     76: extern void __mutex_init(struct mutex *lock, const char *name,
     81: * @lock: the mutex to be queried
     85: extern bool mutex_is_locked(struct mutex *lock);
    101: extern void __mutex_rt_init(struct mutex *lock, const char *name,
    125: extern void mutex_lock_nested(struct mutex *lock, unsigned int subclass);
    126: extern void _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
    128: extern int __must_check mutex_lock_interruptible_nested(struct mutex *lock,
    130: extern int __must_check mutex_lock_killable_nested(struct mutex *lock,
    [all...]
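A minimal caller of the API declared here, using the interruptible variant so a pending signal can abort the wait (the ioctl-style function is hypothetical):

    static DEFINE_MUTEX(foo_mutex);

    static int foo_ioctl_op(void)
    {
        int ret;

        ret = mutex_lock_interruptible(&foo_mutex);
        if (ret)
            return ret;     /* -EINTR: interrupted by a signal */

        /* ... sleeping is allowed while holding a mutex ... */

        mutex_unlock(&foo_mutex);
        return 0;
    }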