/linux-master/fs/ocfs2/dlm/
dlmast.c

	static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);

	/* ... checks whether a new lock level will obsolete a pending bast.
	 * For example, if dlm_thread queued a bast for an EX lock that
	 * was blocking another EX, but before sending the bast the
	 * lock owner downconverted to NL, the bast is now obsolete.
	 * This is needed because the lock and convert paths can queue
	 * ... */
	static int dlm_should_cancel_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
	{
		assert_spin_locked(&lock->spinlock);

		if (lock->ml.highest_blocked == LKM_IVMODE)
			return 0;
		BUG_ON(lock->ml.highest_blocked == LKM_NLMODE);
		...
	}

	__dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
	dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
	__dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock)
	dlm_update_lvb(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock)
	dlm_do_local_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock)
	dlm_do_remote_ast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock)
	dlm_do_local_bast(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int blocked_type)
	dlm_send_proxy_ast_msg(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int msg_type, int blocked_type, int flags)
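The cancel check above rests on a single invariant: a queued bast (blocking callback) only matters while the holder's granted mode still blocks the waiter; once the holder downconverts far enough, the notification is stale. A minimal standalone C sketch of that staleness test, with toy mode constants standing in for the LKM_* levels (illustrative only, not the ocfs2 implementation):

	#include <assert.h>
	#include <stdbool.h>

	/* Toy lock modes ordered by strength, mirroring NL < PR < EX. */
	enum mode { NL = 0, PR = 1, EX = 2, IVMODE = -1 };

	struct toy_lock {
		enum mode granted;          /* level the holder currently owns */
		enum mode highest_blocked;  /* strongest mode a pending bast reported */
	};

	/* True when a queued bast is obsolete: the holder has already
	 * downconverted far enough that it no longer blocks the waiter. */
	static bool should_cancel_bast(const struct toy_lock *lock)
	{
		if (lock->highest_blocked == IVMODE)
			return false;   /* nothing queued */
		/* A holder still at EX blocks everything: never cancel. */
		return lock->granted == NL ||
		       (lock->granted == PR && lock->highest_blocked == PR);
	}

	int main(void)
	{
		struct toy_lock l = { .granted = NL, .highest_blocked = EX };

		/* Holder downconverted EX -> NL before the bast went out. */
		assert(should_cancel_bast(&l));
		return 0;
	}

The real function additionally asserts that it runs under lock->spinlock, since the granted level and highest_blocked must be read as a consistent pair.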
dlmconvert.c

	/* underlying calls for lock conversion */

	/* ... only one that holds a lock on exit (res->spinlock). */

	dlmconvert_master(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			  struct dlm_lock *lock, int flags, int type)
	{
		...
		status = __dlmconvert_master(dlm, res, lock, flags, type, ...);
		...
		dlm_queue_ast(dlm, lock);
		...
	}

	/* performs lock conversion at the lockres master site
	 * ...
	 * taken: takes and drops lock->spinlock
	 * ...
	 * call_ast: whether ast should be called for this lock */
	__dlmconvert_master(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			    struct dlm_lock *lock, int flags, int type,
			    int *call_ast, int *kick_thread)

	dlm_revert_pending_convert(struct dlm_lock_resource *res, struct dlm_lock *lock)
	dlmconvert_remote(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			  struct dlm_lock *lock, int flags, int type)
	dlm_send_remote_convert_request(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
					struct dlm_lock *lock, int flags, int type)
/linux-master/include/linux/ |
rtmutex.h

	/**
	 * rt_mutex_base_is_locked - is the lock locked
	 * @lock: the mutex to be queried
	 */
	static inline bool rt_mutex_base_is_locked(struct rt_mutex_base *lock)
	{
		return READ_ONCE(lock->owner) != NULL;
	}

	extern void __rt_mutex_init(struct rt_mutex *lock, const char *name,
				    struct lock_class_key *key);

	extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
	extern void _rt_mutex_lock_nest_lock(struct rt_mutex *lock,
					     struct lockdep_map *nest_lock);
	#define rt_mutex_lock(lock)	rt_mutex_lock_nested(lock, 0)
	#define rt_mutex_lock_nest_lock(lock, nest_lock)	\
		...						\
		_rt_mutex_lock_nest_lock(lock, ...)
lockdep.h

	/*
	 * Every lock has a list of other locks that were taken after it.
	 * ...
	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
	 */

	/*
	 * struct lock_chain - lock dependency chain record
	 * ...
	 * @entry: the collided lock chains in lock_chain hash list
	 */

	extern void lockdep_reset_lock(struct lockdep_map *lock);

	extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name, ...);

	static inline void
	lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
			       struct lock_class_key *key, int subclass, ...)
	{
		lockdep_init_map_type(lock, name, key, subclass, inner, outer,
				      LD_LOCK_NORMAL);
	}

	static inline void
	lockdep_init_map_wait(struct lockdep_map *lock, const char *name, ...)
	{
		lockdep_init_map_waits(lock, name, ...);
	}

	lockdep_init_map(struct lockdep_map *lock, const char *name,
			 struct lock_class_key *key, int subclass)
	lockdep_match_key(struct lockdep_map *lock, struct lock_class_key *key)
	lock_is_held(const struct lockdep_map *lock)
	lock_set_subclass(struct lockdep_map *lock, unsigned int subclass,
			  unsigned long ip)
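The telescoping wrappers above all end in lockdep_init_map_type(); the usual entry point is lockdep_init_map() with a static lock_class_key, which is what gives a lock its class identity in lockdep reports. A hedged kernel-style sketch of registering a class for a driver-private lock ("my_dev" and its fields are hypothetical, and this only builds in a kernel tree with lockdep enabled):

	#include <linux/lockdep.h>
	#include <linux/spinlock.h>

	struct my_dev {
		spinlock_t lock;
		struct lockdep_map dep_map;
	};

	/* One static key per lock class: lockdep keys classes by address. */
	static struct lock_class_key my_dev_lock_key;

	static void my_dev_init(struct my_dev *d)
	{
		spin_lock_init(&d->lock);
		/* subclass 0; wait types default through the wrappers above */
		lockdep_init_map(&d->dep_map, "my_dev_lock", &my_dev_lock_key, 0);
	}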
rwlock_rt.h

	#define read_lock_irqsave(lock, flags)		\
		do {					\
			...				\
			rt_read_lock(lock);		\
			...				\
		} while (0)

	#define read_trylock(lock)	__cond_lock(lock, rt_read_trylock(lock))

	#define write_lock_nested(lock, subclass)	\
		rt_write_lock(((void)(subclass), (lock)))

	#define write_lock_irqsave(lock, flags)		\
		do {					\
			...				\
			rt_write_lock(lock);		\
			...				\
		} while (0)

	#define write_trylock(lock)	__cond_lock(lock, rt_write_trylock(lock))
/linux-master/drivers/hwspinlock/ |
hwspinlock_internal.h

	/*
	 * struct hwspinlock_ops - platform-specific hwspinlock handlers
	 * @trylock: make a single attempt to take the lock. returns 0 on
	 *	     failure and true on success. may _not_ sleep.
	 * @unlock: release the lock. always succeed. may _not_ sleep.
	 * @relax: optional, called by the hwspinlock
	 *	   core while spinning on a lock, between two successive
	 *	   invocations of trylock(). may _not_ sleep.
	 */
	struct hwspinlock_ops {
		int (*trylock)(struct hwspinlock *lock);
		void (*unlock)(struct hwspinlock *lock);
		void (*relax)(struct hwspinlock *lock);
	};

	/*
	 * struct hwspinlock - ...
	 * @bank: the hwspinlock_device structure which owns this lock
	 * @lock: initialized and used by hwspinlock core
	 * ...
	 */
	struct hwspinlock {
		...
		spinlock_t lock;
		...
	};

	/*
	 * struct hwspinlock_device - ...
	 * @base_id: id index of the first lock in this device
	 * ...
	 */
	struct hwspinlock_device {
		...
		struct hwspinlock lock[];
	};
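The ops comment above is the entire vendor contract: trylock() is one non-sleeping attempt, unlock() must always succeed, and relax() is an optional busy-wait hint called between attempts. A hedged kernel-style sketch of a driver filling in that vtable for a hypothetical memory-mapped lock register (the register semantics, and stashing the register address in the lock's private data, are assumptions for illustration):

	#include <linux/delay.h>
	#include <linux/io.h>

	/* Invented hardware: reading the lock register returns 1 when the
	 * lock was free and is now taken; writing 0 releases it. */
	static int demo_hwlock_trylock(struct hwspinlock *lock)
	{
		void __iomem *reg = lock->priv;

		/* single attempt, nonzero on success, never sleeps */
		return readl(reg) == 1;
	}

	static void demo_hwlock_unlock(struct hwspinlock *lock)
	{
		writel(0, lock->priv);	/* always succeeds */
	}

	static void demo_hwlock_relax(struct hwspinlock *lock)
	{
		ndelay(50);		/* back off between trylock attempts */
	}

	static const struct hwspinlock_ops demo_hwlock_ops = {
		.trylock = demo_hwlock_trylock,
		.unlock  = demo_hwlock_unlock,
		.relax   = demo_hwlock_relax,
	};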
/linux-master/arch/sparc/include/asm/ |
spinlock_types.h

	typedef struct {
		volatile unsigned char lock;
	} arch_spinlock_t;

	typedef struct {
		volatile unsigned int lock;
	} arch_rwlock_t;
/linux-master/arch/arm/include/asm/mach/ |
irq.h

	#define do_bad_IRQ(desc)			\
	do {						\
		raw_spin_lock(&desc->lock);		\
		handle_bad_irq(desc);			\
		raw_spin_unlock(&desc->lock);		\
	} while (0)
/linux-master/arch/mips/include/asm/ |
spinlock.h

	/**
	 * queued_spin_unlock - release a queued spinlock
	 * @lock : Pointer to queued spinlock structure
	 */
	static inline void queued_spin_unlock(struct qspinlock *lock)
	{
		...
		smp_store_release(&lock->locked, 0);
	}
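queued_spin_unlock() above is nothing more than a release store of 0 to the locked byte: every write made inside the critical section is published before the lock reads as free. The same shape in portable C11 atomics (a standalone analogue, not the kernel code):

	#include <stdatomic.h>

	struct toy_qspinlock {
		atomic_uchar locked;	/* stands in for lock->locked */
	};

	static void toy_queued_spin_unlock(struct toy_qspinlock *lock)
	{
		/* Release ordering: prior critical-section stores become
		 * visible before the lock byte reads as 0, matching
		 * smp_store_release(&lock->locked, 0). */
		atomic_store_explicit(&lock->locked, 0, memory_order_release);
	}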
/linux-master/tools/testing/selftests/bpf/progs/ |
tracing_failure.c

	int BPF_PROG(test_spin_lock, struct bpf_spin_lock *lock)
	...
	int BPF_PROG(test_spin_unlock, struct bpf_spin_lock *lock)
linked_list.c

	int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head,
			  bool leave_in_map)
	{
		...
		bpf_spin_lock(lock);
		...
		bpf_spin_unlock(lock);
		/* the same lock/unlock bracket repeats around each push,
		 * pop and check in the test body */
		...
	}

	list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
	list_in_list(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map)
	test_list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head)
	test_list_push_pop_multiple(struct bpf_spin_lock *lock, struct bpf_list_head *head)
	test_list_in_list(struct bpf_spin_lock *lock, struct bpf_list_head *head)
/linux-master/arch/powerpc/include/asm/ |
simple_spinlock_types.h

	typedef struct {
		volatile signed int lock;
	} arch_rwlock_t;
/linux-master/kernel/locking/ |
rtmutex.c

	#include <trace/events/lock.h>

	static inline void __ww_mutex_add_waiter(struct rt_mutex_waiter *waiter,
						 struct rt_mutex *lock,
						 struct ww_acquire_ctx *ww_ctx) { }
	static inline void __ww_mutex_check_waiters(struct rt_mutex *lock, ...) { }
	static inline void ww_mutex_lock_acquired(struct ww_mutex *lock, ...) { }
	static inline int __ww_mutex_check_kill(struct rt_mutex *lock, ...) { ... }

	/*
	 * lock->owner state tracking:
	 *
	 * lock->owner holds the task_struct pointer of the owner. Bit 0
	 * is used to keep track of the "lock has waiters" state.
	 *
	 * owner	bit0
	 * NULL		0	lock is free (fast acquire possible)
	 * NULL		1	lock is free and has waiters and the top waiter
	 *			is going to take the lock
	 * taskpointer	0	lock is held (fast release possible)
	 * taskpointer	1	lock is held and has waiters
	 * ...
	 */

	rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner)
	rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
	rt_mutex_clear_owner(struct rt_mutex_base *lock)
	clear_rt_mutex_waiters(struct rt_mutex_base *lock)
	fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock)
	rt_mutex_cmpxchg_acquire(struct rt_mutex_base *lock, struct task_struct *old, struct task_struct *new)
	rt_mutex_try_acquire(struct rt_mutex_base *lock)
	rt_mutex_cmpxchg_release(struct rt_mutex_base *lock, struct task_struct *old, struct task_struct *new)
	mark_rt_mutex_waiters(struct rt_mutex_base *lock)
	rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
	rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
	rt_mutex_adjust_prio(struct rt_mutex_base *lock, struct task_struct *p)
	try_to_take_rt_mutex(struct rt_mutex_base *lock, struct task_struct *task, struct rt_mutex_waiter *waiter)
	task_blocks_on_rt_mutex(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter, struct task_struct *task, struct ww_acquire_ctx *ww_ctx, enum rtmutex_chainwalk chwalk)
	mark_wakeup_next_waiter(struct rt_wake_q_head *wqh, struct rt_mutex_base *lock)
	__rt_mutex_slowtrylock(struct rt_mutex_base *lock)
	rt_mutex_slowtrylock(struct rt_mutex_base *lock)
	__rt_mutex_trylock(struct rt_mutex_base *lock)
	rt_mutex_slowunlock(struct rt_mutex_base *lock)
	__rt_mutex_unlock(struct rt_mutex_base *lock)
	rtmutex_spin_on_owner(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter, struct task_struct *owner)
	remove_waiter(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
	rt_mutex_slowlock_block(struct rt_mutex_base *lock, struct ww_acquire_ctx *ww_ctx, unsigned int state, struct hrtimer_sleeper *timeout, struct rt_mutex_waiter *waiter)
	__rt_mutex_slowlock(struct rt_mutex_base *lock, struct ww_acquire_ctx *ww_ctx, unsigned int state, enum rtmutex_chainwalk chwalk, struct rt_mutex_waiter *waiter)
	__rt_mutex_slowlock_locked(struct rt_mutex_base *lock, struct ww_acquire_ctx *ww_ctx, unsigned int state)
	rt_mutex_slowlock(struct rt_mutex_base *lock, struct ww_acquire_ctx *ww_ctx, unsigned int state)
	__rt_mutex_lock(struct rt_mutex_base *lock, unsigned int state)
	rtlock_slowlock_locked(struct rt_mutex_base *lock)
	rtlock_slowlock(struct rt_mutex_base *lock)
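The state table in the comment is the whole trick: lock->owner is a task pointer with bit 0 borrowed as the "has waiters" flag, which is what the encode/set/clear helpers listed above manipulate. A standalone C sketch of that pointer tagging (toy types, not the kernel implementation):

	#include <assert.h>
	#include <stdint.h>

	#define HAS_WAITERS 1UL			/* bit 0 of the owner word */

	struct task { char name[16]; };		/* stand-in for task_struct */

	static struct task *owner_decode(uintptr_t word)
	{
		return (struct task *)(word & ~HAS_WAITERS);
	}

	static uintptr_t owner_encode(struct task *owner, int has_waiters)
	{
		/* task structures are word-aligned, so bit 0 is free */
		return (uintptr_t)owner | (has_waiters ? HAS_WAITERS : 0);
	}

	int main(void)
	{
		static struct task t = { "holder" };
		uintptr_t word = owner_encode(&t, 1);	/* held, with waiters */

		assert(owner_decode(word) == &t);
		assert(word & HAS_WAITERS);
		return 0;
	}

This works because task_struct is at least word-aligned, so bit 0 of a genuine owner pointer is always zero.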
rtmutex_common.h

	/*
	 * @lock: Pointer to the rt_mutex on which the waiter blocks
	 * ...
	 * @tree is ordered by @lock->wait_lock
	 * @pi_tree is ordered by rt_mutex_owner(@lock)->pi_lock
	 */
	struct rt_mutex_waiter {
		...
		struct rt_mutex_base *lock;
		...
	};

	/*
	 * @head: The regular wake_q_head for sleeping lock variants
	 * @rtlock_task: Task pointer for RT lock (spin/rwlock) wakeups
	 */

	extern void rt_mutex_init_proxy_locked(struct rt_mutex_base *lock, ...);
	extern void rt_mutex_proxy_unlock(struct rt_mutex_base *lock);
	extern int __rt_mutex_start_proxy_lock(struct rt_mutex_base *lock, ...);
	extern int rt_mutex_start_proxy_lock(struct rt_mutex_base *lock, ...);

	rt_mutex_has_waiters(struct rt_mutex_base *lock)
	rt_mutex_waiter_is_top_waiter(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter)
	rt_mutex_top_waiter(struct rt_mutex_base *lock)
	rt_mutex_owner(struct rt_mutex_base *lock)
	__rt_mutex_base_init(struct rt_mutex_base *lock)
	debug_rt_mutex_unlock(struct rt_mutex_base *lock)
	debug_rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
qspinlock_paravirt.h

	/*
	 * ... not running. The one lock stealing attempt allowed at slowpath entry
	 * ...
	 *
	 * Hybrid PV queued/unfair lock
	 * ...
	 * it will be called once when a lock waiter enters the PV slowpath before
	 * ... pv_wait_head_or_lock() to signal that it is ready to spin on the lock.
	 * When that bit becomes visible to the incoming waiters, no lock stealing
	 * ... enter the MCS wait queue. So lock starvation shouldn't happen as long
	 * ... and hence disabling lock stealing.
	 *
	 * When the pending bit isn't set, the lock waiters will stay in the unfair
	 * mode spinning on the lock unless the MCS wait queue is empty. In this
	 * case, the lock waiter ...
	 */

	pv_hybrid_queued_unfair_trylock(struct qspinlock *lock)
	set_pending(struct qspinlock *lock)
	trylock_clear_pending(struct qspinlock *lock)

	struct pv_hash_entry {
		...
		struct qspinlock *lock;
		...
	};

	pv_hash(struct qspinlock *lock, struct pv_node *node)
	pv_unhash(struct qspinlock *lock)
	pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
	pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
	__pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked)
	__pv_queued_spin_unlock(struct qspinlock *lock)
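The hybrid policy described above reduces to one gate: lock stealing is permitted only while the pending bit is clear; once a queue-head waiter sets it, newcomers must join the MCS queue, which bounds starvation. A standalone C11 sketch of that gate (toy two-field layout, not the kernel's packed lock word):

	#include <stdatomic.h>
	#include <stdbool.h>

	struct toy_pvlock {
		atomic_uint locked;	/* 0 = free, 1 = held */
		atomic_uint pending;	/* raised by a queue head tired of waiting */
	};

	/* Steal only while no queue head has raised the pending bit. */
	static bool toy_unfair_trylock(struct toy_pvlock *l)
	{
		unsigned int expected = 0;

		if (atomic_load_explicit(&l->pending, memory_order_relaxed))
			return false;	/* fair mode: join the queue instead */
		return atomic_compare_exchange_strong_explicit(
			&l->locked, &expected, 1,
			memory_order_acquire, memory_order_relaxed);
	}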
ww_rt_mutex.c

	int ww_mutex_trylock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
	{
		struct rt_mutex *rtm = &lock->base;
		...
		ww_mutex_set_context_fastpath(lock, ww_ctx);
		...
	}

	__ww_rt_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx,
			   unsigned int state, unsigned long ip)
	{
		struct rt_mutex *rtm = &lock->base;
		...
		if (unlikely(ww_ctx == READ_ONCE(lock->ctx)))
			...
		ww_mutex_set_context_fastpath(lock, ww_ctx);
		...
	}

	ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
	{
		return __ww_rt_mutex_lock(lock, ctx, TASK_UNINTERRUPTIBLE, _RET_IP_);
	}

	ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
	ww_mutex_unlock(struct ww_mutex *lock)
qspinlock.c

	#include <trace/events/lock.h>

	/*
	 * ... based on the
	 * MCS lock. A copy of the original MCS lock paper ("Algorithms for Scalable
	 * Synchronization on Shared-Memory Multiprocessors") ...
	 *
	 * This queued spinlock implementation is based on the MCS lock, however to
	 * ...
	 * In particular; where the traditional MCS lock consists of a tail pointer
	 * ... the tail is encoded as a compact CPU + index
	 * number. With one byte for the lock value and 3 bytes for the tail, only a
	 * 32-bit word is now needed. Even though we only need 1 bit for the lock,
	 * ...
	 * We also change the first spinner to spin on the lock bit instead of its
	 * node; whereby avoiding the need to carry a node from lock to unlock, and
	 * preserving existing lock API ...
	 */

	clear_pending(struct qspinlock *lock)
	clear_pending_set_locked(struct qspinlock *lock)
	xchg_tail(struct qspinlock *lock, u32 tail)
	queued_fetch_set_pending_acquire(struct qspinlock *lock)
	set_locked(struct qspinlock *lock)
	__pv_kick_node(struct qspinlock *lock, struct mcs_spinlock *node)
	__pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
	queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
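The space optimization in the comment is that the MCS "tail pointer" is compressed to a (CPU number, per-context index) pair, so the locked byte, pending bit and tail share one 32-bit word. A standalone sketch of that tail encoding, following the layout documented in the kernel's qspinlock headers (the exact widths and offsets are reproduced here as stated assumptions):

	#include <assert.h>
	#include <stdint.h>

	/* Illustrative 32-bit layout, after the kernel's description:
	 *  bits  0-7  : locked byte
	 *  bit   8    : pending
	 *  bits 16-17 : tail index (which of 4 per-CPU queue nodes)
	 *  bits 18-31 : tail CPU + 1 (0 means "no tail")            */
	#define TAIL_IDX_SHIFT 16
	#define TAIL_CPU_SHIFT 18

	static uint32_t encode_tail(unsigned int cpu, unsigned int idx)
	{
		return ((cpu + 1) << TAIL_CPU_SHIFT) | (idx << TAIL_IDX_SHIFT);
	}

	static void decode_tail(uint32_t tail, unsigned int *cpu, unsigned int *idx)
	{
		*cpu = (tail >> TAIL_CPU_SHIFT) - 1;
		*idx = (tail >> TAIL_IDX_SHIFT) & 3;
	}

	int main(void)
	{
		unsigned int cpu, idx;

		decode_tail(encode_tail(5, 2), &cpu, &idx);
		assert(cpu == 5 && idx == 2);
		return 0;
	}

Encoding the CPU as cpu + 1 is what lets an all-zero word mean "unlocked, no queue" without reserving CPU 0.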
/linux-master/arch/mips/include/asm/octeon/ |
cvmx-spinlock.h

	/* @lock: Lock to initialize */
	static inline void cvmx_spinlock_init(cvmx_spinlock_t *lock)
	{
		lock->value = CVMX_SPINLOCK_UNLOCKED_VAL;
	}

	/* @lock: Lock to check */
	static inline int cvmx_spinlock_locked(cvmx_spinlock_t *lock)
	{
		return lock->value != CVMX_SPINLOCK_UNLOCKED_VAL;
	}

	/* Releases lock
	 * @lock: pointer to lock structure */
	static inline void cvmx_spinlock_unlock(cvmx_spinlock_t *lock)
	{
		...
	}

	cvmx_spinlock_trylock(cvmx_spinlock_t *lock)
	cvmx_spinlock_lock(cvmx_spinlock_t *lock)
/linux-master/arch/arm64/kvm/hyp/include/nvhe/ |
spinlock.h

	static inline void hyp_spin_lock(hyp_spinlock_t *lock)
	{
		...
		asm volatile(
		...
		/* Did we get the lock? */
		...
		/* We got the lock. Critical section starts here. */
		...
		: "=&r" (lockval), "=&r" (newval), "=&r" (tmp), "+Q" (*lock)
		: "Q" (lock->owner)
		...);
	}

	static inline void hyp_spin_unlock(hyp_spinlock_t *lock)
	{
		...
		asm volatile(
		...
		: "=Q" (lock->owner), "=&r" (tmp)
		...);
	}

	static inline bool hyp_spin_is_locked(hyp_spinlock_t *lock)
	{
		hyp_spinlock_t lockval = READ_ONCE(*lock);

		return lockval.owner != lockval.next;
	}

	static inline void hyp_assert_lock_held(hyp_spinlock_t *lock) { ... }
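The inline asm above implements a ticket lock: hyp_spin_lock() atomically takes the next ticket and waits until the owner counter reaches it, and hyp_spin_unlock() bumps owner. A portable C11 analogue of the same discipline (a sketch, not the EL2 asm):

	#include <stdatomic.h>

	struct toy_ticket_lock {
		atomic_uint next;	/* ticket dispenser */
		atomic_uint owner;	/* ticket currently being served */
	};

	static void toy_ticket_lock(struct toy_ticket_lock *l)
	{
		unsigned int me = atomic_fetch_add_explicit(&l->next, 1,
							    memory_order_relaxed);

		/* the hyp code waits with wfe instead of a raw spin */
		while (atomic_load_explicit(&l->owner, memory_order_acquire) != me)
			;
	}

	static void toy_ticket_unlock(struct toy_ticket_lock *l)
	{
		atomic_fetch_add_explicit(&l->owner, 1, memory_order_release);
	}

This is also why hyp_spin_is_locked() can just compare the two halves: the lock is held exactly when owner != next.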
/linux-master/drivers/md/ |
dm-io-tracker.h

	struct dm_io_tracker {
		spinlock_t lock;
		...
	};

	/* init */
	spin_lock_init(&iot->lock);

	/* updates take the lock with interrupts disabled */
	spin_lock_irq(&iot->lock);
	...
	spin_unlock_irq(&iot->lock);

	/* one path saves and restores the interrupt flags instead */
	spin_lock_irqsave(&iot->lock, flags);
	...
	spin_unlock_irqrestore(&iot->lock, flags);
/linux-master/drivers/md/persistent-data/ |
dm-block-manager.c

	/* ... a
	 * trace is also emitted for the previous lock acquisition. */
	struct block_lock {
		spinlock_t lock;
		...
	};

	static unsigned int __find_holder(struct block_lock *lock,
					  struct task_struct *task)
	{
		...
		if (lock->holders[i] == task)
		...
	}

	/* call this *after* you increment lock->count */
	static void __add_holder(struct block_lock *lock, struct task_struct *task)
	{
		unsigned int h = __find_holder(lock, NULL);
		...
		lock->holders[h] = task;
		...
		t = lock->traces + h;
		...
	}

	/* call this *before* you decrement lock->count */
	__del_holder(struct block_lock *lock, struct task_struct *task)

	__check_holder(struct block_lock *lock)
	__wake_many(struct block_lock *lock)
	bl_init(struct block_lock *lock)
	__available_for_read(struct block_lock *lock)
	bl_down_read(struct block_lock *lock)
	bl_down_read_nonblock(struct block_lock *lock)
	bl_up_read(struct block_lock *lock)
	bl_down_write(struct block_lock *lock)
	bl_up_write(struct block_lock *lock)

	struct buffer_aux {
		...
		struct block_lock lock;
		...
	};
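The holder bookkeeping above is a small fixed-slot registry: __find_holder() scans for a task (or for NULL to find a free slot), __add_holder() must run after lock->count is incremented and __del_holder() before it is decremented, so a slot is always populated while the count says the lock is held. A standalone sketch of that slot discipline (toy types; the real code also snapshots a stack trace per holder):

	#include <assert.h>
	#include <stddef.h>

	#define MAX_HOLDERS 4

	struct task { int id; };

	struct toy_block_lock {
		int count;			/* <0 writer, >0 readers */
		struct task *holders[MAX_HOLDERS];
	};

	/* Scan for a given task; pass NULL to find a free slot. */
	static unsigned int find_holder(struct toy_block_lock *lock,
					struct task *task)
	{
		unsigned int i;

		for (i = 0; i < MAX_HOLDERS; i++)
			if (lock->holders[i] == task)
				return i;
		return MAX_HOLDERS;	/* treated as "not found" */
	}

	/* Call *after* incrementing lock->count. */
	static void add_holder(struct toy_block_lock *lock, struct task *task)
	{
		unsigned int h = find_holder(lock, NULL);

		assert(h < MAX_HOLDERS);
		lock->holders[h] = task;
	}

	/* Call *before* decrementing lock->count. */
	static void del_holder(struct toy_block_lock *lock, struct task *task)
	{
		unsigned int h = find_holder(lock, task);

		assert(h < MAX_HOLDERS);
		lock->holders[h] = NULL;
	}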
/linux-master/drivers/clk/mmp/ |
clk-apbc.c

	struct clk_apbc {
		...
		spinlock_t *lock;
		...
	};

	/* every register update is wrapped in the optional lock: */
	if (apbc->lock)
		spin_lock_irqsave(apbc->lock, flags);
	...
	if (apbc->lock)
		spin_unlock_irqrestore(apbc->lock, flags);

	mmp_clk_register_apbc(const char *name, const char *parent_name,
			      void __iomem *base, unsigned int delay,
			      unsigned int apbc_flags, spinlock_t *lock)
clk-apmu.c

	struct clk_apmu {
		...
		spinlock_t *lock;
		...
	};

	/* same optional-lock pattern as clk-apbc.c: */
	if (apmu->lock)
		spin_lock_irqsave(apmu->lock, flags);
	...
	if (apmu->lock)
		spin_unlock_irqrestore(apmu->lock, flags);

	mmp_clk_register_apmu(const char *name, const char *parent_name,
			      void __iomem *base, u32 enable_mask, spinlock_t *lock)
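Both MMP clock drivers use the same convention: the spinlock is optional, passed in by the caller at registration, and every register read-modify-write is bracketed by "if (clk->lock)" guards. A hedged kernel-style sketch of the pattern (the demo_clk structure and the register update are hypothetical):

	#include <linux/io.h>
	#include <linux/spinlock.h>

	struct demo_clk {
		void __iomem *base;
		spinlock_t *lock;	/* NULL when the caller needs no locking */
	};

	static void demo_clk_set_bits(struct demo_clk *clk, u32 mask)
	{
		unsigned long flags = 0;
		u32 data;

		if (clk->lock)
			spin_lock_irqsave(clk->lock, flags);

		data = readl(clk->base);
		writel(data | mask, clk->base);

		if (clk->lock)
			spin_unlock_irqrestore(clk->lock, flags);
	}

Taking the lock as a pointer lets several clocks registered against the same register bank share one lock, while standalone clocks skip locking entirely.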
/linux-master/tools/perf/util/ |
rwsem.c

	int init_rwsem(struct rw_semaphore *sem)
	{
		return pthread_rwlock_init(&sem->lock, NULL);
	}

	int exit_rwsem(struct rw_semaphore *sem)
	{
		return pthread_rwlock_destroy(&sem->lock);
	}

	int down_read(struct rw_semaphore *sem)
	{
		return perf_singlethreaded ? 0 : pthread_rwlock_rdlock(&sem->lock);
	}

	int up_read(struct rw_semaphore *sem)
	{
		return perf_singlethreaded ? 0 : pthread_rwlock_unlock(&sem->lock);
	}

	int down_write(struct rw_semaphore *sem)
	{
		return perf_singlethreaded ? 0 : pthread_rwlock_wrlock(&sem->lock);
	}

	int up_write(struct rw_semaphore *sem)
	{
		return perf_singlethreaded ? 0 : pthread_rwlock_unlock(&sem->lock);
	}
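perf's wrapper gives rwsem a single-threaded fast path: while perf_singlethreaded is set, every lock operation returns 0 without touching the pthread rwlock. A standalone C analogue of that shape (the flag here is a plain global; perf maintains its own elsewhere):

	#include <pthread.h>
	#include <stdbool.h>

	static bool singlethreaded = true;	/* flipped before threads start */

	struct toy_rwsem {
		pthread_rwlock_t lock;
	};

	static int toy_rwsem_init(struct toy_rwsem *sem)
	{
		return pthread_rwlock_init(&sem->lock, NULL);
	}

	static int toy_down_read(struct toy_rwsem *sem)
	{
		return singlethreaded ? 0 : pthread_rwlock_rdlock(&sem->lock);
	}

	static int toy_up_read(struct toy_rwsem *sem)
	{
		return singlethreaded ? 0 : pthread_rwlock_unlock(&sem->lock);
	}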
/linux-master/arch/x86/kernel/ |
paravirt-spinlocks.c

	__visible void __native_queued_spin_unlock(struct qspinlock *lock)
	{
		native_queued_spin_unlock(lock);
	}

	bool pv_is_native_spin_unlock(void)
	{
		return pv_ops.lock.queued_spin_unlock.func ==
			__raw_callee_save___native_queued_spin_unlock;
	}

	...
	return pv_ops.lock.vcpu_is_preempted.func ==
		__raw_callee_save___native_vcpu_is_preempted;
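pv_is_native_spin_unlock() above is a plain function-pointer comparison: if the pv_ops slot still holds the native thunk, no hypervisor ever patched it. The same idea in standalone C (a hypothetical two-entry op table, not the x86 pv_ops layout):

	#include <stdbool.h>
	#include <stdio.h>

	struct ops { void (*unlock)(void); };

	static void native_unlock(void) { }
	static void pv_unlock(void)     { }

	static struct ops ops = { .unlock = native_unlock };

	/* Native iff the slot was never overridden by a paravirt variant. */
	static bool is_native_unlock(void)
	{
		return ops.unlock == native_unlock;
	}

	int main(void)
	{
		printf("native: %d\n", is_native_unlock());	/* 1 */
		ops.unlock = pv_unlock;		/* hypervisor patches the slot */
		printf("native: %d\n", is_native_unlock());	/* 0 */
		return 0;
	}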