Searched refs:lock (Results 76 - 100 of 6514) sorted by relevance


/linux-master/arch/x86/kernel/
paravirt-spinlocks.c
12 __visible void __native_queued_spin_unlock(struct qspinlock *lock) argument
14 native_queued_spin_unlock(lock);
20 return pv_ops.lock.queued_spin_unlock.func ==
32 return pv_ops.lock.vcpu_is_preempted.func ==
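The two comparisons above are the standard paravirt probe: an op is considered "still native" if its stored function pointer equals the native implementation. A minimal self-contained sketch of the idiom (the ops structure and names here are hypothetical and simplified; the real pv_ops stores callee-save wrappers, hence the .func member in the excerpt):

#include <stdbool.h>

struct lock_ops {
	void (*queued_spin_unlock)(void *lock);	/* hypothetical hook */
};

static void native_queued_spin_unlock(void *lock) { /* release the lock */ }

static struct lock_ops ops = {
	.queued_spin_unlock = native_queued_spin_unlock,
};

/* Mirrors the pv_ops test above: true if no hypervisor patched the hook. */
static bool unlock_is_native(void)
{
	return ops.queued_spin_unlock == native_queued_spin_unlock;
}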
/linux-master/include/linux/
rwlock_rt.h
51 #define read_lock_irqsave(lock, flags) \
54 rt_read_lock(lock); \
58 #define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock))
93 #define write_lock_nested(lock, subclass) rt_write_lock(((void)(subclass), (lock)))
107 #define write_lock_irqsave(lock, flags) \
110 rt_write_lock(lock); \
114 #define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock))
[all...]
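These RT variants keep the classic rwlock API at call sites while mapping readers and writers onto rtmutex-based primitives; note in the excerpt that read_lock_irqsave() only calls rt_read_lock(), so on RT no interrupts are actually disabled and the flags argument survives purely for API compatibility. A usage sketch under that assumption:

#include <linux/spinlock.h>

static DEFINE_RWLOCK(demo_lock);	/* hypothetical demo lock */
static int demo_state;

/* Same source as on a non-RT kernel; on RT this boils down to
 * rt_read_lock() and flags is simply set to zero. */
static int demo_get(void)
{
	unsigned long flags;
	int v;

	read_lock_irqsave(&demo_lock, flags);
	v = demo_state;
	read_unlock_irqrestore(&demo_lock, flags);
	return v;
}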
seqlock.h
45 * Make sure we are not reinitializing a held lock:
92 * A sequence counter which associates the lock used for writer
107 * @lock: Pointer to the associated lock
110 * LOCKNAME @lock. The lock is associated to the sequence counter in the
120 * @lock: Pointer to the associated lock
127 __SEQ_LOCK(____s->lock = (_lock)); \
130 #define seqcount_raw_spinlock_init(s, lock) seqcount_LOCKNAME_init(s, lock, raw_spinlock)
1073 read_seqbegin_or_lock(seqlock_t *lock, int *seq) argument
1088 need_seqretry(seqlock_t *lock, int seq) argument
1101 done_seqretry(seqlock_t *lock, int seq) argument
1128 read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq) argument
1153 done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags) argument
[all...]
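The read_seqbegin_or_lock()/need_seqretry()/done_seqretry() trio above implements a two-pass reader: a lockless pass first, then a locked pass if the sequence moved underneath it. A sketch of the canonical caller shape (demo names assumed; the seqlock would be initialized elsewhere with seqlock_init()):

#include <linux/seqlock.h>

static seqlock_t demo_seqlock;
static u64 demo_a, demo_b;

static u64 read_consistent_pair(void)
{
	int seq = 0;	/* even: lockless attempt; odd: take the lock */
	u64 a, b;

again:
	read_seqbegin_or_lock(&demo_seqlock, &seq);
	a = demo_a;
	b = demo_b;
	if (need_seqretry(&demo_seqlock, seq)) {
		seq = 1;	/* force the locked pass and retry */
		goto again;
	}
	done_seqretry(&demo_seqlock, seq);
	return a + b;
}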
fs_struct.h
11 spinlock_t lock; member in struct:fs_struct
29 spin_lock(&fs->lock);
32 spin_unlock(&fs->lock);
37 spin_lock(&fs->lock);
40 spin_unlock(&fs->lock);
spinlock_types.h
51 struct rt_mutex_base lock; member in struct:spinlock
59 .lock = __RT_MUTEX_BASE_INITIALIZER(name.lock), \
65 .lock = __RT_MUTEX_BASE_INITIALIZER(name.lock), \
/linux-master/tools/testing/radix-tree/linux/
lockdep.h
10 static inline void lockdep_set_class(spinlock_t *lock, argument
/linux-master/include/net/netns/
xdp.h
9 struct mutex lock; member in struct:netns_xdp
/linux-master/fs/btrfs/
locking.h
32 * When we COW a block we are holding the lock on the original block,
34 * when we lock the newly allocated COW'd block. Handle this by having
40 * Oftentimes we need to lock adjacent nodes on the same level while
41 * still holding the lock on the original node we searched to, such as
52 * When splitting we will be holding a lock on the left/right node when
98 * @lock: The lockdep map corresponding to a wait event
101 * the lockdep map as writer (exclusive lock) because it has to block until all
102 * the threads that hold the lock as readers signal the condition for the wait
105 #define btrfs_might_wait_for_event(owner, lock) \
107 rwsem_acquire(&owner->lock##_map, 0, 0, _THIS_IP_); \
[all...]
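The btrfs_might_wait_for_event() macro shown above is a lockdep-only annotation: acquiring the wait event's lockdep map as a writer and releasing it immediately teaches lockdep that this context may block until the reader-side holders signal the event, without taking any real lock. A sketch of the same trick with hypothetical names:

#include <linux/lockdep.h>

/* "This context might sleep waiting for the foo event." No real lock is
 * taken; the acquire/release pair only records the dependency so lockdep
 * can report inversions against the reader side. foo_map is a
 * hypothetical, already-initialized struct lockdep_map member. */
#define might_wait_for_foo(owner)				\
do {								\
	rwsem_acquire(&(owner)->foo_map, 0, 0, _THIS_IP_);	\
	rwsem_release(&(owner)->foo_map, _THIS_IP_);		\
} while (0)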
locking.c
18 * Lockdep class keys for extent_buffer->lock's in this root. For a given
94 lockdep_set_class_and_name(&eb->lock, &ks->keys[level], ks->names[level]);
125 * - try-lock semantics for readers and writers
132 * __btrfs_tree_read_lock - lock extent buffer for read
136 * This takes the read lock on the extent buffer, using the specified nesting
146 down_read_nested(&eb->lock, nest);
156 * Try-lock for read.
162 if (down_read_trylock(&eb->lock)) {
170 * Try-lock for write.
176 if (down_write_trylock(&eb->lock)) {
340 btrfs_drew_lock_init(struct btrfs_drew_lock *lock) argument
349 btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock) argument
366 btrfs_drew_write_lock(struct btrfs_drew_lock *lock) argument
375 btrfs_drew_write_unlock(struct btrfs_drew_lock *lock) argument
381 btrfs_drew_read_lock(struct btrfs_drew_lock *lock) argument
396 btrfs_drew_read_unlock(struct btrfs_drew_lock *lock) argument
[all...]
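The try-lock helpers at excerpt lines 162 and 176 return immediately rather than blocking, which lets callers attempt an opportunistic fast path before committing to a blocking acquisition. A sketch of that caller pattern (helper names as in btrfs; the fallback policy is the caller's choice):

#include "locking.h"	/* fs/btrfs internal header */

static void read_extent_buffer_locked(struct extent_buffer *eb)
{
	if (!btrfs_try_tree_read_lock(eb))
		btrfs_tree_read_lock(eb);	/* blocking fallback */

	/* ... examine the buffer under the read lock ... */

	btrfs_tree_read_unlock(eb);
}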
/linux-master/fs/ocfs2/dlm/
dlmlock.c
5 * underlying calls for lock creation
45 struct dlm_lock *lock, int flags);
49 static void dlm_lock_detach_lockres(struct dlm_lock *lock);
66 /* Tell us whether we can grant a new lock request.
71 * returns: 1 if the lock can be granted, 0 otherwise.
74 struct dlm_lock *lock)
79 if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
84 if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
87 lock->ml.type))
94 /* performs lock creation
73 dlm_can_grant_new_lock(struct dlm_lock_resource *res, struct dlm_lock *lock) argument
101 dlmlock_master(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags) argument
181 dlm_revert_pending_lock(struct dlm_lock_resource *res, struct dlm_lock *lock) argument
197 dlmlock_remote(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags) argument
283 dlm_send_remote_lock_request(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_lock *lock, int flags) argument
324 dlm_lock_get(struct dlm_lock *lock) argument
329 dlm_lock_put(struct dlm_lock *lock) argument
336 struct dlm_lock *lock; local
356 dlm_lock_attach_lockres(struct dlm_lock *lock, struct dlm_lock_resource *res) argument
364 dlm_lock_detach_lockres(struct dlm_lock *lock) argument
408 struct dlm_lock *lock; local
547 struct dlm_lock *lock = NULL; local
[all...]
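dlm_can_grant_new_lock(), collapsed at excerpt line 73, walks both queues of the lock resource. A sketch of its logic under the field names visible above: a new request is grantable only if its mode is compatible with every granted lock, and with both the held and requested modes of every converting lock:

#include "dlmcommon.h"	/* fs/ocfs2/dlm internal header */

static int can_grant_new_lock(struct dlm_lock_resource *res,
			      struct dlm_lock *lock)
{
	struct dlm_lock *tmplock;

	list_for_each_entry(tmplock, &res->granted, list)
		if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
			return 0;

	list_for_each_entry(tmplock, &res->converting, list) {
		if (!dlm_lock_compatible(tmplock->ml.type, lock->ml.type))
			return 0;
		if (!dlm_lock_compatible(tmplock->ml.convert_type,
					 lock->ml.type))
			return 0;
	}

	return 1;
}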
dlmconvert.h
13 struct dlm_lock *lock, int flags, int type);
16 struct dlm_lock *lock, int flags, int type);
/linux-master/kernel/locking/
spinlock_rt.c
10 * rtmutex, and restored when the lock has been acquired. Regular wakeups
19 * rcu_read_lock() across the lock held section.
46 static __always_inline void __rt_spin_lock(spinlock_t *lock) argument
49 rtlock_lock(&lock->lock);
54 void __sched rt_spin_lock(spinlock_t *lock) argument
56 spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
57 __rt_spin_lock(lock);
62 void __sched rt_spin_lock_nested(spinlock_t *lock, int subclass) argument
64 spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
69 rt_spin_lock_nest_lock(spinlock_t *lock, struct lockdep_map *nest_lock) argument
78 rt_spin_unlock(spinlock_t *lock) argument
94 rt_spin_lock_unlock(spinlock_t *lock) argument
101 __rt_spin_trylock(spinlock_t *lock) argument
116 rt_spin_trylock(spinlock_t *lock) argument
122 rt_spin_trylock_bh(spinlock_t *lock) argument
135 __rt_spin_lock_init(spinlock_t *lock, const char *name, struct lock_class_key *key, bool percpu) argument
[all...]
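The point of this file is that on PREEMPT_RT a spinlock_t is a sleeping lock built on an rtmutex: the holder stays preemptible and contending tasks block. Code that genuinely cannot sleep must pick the raw type explicitly, e.g.:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(sleeping_ok_lock);	/* rtmutex-backed on RT */
static DEFINE_RAW_SPINLOCK(hard_atomic_lock);	/* always a spinning lock */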
/linux-master/drivers/gpu/drm/xe/
xe_bo_evict.c
54 spin_lock(&xe->pinned.lock);
62 spin_unlock(&xe->pinned.lock);
69 spin_lock(&xe->pinned.lock);
72 spin_unlock(&xe->pinned.lock);
76 spin_lock(&xe->pinned.lock);
79 spin_unlock(&xe->pinned.lock);
88 spin_lock(&xe->pinned.lock);
96 spin_unlock(&xe->pinned.lock);
105 spin_lock(&xe->pinned.lock);
107 spin_unlock(&xe->pinned.lock);
[all...]
/linux-master/drivers/clk/mmp/
clk-gate.c
31 if (gate->lock)
32 spin_lock_irqsave(gate->lock, flags);
39 if (gate->lock)
40 spin_unlock_irqrestore(gate->lock, flags);
57 if (gate->lock)
58 spin_lock_irqsave(gate->lock, flags);
65 if (gate->lock)
66 spin_unlock_irqrestore(gate->lock, flags);
75 if (gate->lock)
76 spin_lock_irqsave(gate->lock, flags);
92 mmp_clk_register_gate(struct device *dev, const char *name, const char *parent_name, unsigned long flags, void __iomem *reg, u32 mask, u32 val_enable, u32 val_disable, unsigned int gate_flags, spinlock_t *lock) argument
[all...]
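Every register access in this driver is bracketed by the same optional-lock guard: gate->lock may be NULL when the register is not shared with another clock, so the lock and unlock are guarded by the same test. A sketch of the pattern (the helper and its register write are hypothetical):

#include <linux/spinlock.h>
#include <linux/io.h>
#include "clk.h"	/* drivers/clk/mmp internal header */

static void mmp_gate_write(struct mmp_clk_gate *gate, u32 val)
{
	unsigned long flags = 0;

	if (gate->lock)
		spin_lock_irqsave(gate->lock, flags);

	writel(val, gate->reg);		/* the possibly-shared register */

	if (gate->lock)
		spin_unlock_irqrestore(gate->lock, flags);
}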
/linux-master/drivers/md/dm-vdo/
dedupe.c
14 * deduplicate against a single block instead of being serialized through a PBN read lock. Only one
22 * A hash_lock acts like a state machine perhaps more than as a lock. Other than the starting and
25 * containing the lock. An asynchronous operation is almost always performed upon entering a state,
28 * In all states except DEDUPING, there is a single data_vio, called the lock agent, performing the
29 * asynchronous operations on behalf of the lock. The agent will change during the lifetime of the
30 * lock if the lock is shared by more than one data_vio. data_vios waiting to deduplicate are kept
31 * on a wait queue. Viewed a different way, the agent holds the lock exclusively until the lock
32 * enters the DEDUPING state, at which point it becomes a shared lock tha
288 spinlock_t lock; member in struct:hash_zones
347 return_hash_lock_to_pool(struct hash_zone *zone, struct hash_lock *lock) argument
377 hash_lock_key(struct hash_lock *lock) argument
429 dequeue_lock_waiter(struct hash_lock *lock) argument
528 retire_lock_agent(struct hash_lock *lock) argument
545 wait_on_hash_lock(struct hash_lock *lock, struct data_vio *data_vio) argument
586 start_bypassing(struct hash_lock *lock, struct data_vio *agent) argument
594 struct hash_lock *lock = data_vio->hash_lock; local
637 struct hash_lock *lock = agent->hash_lock; local
693 struct hash_lock *lock = agent->hash_lock; local
715 start_unlocking(struct hash_lock *lock, struct data_vio *agent) argument
749 struct hash_lock *lock = agent->hash_lock; local
795 start_updating(struct hash_lock *lock, struct data_vio *agent) argument
818 finish_deduping(struct hash_lock *lock, struct data_vio *data_vio) argument
872 struct hash_lock *lock, *new_lock; local
983 launch_dedupe(struct hash_lock *lock, struct data_vio *data_vio, bool has_claim) argument
1008 start_deduping(struct hash_lock *lock, struct data_vio *agent, bool agent_is_done) argument
1080 struct hash_lock *lock = agent->hash_lock; local
1195 start_verifying(struct hash_lock *lock, struct data_vio *agent) argument
1229 struct hash_lock *lock = agent->hash_lock; local
1280 acquire_provisional_reference(struct data_vio *agent, struct pbn_lock *lock, struct slab_depot *depot) argument
1312 struct pbn_lock *lock; local
1416 start_locking(struct hash_lock *lock, struct data_vio *agent) argument
1446 finish_writing(struct hash_lock *lock, struct data_vio *agent) argument
1510 select_writing_agent(struct hash_lock *lock) argument
1559 start_writing(struct hash_lock *lock, struct data_vio *agent) argument
1669 struct hash_lock *lock = agent->hash_lock; local
1706 start_querying(struct hash_lock *lock, struct data_vio *data_vio) argument
1723 report_bogus_lock_state(struct hash_lock *lock, struct data_vio *data_vio) argument
1744 struct hash_lock *lock = data_vio->hash_lock; local
1789 is_hash_collision(struct hash_lock *lock, struct data_vio *candidate) argument
1841 struct hash_lock *lock; local
1915 struct hash_lock *lock = data_vio->hash_lock; local
2739 dump_hash_lock(const struct hash_lock *lock) argument
[all...]
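The long comment above describes hash_lock as a state machine driven by a single "agent" data_vio. The state names below are inferred from the start_*/finish_* helpers visible in the excerpt and are only a sketch of the real enum in dedupe.c:

enum hash_lock_state {
	HASH_LOCK_QUERYING,	/* agent queries the dedupe index */
	HASH_LOCK_WRITING,	/* agent writes the new data block */
	HASH_LOCK_UPDATING,	/* agent updates the index entry */
	HASH_LOCK_LOCKING,	/* agent acquires the PBN lock */
	HASH_LOCK_VERIFYING,	/* agent verifies the advised duplicate */
	HASH_LOCK_DEDUPING,	/* shared: waiters dedupe concurrently */
	HASH_LOCK_UNLOCKING,	/* agent releases the PBN lock */
	HASH_LOCK_BYPASSING,	/* dedupe abandoned; data_vios go it alone */
};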
physical-zone.c
26 /* Each user data_vio needs a PBN read lock and write lock. */
54 static inline bool has_lock_type(const struct pbn_lock *lock, enum pbn_lock_type type) argument
56 return (lock->implementation == &LOCK_IMPLEMENTATIONS[type]);
60 * vdo_is_pbn_read_lock() - Check whether a pbn_lock is a read lock.
61 * @lock: The lock to check.
63 * Return: true if the lock is a read lock.
65 bool vdo_is_pbn_read_lock(const struct pbn_lock *lock) argument
70 set_pbn_lock_type(struct pbn_lock *lock, enum pbn_lock_type type) argument
81 vdo_downgrade_pbn_write_lock(struct pbn_lock *lock, bool compressed_write) argument
110 vdo_claim_pbn_lock_increment(struct pbn_lock *lock) argument
129 vdo_assign_pbn_lock_provisional_reference(struct pbn_lock *lock) argument
141 vdo_unassign_pbn_lock_provisional_reference(struct pbn_lock *lock) argument
155 release_pbn_lock_provisional_reference(struct pbn_lock *lock, physical_block_number_t locked_pbn, struct block_allocator *allocator) argument
186 struct pbn_lock lock; member in union:__anon758
213 return_pbn_lock_to_pool(struct pbn_lock_pool *pool, struct pbn_lock *lock) argument
445 struct pbn_lock *lock, *new_lock = NULL; local
486 struct pbn_lock *lock; local
611 vdo_release_physical_zone_pbn_lock(struct physical_zone *zone, physical_block_number_t locked_pbn, struct pbn_lock *lock) argument
[all...]
/linux-master/drivers/media/dvb-frontends/cxd2880/
cxd2880_tnrdmd_dvbt.h
37 *lock);
43 *lock);
/linux-master/tools/testing/selftests/bpf/progs/
test_helper_restricted.c
10 struct lock { struct
25 __type(value, struct lock);
50 struct lock *lock; local
52 lock = bpf_map_lookup_elem(&locks, &key);
53 if (lock) {
54 bpf_spin_lock(&lock->l);
55 bpf_spin_unlock(&lock->l);
/linux-master/arch/x86/include/asm/
qspinlock_paravirt.h
7 void __lockfunc __pv_queued_spin_unlock_slowpath(struct qspinlock *lock, u8 locked);
26 * void __lockfunc __pv_queued_spin_unlock(struct qspinlock *lock)
28 * u8 lockval = cmpxchg(&lock->locked, _Q_LOCKED_VAL, 0);
32 * pv_queued_spin_unlock_slowpath(lock, lockval);
36 * rdi = lock (first argument)
64 extern void __lockfunc __pv_queued_spin_unlock(struct qspinlock *lock);
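The pseudo-code in the comment (excerpt lines 26-32) is the C shape that the hand-written assembly in this header implements. Written out, the fast path is a single cmpxchg, with the hashed-waiter kick deferred to the slowpath:

__visible __lockfunc void __pv_queued_spin_unlock(struct qspinlock *lock)
{
	u8 lockval = cmpxchg(&lock->locked, _Q_LOCKED_VAL, 0);

	if (likely(lockval == _Q_LOCKED_VAL))
		return;		/* uncontended: nobody to kick */

	__pv_queued_spin_unlock_slowpath(lock, lockval);
}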
/linux-master/kernel/sched/
swait.c
9 raw_spin_lock_init(&q->lock);
10 lockdep_set_class_and_name(&q->lock, key, name);
51 raw_spin_lock_irqsave(&q->lock, flags);
53 raw_spin_unlock_irqrestore(&q->lock, flags);
66 raw_spin_lock_irq(&q->lock);
77 raw_spin_unlock_irq(&q->lock);
78 raw_spin_lock_irq(&q->lock);
80 raw_spin_unlock_irq(&q->lock);
95 raw_spin_lock_irqsave(&q->lock, flags);
98 raw_spin_unlock_irqrestore(&q->lock, flags);
[all...]
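Excerpt lines 77-78 show the queue lock being dropped and immediately retaken inside a wake-all loop. That keeps the IRQ-disabled section bounded by one wakeup rather than by the queue length. A sketch of the surrounding loop (structure follows the excerpt; the function name is hypothetical):

#include <linux/swait.h>

static void wake_everyone(struct swait_queue_head *q)
{
	struct swait_queue *curr;

	raw_spin_lock_irq(&q->lock);
	while (!list_empty(&q->task_list)) {
		curr = list_first_entry(&q->task_list, typeof(*curr),
					task_list);
		wake_up_process(curr->task);
		list_del_init(&curr->task_list);

		/* Bound irq-off latency: let interrupts in between wakeups. */
		raw_spin_unlock_irq(&q->lock);
		raw_spin_lock_irq(&q->lock);
	}
	raw_spin_unlock_irq(&q->lock);
}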
/linux-master/arch/powerpc/lib/
locks.c
3 * Spin and read/write lock operations.
21 void splpar_spin_yield(arch_spinlock_t *lock) argument
25 lock_value = lock->slock;
35 if (lock->slock != lock_value)
42 * Waiting for a read lock or a write lock on a rwlock...
51 lock_value = rw->lock;
53 return; /* no write lock at present */
61 if (rw->lock != lock_value)
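splpar_spin_yield() only matters on shared-processor LPARs: if the vCPU holding the lock was preempted by the hypervisor, spinning is wasted work, so the waiter confers its timeslice to the holder instead. A sketch of the flow using the pseries helpers yield_count_of() and yield_to_preempted(); the low-halfword holder-CPU encoding is an assumption here, not shown in the excerpt:

void spin_yield_to_holder(arch_spinlock_t *lock)
{
	unsigned int holder_cpu, yield_count;
	u32 lock_value = READ_ONCE(lock->slock);

	if (lock_value == 0)
		return;				/* lock was just released */

	holder_cpu = lock_value & 0xffff;	/* assumed holder encoding */
	yield_count = yield_count_of(holder_cpu);
	if ((yield_count & 1) == 0)
		return;				/* holder vCPU is running */

	if (READ_ONCE(lock->slock) != lock_value)
		return;				/* lock changed hands */

	yield_to_preempted(holder_cpu, yield_count);
}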
/linux-master/lib/
atomic64.c
25 * Ensure each lock is in a separate cacheline.
28 raw_spinlock_t lock; member in union:__anon380
32 .lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
42 return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
48 raw_spinlock_t *lock = lock_addr(v); local
51 raw_spin_lock_irqsave(lock, flags);
53 raw_spin_unlock_irqrestore(lock, flags);
61 raw_spinlock_t *lock = lock_addr(v); local
63 raw_spin_lock_irqsave(lock, flags);
134 raw_spinlock_t *lock = lock_addr(v); local
149 raw_spinlock_t *lock = lock_addr(v); local
164 raw_spinlock_t *lock = lock_addr(v); local
178 raw_spinlock_t *lock = lock_addr(v); local
[all...]
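lock_addr() above hashes the atomic's address into a small array of cacheline-separated spinlocks, so unrelated atomic64_t variables rarely contend. Every generic 64-bit operation then follows the same shape as the ops collapsed at excerpt lines 134-178, e.g.:

s64 generic_atomic64_add_return(s64 a, atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	s64 val;

	raw_spin_lock_irqsave(lock, flags);
	val = (v->counter += a);	/* 64-bit RMW under the hashed lock */
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}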
/linux-master/include/drm/
drm_modeset_lock.h
43 * ctx. And if any lock fxn returns -EDEADLK, it must backoff and
51 * Contended lock: if a lock is contended you should only call
53 * contended lock.
58 * Stack depot for debugging when a contended lock was not backed off
87 * modeset lock
106 void drm_modeset_lock_init(struct drm_modeset_lock *lock);
109 * drm_modeset_lock_fini - cleanup lock
110 @lock: lock to cleanup
112 drm_modeset_lock_fini(struct drm_modeset_lock *lock) argument
121 drm_modeset_is_locked(struct drm_modeset_lock *lock) argument
130 drm_modeset_lock_assert_held(struct drm_modeset_lock *lock) argument
[all...]
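The header comment describes the ww-mutex style protocol: take every lock through an acquire context, and on -EDEADLK drop everything, sleep until the contended lock is free, then retry. In sketch form (this is the same shape the DRM_MODESET_LOCK_ALL_BEGIN/END helpers expand to; the wrapper function is hypothetical):

#include <drm/drm_modeset_lock.h>
#include <drm/drm_crtc.h>

static int lock_crtc(struct drm_crtc *crtc)
{
	struct drm_modeset_acquire_ctx ctx;
	int ret;

	drm_modeset_acquire_init(&ctx, 0);
retry:
	ret = drm_modeset_lock(&crtc->mutex, &ctx);
	if (ret == -EDEADLK) {
		ret = drm_modeset_backoff(&ctx);	/* drops all, waits */
		if (!ret)
			goto retry;
	}

	/* ... modeset work while the lock is held ... */

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	return ret;
}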
/linux-master/include/acpi/platform/
aclinuxex.h
78 * argument and uses that as a name for the lock in debugging.
79 * By executing spin_lock_init() in a macro the key changes from "lock" for
85 spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
86 if (lock) { \
87 *(__handle) = lock; \
90 lock ? AE_OK : AE_NO_MEMORY; \
96 raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \
97 if (lock) { \
[all...]
/linux-master/fs/lockd/
xdr4.c
5 * XDR support for lockd and the lock client.
74 svcxdr_decode_lock(struct xdr_stream *xdr, struct nlm_lock *lock) argument
76 struct file_lock *fl = &lock->fl;
78 if (!svcxdr_decode_string(xdr, &lock->caller, &lock->len))
80 if (!svcxdr_decode_fhandle(xdr, &lock->fh))
82 if (!svcxdr_decode_owner(xdr, &lock->oh))
84 if (xdr_stream_decode_u32(xdr, &lock->svid) < 0)
86 if (xdr_stream_decode_u64(xdr, &lock->lock_start) < 0)
88 if (xdr_stream_decode_u64(xdr, &lock->lock_len) < 0)
99 svcxdr_encode_holder(struct xdr_stream *xdr, const struct nlm_lock *lock) argument
269 struct nlm_lock *lock = &argp->lock; local
296 struct nlm_lock *lock = &argp->lock; local
[all...]

Completed in 626 milliseconds
