Searched refs:lock (Results 26 - 50 of 2780) sorted by relevance

/freebsd-11-stable/sys/contrib/ck/include/spinlock/
dec.h
39 * This is similar to the CACAS lock but makes use of an atomic decrement
40 * operation to check if the lock value was decremented to 0 from 1. The
51 ck_spinlock_dec_init(struct ck_spinlock_dec *lock) argument
54 lock->value = 1;
60 ck_spinlock_dec_trylock(struct ck_spinlock_dec *lock) argument
64 value = ck_pr_fas_uint(&lock->value, 0);
70 ck_spinlock_dec_locked(struct ck_spinlock_dec *lock) argument
74 r = ck_pr_load_uint(&lock->value) != 1;
80 ck_spinlock_dec_lock(struct ck_spinlock_dec *lock) argument
86 * Only one thread is guaranteed to decrement lock t
104 ck_spinlock_dec_lock_eb(struct ck_spinlock_dec *lock) argument
123 ck_spinlock_dec_unlock(struct ck_spinlock_dec *lock) argument
[all...]
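
The dec.h hits above describe CK's decrement-based spinlock: the value is 1 when the lock is free, and only the thread that decrements it from 1 to 0 becomes the owner. A minimal userspace sketch of the same idea, written with C11 atomics instead of the ck_pr primitives (all names below are illustrative, not part of CK):

#include <stdatomic.h>

struct dec_lock { atomic_uint value; };   /* 1 = free, anything else = held */

static void dec_lock_init(struct dec_lock *l)
{
	atomic_init(&l->value, 1);
}

static void dec_lock_acquire(struct dec_lock *l)
{
	for (;;) {
		/* Only the thread that moves the value from 1 to 0 owns the lock. */
		if (atomic_fetch_sub_explicit(&l->value, 1, memory_order_acquire) == 1)
			return;
		/* Lost the race: wait for the holder to restore the value to 1. */
		while (atomic_load_explicit(&l->value, memory_order_relaxed) != 1)
			;
	}
}

static void dec_lock_release(struct dec_lock *l)
{
	atomic_store_explicit(&l->value, 1, memory_order_release);
}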
fas.h
47 ck_spinlock_fas_init(struct ck_spinlock_fas *lock) argument
50 lock->value = false;
56 ck_spinlock_fas_trylock(struct ck_spinlock_fas *lock) argument
60 value = ck_pr_fas_uint(&lock->value, true);
67 ck_spinlock_fas_locked(struct ck_spinlock_fas *lock) argument
71 r = ck_pr_load_uint(&lock->value);
77 ck_spinlock_fas_lock(struct ck_spinlock_fas *lock) argument
80 while (ck_pr_fas_uint(&lock->value, true) == true) {
81 while (ck_pr_load_uint(&lock->value) == true)
90 ck_spinlock_fas_lock_eb(struct ck_spinlock_fas *lock) argument
102 ck_spinlock_fas_unlock(struct ck_spinlock_fas *lock) argument
[all...]
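
fas.h above is the classic fetch-and-store (test-and-set) spinlock: swap true into the word and spin on plain loads while it stays true. A hedged C11 equivalent (illustrative names, not the CK API):

#include <stdatomic.h>
#include <stdbool.h>

struct fas_lock { atomic_bool value; };   /* false = free, true = held */

static void fas_lock_acquire(struct fas_lock *l)
{
	/* Swap in true; whoever saw false is the new owner. Between attempts,
	 * spin on plain loads so failed swaps do not bounce the cache line. */
	while (atomic_exchange_explicit(&l->value, true, memory_order_acquire)) {
		while (atomic_load_explicit(&l->value, memory_order_relaxed))
			;
	}
}

static bool fas_lock_trylock(struct fas_lock *l)
{
	return !atomic_exchange_explicit(&l->value, true, memory_order_acquire);
}

static void fas_lock_release(struct fas_lock *l)
{
	atomic_store_explicit(&l->value, false, memory_order_release);
}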
cas.h
49 ck_spinlock_cas_init(struct ck_spinlock_cas *lock) argument
52 lock->value = false;
58 ck_spinlock_cas_trylock(struct ck_spinlock_cas *lock) argument
62 value = ck_pr_fas_uint(&lock->value, true);
68 ck_spinlock_cas_locked(struct ck_spinlock_cas *lock) argument
70 bool r = ck_pr_load_uint(&lock->value);
77 ck_spinlock_cas_lock(struct ck_spinlock_cas *lock) argument
80 while (ck_pr_cas_uint(&lock->value, false, true) == false) {
81 while (ck_pr_load_uint(&lock->value) == true)
90 ck_spinlock_cas_lock_eb(struct ck_spinlock_cas *lock) argument
102 ck_spinlock_cas_unlock(struct ck_spinlock_cas *lock) argument
[all...]
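
cas.h is the same lock built on compare-and-swap instead of an unconditional swap: the acquire succeeds only when the word moves from false to true. A sketch in C11 (names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

struct cas_lock { atomic_bool value; };

static void cas_lock_acquire(struct cas_lock *l)
{
	bool expected = false;

	while (!atomic_compare_exchange_weak_explicit(&l->value, &expected, true,
	    memory_order_acquire, memory_order_relaxed)) {
		expected = false;         /* reset after a failed CAS */
		while (atomic_load_explicit(&l->value, memory_order_relaxed))
			;                 /* wait until the lock looks free */
	}
}

static void cas_lock_release(struct cas_lock *l)
{
	atomic_store_explicit(&l->value, false, memory_order_release);
}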
/freebsd-11-stable/contrib/netbsd-tests/usr.bin/xlint/lint1/
d_c99_nested_struct.c
21 pthread_mutex_t lock; member in struct:arc4random_global
24 .lock = { 0x33330003, 0, { 0, 0, 0 }, 0, { 0, 0, 0 }, ((void *)0), ((void *)0), 0, ((void *)0) },
/freebsd-11-stable/contrib/gcclibs/libgomp/config/posix95/
omp-lock.h
14 pthread_mutex_t lock; member in struct:__anon1453
/freebsd-11-stable/sys/compat/linuxkpi/common/include/linux/
ww_mutex.h
74 ww_mutex_trylock(struct ww_mutex *lock) argument
76 return (mutex_trylock(&lock->base));
83 ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) argument
87 else if ((struct thread *)SX_OWNER(lock->base.sx.sx_lock) == curthread)
90 return (linux_ww_mutex_lock_sub(lock, ctx, 0));
94 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) argument
98 else if ((struct thread *)SX_OWNER(lock->base.sx.sx_lock) == curthread)
101 return (linux_ww_mutex_lock_sub(lock, ctx, 1));
107 ww_mutex_unlock(struct ww_mutex *lock) argument
112 linux_ww_mutex_unlock_sub(lock);
116 ww_mutex_destroy(struct ww_mutex *lock) argument
128 ww_mutex_init(struct ww_mutex *lock, struct ww_class *ww_class) argument
[all...]
/freebsd-11-stable/contrib/apr-util/include/
apr_anylock.h
19 * @brief APR-Util transparent any lock flavor wrapper
28 /** Structure that may contain any APR lock type */
30 /** Indicates what type of lock is in lock */
35 apr_anylock_readlock, /**< Read lock */
36 apr_anylock_writelock /**< Write lock */
43 apr_thread_rwlock_t *rw; /**< Read-write lock */
45 } lock; member in struct:apr_anylock_t
55 ? apr_thread_mutex_lock((lck)->lock.tm) \
57 ? apr_proc_mutex_lock((lck)->lock
[all...]
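
apr_anylock.h wraps several APR lock flavors behind one struct: a tag records which member of the union is live and the lock macros dispatch on it. A self-contained sketch of the same pattern with pthread primitives (hypothetical names, not the APR API):

#include <pthread.h>

enum any_lock_kind { ANY_NONE, ANY_MUTEX, ANY_RDLOCK, ANY_WRLOCK };

struct any_lock {
	enum any_lock_kind kind;          /* which union member is in use */
	union {
		pthread_mutex_t mutex;
		pthread_rwlock_t rw;
	} u;
};

static int any_lock_acquire(struct any_lock *al)
{
	switch (al->kind) {
	case ANY_MUTEX:
		return pthread_mutex_lock(&al->u.mutex);
	case ANY_RDLOCK:
		return pthread_rwlock_rdlock(&al->u.rw);
	case ANY_WRLOCK:
		return pthread_rwlock_wrlock(&al->u.rw);
	default:
		return 0;                 /* ANY_NONE: locking disabled */
	}
}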
/freebsd-11-stable/sys/kern/
kern_condvar.c
35 #include <sys/lock.h>
64 #define CV_ASSERT(cvp, lock, td) do { \
68 KASSERT((lock) != NULL, ("%s: lock NULL", __func__)); \
107 _cv_wait(struct cv *cvp, struct lock_object *lock) argument
120 CV_ASSERT(cvp, lock, td);
121 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock,
123 class = LOCK_CLASS(lock);
131 if (lock == &Giant.lock_object)
135 sleepq_add(cvp, lock, cv
162 _cv_wait_unlock(struct cv *cvp, struct lock_object *lock) argument
211 _cv_wait_sig(struct cv *cvp, struct lock_object *lock) argument
271 _cv_timedwait_sbt(struct cv *cvp, struct lock_object *lock, sbintime_t sbt, sbintime_t pr, int flags) argument
333 _cv_timedwait_sig_sbt(struct cv *cvp, struct lock_object *lock, sbintime_t sbt, sbintime_t pr, int flags) argument
[all...]
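
kern_condvar.c implements the kernel condition-variable primitives: the caller holds a lock, the wait atomically releases it while the thread sleeps, and the lock is retaken before the wait returns. The same contract in userspace pthread form (a sketch, not the kernel code):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static bool ready;

static void wait_for_ready(void)
{
	pthread_mutex_lock(&m);
	while (!ready)                      /* re-check: wakeups may be spurious */
		pthread_cond_wait(&cv, &m); /* drops m while asleep, retakes it */
	pthread_mutex_unlock(&m);
}

static void announce_ready(void)
{
	pthread_mutex_lock(&m);
	ready = true;
	pthread_cond_signal(&cv);
	pthread_mutex_unlock(&m);
}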
kern_rangelock.c
32 #include <sys/lock.h>
72 rangelock_init(struct rangelock *lock) argument
75 TAILQ_INIT(&lock->rl_waiters);
76 lock->rl_currdep = NULL;
80 rangelock_destroy(struct rangelock *lock) argument
83 KASSERT(TAILQ_EMPTY(&lock->rl_waiters), ("Dangling waiters"));
101 * Recalculate the lock->rl_currdep after an unlock.
104 rangelock_calc_block(struct rangelock *lock) argument
108 for (entry = lock->rl_currdep; entry != NULL; entry = nextentry) {
112 for (entry1 = TAILQ_FIRST(&lock
141 rangelock_unlock_locked(struct rangelock *lock, struct rl_q_entry *entry, struct mtx *ilk) argument
159 rangelock_unlock(struct rangelock *lock, void *cookie, struct mtx *ilk) argument
172 rangelock_unlock_range(struct rangelock *lock, void *cookie, off_t start, off_t end, struct mtx *ilk) argument
200 rangelock_enqueue(struct rangelock *lock, off_t start, off_t end, int mode, struct mtx *ilk) argument
237 rangelock_rlock(struct rangelock *lock, off_t start, off_t end, struct mtx *ilk) argument
244 rangelock_wlock(struct rangelock *lock, off_t start, off_t end, struct mtx *ilk) argument
[all...]
/freebsd-11-stable/contrib/ofed/librdmacm/
cma.h
57 static inline void fastlock_init(fastlock_t *lock) argument
59 sem_init(&lock->sem, 0, 0);
60 atomic_store(&lock->cnt, 0);
62 static inline void fastlock_destroy(fastlock_t *lock) argument
64 sem_destroy(&lock->sem);
66 static inline void fastlock_acquire(fastlock_t *lock) argument
68 if (atomic_fetch_add(&lock->cnt, 1) > 0)
69 sem_wait(&lock->sem);
71 static inline void fastlock_release(fastlock_t *lock) argument
73 if (atomic_fetch_sub(&lock
[all...]
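
The librdmacm fastlock keeps the uncontended path down to a single atomic add on a counter and only touches the semaphore when there is real contention. A sketch of that scheme, assuming the same counter-plus-semaphore layout (names below are illustrative):

#include <semaphore.h>
#include <stdatomic.h>

typedef struct { sem_t sem; atomic_int cnt; } fast_lock;

static void fast_lock_init(fast_lock *l)
{
	sem_init(&l->sem, 0, 0);          /* semaphore starts empty */
	atomic_init(&l->cnt, 0);
}

static void fast_lock_acquire(fast_lock *l)
{
	/* The first thread sees 0 and proceeds; later threads sleep. */
	if (atomic_fetch_add(&l->cnt, 1) > 0)
		sem_wait(&l->sem);
}

static void fast_lock_release(fast_lock *l)
{
	/* If another thread queued behind us, hand the lock over with a post. */
	if (atomic_fetch_sub(&l->cnt, 1) > 1)
		sem_post(&l->sem);
}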
/freebsd-11-stable/contrib/openbsm/bin/auditdistd/
synch.h
49 mtx_init(pthread_mutex_t *lock) argument
53 error = pthread_mutex_init(lock, NULL);
57 mtx_destroy(pthread_mutex_t *lock) argument
61 error = pthread_mutex_destroy(lock);
65 mtx_lock(pthread_mutex_t *lock) argument
69 error = pthread_mutex_lock(lock);
73 mtx_trylock(pthread_mutex_t *lock) argument
77 error = pthread_mutex_trylock(lock);
82 mtx_unlock(pthread_mutex_t *lock) argument
86 error = pthread_mutex_unlock(lock);
90 mtx_owned(pthread_mutex_t *lock) argument
97 rw_init(pthread_rwlock_t *lock) argument
105 rw_destroy(pthread_rwlock_t *lock) argument
113 rw_rlock(pthread_rwlock_t *lock) argument
121 rw_wlock(pthread_rwlock_t *lock) argument
129 rw_unlock(pthread_rwlock_t *lock) argument
155 cv_wait(pthread_cond_t *cv, pthread_mutex_t *lock) argument
163 cv_timedwait(pthread_cond_t *cv, pthread_mutex_t *lock, int timeout) argument
[all...]
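
auditdistd's synch.h wraps each pthread call in a small helper so every locking operation is checked in one place. A minimal version of that wrapper style (assumed behaviour: unexpected errors are fatal; the real file uses its own assertion macros):

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

static void mtx_lock_checked(pthread_mutex_t *lock)
{
	if (pthread_mutex_lock(lock) != 0)
		abort();                  /* locking errors are programming bugs */
}

static bool mtx_trylock_checked(pthread_mutex_t *lock)
{
	int error = pthread_mutex_trylock(lock);

	if (error != 0 && error != EBUSY)
		abort();
	return (error == 0);
}

static void mtx_unlock_checked(pthread_mutex_t *lock)
{
	if (pthread_mutex_unlock(lock) != 0)
		abort();
}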
/freebsd-11-stable/contrib/llvm-project/compiler-rt/lib/sanitizer_common/
sanitizer_atomic_clang_mips.h
21 // internal spin lock mechanism to emulate atomic operations when the size is
23 static void __spin_lock(volatile int *lock) { argument
24 while (__sync_lock_test_and_set(lock, 1))
25 while (*lock) {
29 static void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); } argument
31 // Make sure the lock is on its own cache line to prevent false sharing.
35 int lock; member in struct:__sanitizer::__anon1169
37 } __attribute__((aligned(32))) lock = {0, {0}}; member in namespace:__sanitizer
49 __spin_lock(&lock
[all...]
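
The sanitizer fallback above is a test-and-set spin lock built from the __sync builtins, with the lock word padded and aligned so it occupies a cache line of its own and does not false-share with its neighbours. A sketch of the same layout (the 64-byte line size here is an assumption; the file above uses 32):

/* Spin lock padded out to its own cache line to avoid false sharing. */
struct padded_spin_lock {
	volatile int lock;
	char pad[64 - sizeof(int)];
} __attribute__((aligned(64)));

static void padded_spin_lock_acquire(struct padded_spin_lock *l)
{
	while (__sync_lock_test_and_set(&l->lock, 1))   /* acquire barrier */
		while (l->lock)                         /* spin on plain reads */
			;
}

static void padded_spin_lock_release(struct padded_spin_lock *l)
{
	__sync_lock_release(&l->lock);                  /* stores 0 with release */
}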
/freebsd-11-stable/sys/sys/
lock.h
29 * $FreeBSD: stable/11/sys/sys/lock.h 327478 2018-01-02 00:02:36Z mjg $
43 * Lock classes. Each lock has a class which describes characteristics
47 * an error to perform any type of context switch while holding a spin lock.
48 * Also, for an individual lock to be recursable, its class must allow
49 * recursion and the lock itself must explicitly allow recursion.
52 * data for the 'show lock' DDB command. The 'lc_lock' and
54 * to lock and unlock locks while blocking on a sleep queue. The
62 void (*lc_assert)(const struct lock_object *lock, int what);
63 void (*lc_ddb_show)(const struct lock_object *lock);
64 void (*lc_lock)(struct lock_object *lock, uintptr_
[all...]
/freebsd-11-stable/lib/libthr/
plockstat.d
41 probe rw__acquire(void *lock, int wr);
42 probe rw__release(void *lock, int wr);
43 probe rw__block(void *lock, int wr);
44 probe rw__blocked(void *lock, int wr, int success);
45 probe rw__error(void *lock, int wr, int err);
/freebsd-11-stable/sys/contrib/ck/include/
ck_tflock.h
82 ck_tflock_ticket_write_lock(struct ck_tflock_ticket *lock) argument
86 previous = ck_tflock_ticket_fca_32(&lock->request, CK_TFLOCK_TICKET_WC_TOPMSK,
89 while (ck_pr_load_32(&lock->completion) != previous)
97 ck_tflock_ticket_write_unlock(struct ck_tflock_ticket *lock) argument
101 ck_tflock_ticket_fca_32(&lock->completion, CK_TFLOCK_TICKET_WC_TOPMSK,
107 ck_tflock_ticket_read_lock(struct ck_tflock_ticket *lock) argument
111 previous = ck_tflock_ticket_fca_32(&lock->request,
117 while ((ck_pr_load_32(&lock->completion) &
127 ck_tflock_ticket_read_unlock(struct ck_tflock_ticket *lock) argument
131 ck_tflock_ticket_fca_32(&lock
[all...]
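
ck_tflock is a task-fair reader-writer lock driven by two counters, request and completion, updated with fetch-and-add so waiters are served in arrival order. Its simplest relative is the plain ticket spinlock below (a C11 sketch, not the CK code), which shows the request/completion idea without the reader/writer split:

#include <stdatomic.h>

struct ticket_lock {
	atomic_uint request;              /* next ticket to hand out */
	atomic_uint completion;           /* ticket currently being served */
};

static void ticket_lock_acquire(struct ticket_lock *l)
{
	unsigned int me = atomic_fetch_add_explicit(&l->request, 1,
	    memory_order_relaxed);

	while (atomic_load_explicit(&l->completion, memory_order_acquire) != me)
		;                         /* wait for our turn, FIFO order */
}

static void ticket_lock_release(struct ticket_lock *l)
{
	atomic_fetch_add_explicit(&l->completion, 1, memory_order_release);
}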
ck_elide.h
145 ck_elide_##N##_lock_adaptive(T *lock, \
161 if (L_P(lock) == true) \
173 if (L_P(lock) == false) \
187 L(lock); \
191 ck_elide_##N##_unlock_adaptive(struct ck_elide_stat *st, T *lock) \
194 if (U_P(lock) == false) { \
199 U(lock); \
205 ck_elide_##N##_lock(T *lock) \
209 L(lock); \
213 if (L_P(lock)
[all...]
/freebsd-11-stable/sys/compat/linuxkpi/common/src/
linux_lock.c
37 struct ww_mutex *lock; member in struct:ww_mutex_thread
72 /* lock a mutex with deadlock avoidance */
74 linux_ww_mutex_lock_sub(struct ww_mutex *lock, argument
85 if (unlikely(sx_try_xlock(&lock->base.sx) == 0)) {
87 entry.lock = lock;
92 SX_OWNER(lock->base.sx.sx_lock);
101 * lock and is at the same time trying
102 * to acquire a lock this thread owns,
107 other->lock
138 linux_ww_mutex_unlock_sub(struct ww_mutex *lock) argument
[all...]
/freebsd-11-stable/contrib/ntp/sntp/libevent/
evthread-internal.h
50 /* Global function pointers to lock-related functions. NULL if locking isn't
62 * running a given event_base's loop. Requires lock. */
69 * thread. Requires lock. */
75 /** Allocate a new lock, and store it in lockvar, a void*. Sets lockvar to
81 /** Free a given lock, if it is present and locking is enabled. */
89 /** Acquire a lock. */
93 evthread_lock_fns_.lock(mode, lockvar); \
96 /** Release a lock */
113 /** Lock an event_base, if it is set up for locking. Acquires the lock
124 /** If lock debuggin
137 EVLOCK_TRY_LOCK_(void *lock) argument
251 EVLOCK_TRY_LOCK_(void *lock) argument
[all...]
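
libevent only takes locks through a table of global function pointers, so locking stays a no-op until a threading backend registers callbacks. A condensed sketch of that plug-in structure with a pthread backend (illustrative names, not libevent's API):

#include <pthread.h>
#include <stdlib.h>

struct lock_callbacks {
	void *(*alloc)(void);
	void (*destroy)(void *lock);
	int (*lock)(void *lock);
	int (*unlock)(void *lock);
};

static void *pt_lock_alloc(void)
{
	pthread_mutex_t *m = malloc(sizeof(*m));

	if (m != NULL && pthread_mutex_init(m, NULL) != 0) {
		free(m);
		m = NULL;
	}
	return m;
}

static void pt_lock_destroy(void *lock)
{
	pthread_mutex_destroy(lock);
	free(lock);
}

static int pt_lock_lock(void *lock) { return pthread_mutex_lock(lock); }
static int pt_lock_unlock(void *lock) { return pthread_mutex_unlock(lock); }

/* Zeroed until a backend is registered; callers lock only if a table is set. */
static struct lock_callbacks lock_cbs;

static void use_pthread_locking(void)
{
	struct lock_callbacks cbs = {
		pt_lock_alloc, pt_lock_destroy, pt_lock_lock, pt_lock_unlock
	};
	lock_cbs = cbs;
}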
evthread_pthread.c
48 pthread_mutex_t *lock = mm_malloc(sizeof(pthread_mutex_t)); local
49 if (!lock)
53 if (pthread_mutex_init(lock, attr)) {
54 mm_free(lock);
57 return lock;
63 pthread_mutex_t *lock = lock_; local
64 pthread_mutex_destroy(lock);
65 mm_free(lock);
71 pthread_mutex_t *lock = lock_; local
73 return pthread_mutex_trylock(lock);
81 pthread_mutex_t *lock = lock_; local
141 pthread_mutex_t *lock = lock_; local
[all...]
/freebsd-11-stable/contrib/libstdc++/config/cpu/hppa/
atomicity.h
46 // linker, we explicitly instantiate the atomicity lock.
55 volatile int& lock = _Atomicity_lock<0>::_S_atomicity_lock; local
64 : "r" (&lock)
70 : : "r" (&lock), "r" (tmp) : "memory");
79 volatile int& lock = _Atomicity_lock<0>::_S_atomicity_lock; local
88 : "r" (&lock)
93 : : "r" (&lock), "r" (tmp) : "memory");
/freebsd-11-stable/usr.bin/lock/
Makefile
4 PROG= lock
/freebsd-11-stable/sys/dev/vkbd/
vkbd_var.h
41 int lock; /* keyboard lock key state */ member in struct:vkbd_status
/freebsd-11-stable/contrib/ofed/libmlx5/
doorbell.h
50 static inline void mlx5_write64(uint32_t val[2], void *dest, struct mlx5_spinlock *lock) argument
57 static inline void mlx5_write64(uint32_t val[2], void *dest, struct mlx5_spinlock *lock) argument
59 mlx5_spin_lock(lock);
62 mlx5_spin_unlock(lock);
/freebsd-11-stable/contrib/unbound/services/
view.c
64 lock_rw_init(&v->lock);
65 lock_protect(&v->lock, &v->vtree, sizeof(v->vtree));
78 lock_rw_destroy(&v->lock);
97 lock_rw_destroy(&v->lock);
114 lock_rw_init(&v->lock);
115 lock_protect(&v->lock, &v->name, sizeof(*v)-sizeof(rbnode_type));
130 lock_rw_wrlock(&vs->lock);
131 lock_rw_wrlock(&v->lock);
134 lock_rw_unlock(&v->lock);
136 lock_rw_unlock(&vs->lock);
[all...]
/freebsd-11-stable/contrib/llvm-project/compiler-rt/lib/scudo/
scudo_tsd_exclusive.inc
39 FallbackTSD.lock();

Completed in 222 milliseconds
