Searched refs:lock (Results 126 - 150 of 2780) sorted by relevance


/freebsd-11-stable/sys/vm/
vm_object.h
85 * (o) per-object lock
96 struct rwlock lock; member in struct:vm_object
225 extern struct mtx vm_object_list_mtx; /* lock for object list and count */
234 rw_assert(&(object)->lock, RA_LOCKED)
236 rw_assert(&(object)->lock, RA_RLOCKED)
238 rw_assert(&(object)->lock, RA_WLOCKED)
240 rw_assert(&(object)->lock, RA_UNLOCKED)
242 rw_downgrade(&(object)->lock)
244 rw_rlock(&(object)->lock)
246 rw_runlock(&(object)->lock)
[all...]
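
These hits are the expansions of the per-object locking macros: a thin wrapper layer over rwlock(9) so callers never name the rwlock field directly. A minimal sketch of that pattern, with struct fields and macro names invented for illustration rather than taken from vm_object.h:

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/rwlock.h>

    struct obj {
            struct rwlock   lock;           /* (o) protects the fields below */
            int             ref_count;      /* (o) */
    };

    /* Callers use these instead of the rw_*() calls directly. */
    #define OBJ_WLOCK(o)            rw_wlock(&(o)->lock)
    #define OBJ_WUNLOCK(o)          rw_wunlock(&(o)->lock)
    #define OBJ_RLOCK(o)            rw_rlock(&(o)->lock)
    #define OBJ_RUNLOCK(o)          rw_runlock(&(o)->lock)
    #define OBJ_ASSERT_WLOCKED(o)   rw_assert(&(o)->lock, RA_WLOCKED)
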
/freebsd-11-stable/sys/compat/linuxkpi/common/include/linux/
wait.h
80 spinlock_t lock; member in struct:wait_queue_head
114 MTX_SYSINIT(name, &(name).lock.m, spin_lock_name("wqhead"), MTX_DEF)
117 mtx_init(&(wqh)->lock.m, spin_lock_name("wqhead"), \
148 #define __wait_event_common(wqh, cond, timeout, state, lock) ({ \
159 __timeout, state, lock); \
210 spin_unlock(&(wqh).lock); \
213 spin_lock(&(wqh).lock); \
220 #define wait_event_interruptible_lock_irq(wqh, cond, lock) ({ \
222 TASK_INTERRUPTIBLE, &(lock)); \
228 #define wait_event_lock_irq(wqh, cond, lock) ({ \
[all...]
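
These wait.h hits are FreeBSD's linuxkpi emulation of the Linux wait_event family, where the wait-queue head carries its own spinlock that is dropped around the sleep. A rough usage sketch, assuming the usual Linux calling convention for wait_event_interruptible_lock_irq() (spinlock held on entry and return, dropped while sleeping); the dev_state structure and wait_for_data() are invented for the example:

    #include <linux/spinlock.h>
    #include <linux/wait.h>

    /* Invented driver state; only the spinlock, wait-queue and
     * wait_event_interruptible_lock_irq() calls are the emulated APIs. */
    struct dev_state {
            spinlock_t              lock;
            wait_queue_head_t       wq;
            bool                    data_ready;
    };

    static int
    wait_for_data(struct dev_state *st)
    {
            int error;

            spin_lock_irq(&st->lock);
            /* The macro drops st->lock while sleeping and re-takes it before
             * re-checking data_ready, as the snippet above arranges. */
            error = wait_event_interruptible_lock_irq(st->wq, st->data_ready,
                st->lock);
            spin_unlock_irq(&st->lock);
            return (error);
    }
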
/freebsd-11-stable/contrib/ntp/lib/isc/
rwlock.c
115 result = isc_mutex_init(&rwl->lock);
147 DESTROYLOCK(&rwl->lock);
160 LOCK(&rwl->lock);
164 UNLOCK(&rwl->lock);
170 DESTROYLOCK(&rwl->lock);
186 * value of 0. When a new writer tries to get a write lock, it increments
195 * cnt_and_flag is a "lock" shared by all readers and writers. This integer
200 * lock by exclusively setting the writer_flag to 1, provided that the whole
218 * and a mutex lock, ordering between the atomic operation and locks can be
225 * LOCK(lock);
[all...]
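
The comment excerpted above describes ISC's atomic read-write lock: readers and writers share a single cnt_and_flag word whose lowest bit is the writer flag and whose upper bits count active readers. A toy userspace rendering of that core idea in C11 atomics; names and constants here are made up, and the real implementation adds wait queues, a mutex and the fairness logic the comment goes on to describe:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define WRITER_FLAG 0x1         /* lowest bit: a writer holds the lock */
    #define READER_INCR 0x2         /* each active reader adds this */

    typedef struct {
            atomic_int cnt_and_flag;
    } toy_rwlock_t;

    static bool
    toy_read_trylock(toy_rwlock_t *rwl)
    {
            /* Optimistically register as a reader, back out if a writer
             * already owns the lock. */
            int prev = atomic_fetch_add(&rwl->cnt_and_flag, READER_INCR);
            if (prev & WRITER_FLAG) {
                    atomic_fetch_sub(&rwl->cnt_and_flag, READER_INCR);
                    return false;
            }
            return true;
    }

    static bool
    toy_write_trylock(toy_rwlock_t *rwl)
    {
            /* A writer gets the lock only if the whole field is 0,
             * i.e. no readers and no other writer. */
            int expected = 0;
            return atomic_compare_exchange_strong(&rwl->cnt_and_flag,
                &expected, WRITER_FLAG);
    }
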
mem.c
118 static isc_mutex_t lock; variable
122 * Locked by the global lock.
130 isc_mutex_t lock; member in struct:isc__mem
178 isc_mutex_t *lock; /*%< optional lock */ member in struct:isc__mempool
180 /*%< locked via the memory context's lock */
293 isc__mempool_associatelock(isc_mempool_t *mpctx, isc_mutex_t *lock);
536 /* Require: we hold the context lock. */
884 RUNTIME_CHECK(isc_mutex_init(&lock) == ISC_R_SUCCESS);
924 result = isc_mutex_init(&ctx->lock);
1898 isc_mutex_t *lock; local
1959 isc__mempool_associatelock(isc_mempool_t *mpctx0, isc_mutex_t *lock) argument
[all...]
result.c
61 "lock busy", /*%< 17 */
113 static isc_mutex_t lock; variable
139 LOCK(&lock);
143 UNLOCK(&lock);
152 RUNTIME_CHECK(isc_mutex_init(&lock) == ISC_R_SUCCESS);
179 LOCK(&lock);
202 UNLOCK(&lock);
/freebsd-11-stable/contrib/apr-util/misc/
apr_rmm.c
73 apr_anylock_t lock; member in struct:apr_rmm_t
213 APU_DECLARE(apr_status_t) apr_rmm_init(apr_rmm_t **rmm, apr_anylock_t *lock,
221 if (!lock) {
223 nulllock.lock.pm = NULL;
224 lock = &nulllock;
226 if ((rv = APR_ANYLOCK_LOCK(lock)) != APR_SUCCESS)
233 (*rmm)->lock = *lock;
245 return APR_ANYLOCK_UNLOCK(lock);
253 if ((rv = APR_ANYLOCK_LOCK(&rmm->lock)) !
[all...]
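
apr_rmm_init() above takes an optional lock and substitutes a do-nothing one when the caller passes NULL, so the rest of the code can lock and unlock unconditionally. A self-contained sketch of that pattern; the anylock_t type and function names are invented, and apr-util's apr_anylock_t works along these lines but supports several lock kinds:

    #include <stddef.h>
    #include <pthread.h>

    typedef struct {
            enum { NO_LOCK, MUTEX_LOCK } type;
            pthread_mutex_t *pm;
    } anylock_t;

    static int
    anylock_lock(anylock_t *l)
    {
            return (l->type == MUTEX_LOCK) ? pthread_mutex_lock(l->pm) : 0;
    }

    static int
    anylock_unlock(anylock_t *l)
    {
            return (l->type == MUTEX_LOCK) ? pthread_mutex_unlock(l->pm) : 0;
    }

    /* Single-threaded callers pass NULL and get this no-op lock instead. */
    static anylock_t nulllock = { NO_LOCK, NULL };

    void
    shared_region_init(anylock_t *lock)
    {
            if (lock == NULL)
                    lock = &nulllock;
            anylock_lock(lock);
            /* ... set up the shared structure ... */
            anylock_unlock(lock);
    }
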
/freebsd-11-stable/sys/dev/cxgb/ulp/tom/
cxgb_l2t.c
55 * Module locking notes: There is a RW lock protecting the L2 table as a
57 * under the protection of the table lock, individual entry changes happen
58 * while holding that entry's mutex. The table lock nests outside the
59 * entry locks. Allocations of new entries take the table lock as writers so
61 * take the table lock as readers so multiple entries can be updated in
87 mtx_assert(&e->lock, MA_OWNED);
124 * Must be called with the entry's lock held.
129 mtx_assert(&e->lock, MA_OWNED);
151 mtx_assert(&e->lock, MA_OWNED);
167 mtx_assert(&e->lock, MA_OWNE
[all...]
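
The cxgb L2 table notes describe a two-level scheme: a table-wide RW lock that nests outside per-entry mutexes, taken as writer for allocations and as reader for entry updates so unrelated entries can change concurrently. A rough kernel-style sketch of that hierarchy, with structure and function names that are illustrative rather than the driver's:

    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>
    #include <sys/rwlock.h>

    struct l2_entry {
            struct mtx      lock;           /* protects this entry's fields */
            int             state;
    };

    struct l2_table {
            struct rwlock   lock;           /* protects the table structure */
            struct l2_entry *entries;
            int             nentries;
    };

    /* Updating one entry: table lock as reader, then that entry's mutex. */
    static void
    l2_update_entry(struct l2_table *t, int idx, int new_state)
    {
            struct l2_entry *e;

            rw_rlock(&t->lock);
            e = &t->entries[idx];
            mtx_lock(&e->lock);
            e->state = new_state;
            mtx_unlock(&e->lock);
            rw_runlock(&t->lock);
    }
    /* Allocation changes the table itself, so it takes rw_wlock(&t->lock)
     * and thereby excludes readers and other writers. */
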
/freebsd-11-stable/contrib/ntp/sntp/libevent/
evbuffer-internal.h
112 /** A lock used to mediate access to this buffer. */
113 void *lock; member in struct:evbuffer
115 /** True iff we should free the lock field when we free this
236 void *lock; /**< lock prevent concurrent access to refcnt */ member in struct:evbuffer_file_segment
281 /** Assert that we are holding the lock on an evbuffer */
283 EVLOCK_ASSERT_LOCKED((buffer)->lock)
287 EVLOCK_LOCK((buffer)->lock, 0); \
291 EVLOCK_UNLOCK((buffer)->lock, 0); \
295 EVLOCK_LOCK2((buffer1)->lock, (buffer
[all...]
/freebsd-11-stable/sys/dev/ixl/
i40e_osdep.c
140 i40e_init_spinlock(struct i40e_spinlock *lock) argument
142 mtx_init(&lock->mutex, "mutex",
147 i40e_acquire_spinlock(struct i40e_spinlock *lock) argument
149 mtx_lock(&lock->mutex);
153 i40e_release_spinlock(struct i40e_spinlock *lock) argument
155 mtx_unlock(&lock->mutex);
159 i40e_destroy_spinlock(struct i40e_spinlock *lock) argument
161 if (mtx_initialized(&lock->mutex))
162 mtx_destroy(&lock->mutex);
/freebsd-11-stable/contrib/subversion/subversion/libsvn_fs_base/bdb/
locks-table.c
40 #include "lock-tokens-table.h"
79 svn_lock_t *lock,
88 SVN_ERR(svn_fs_base__unparse_lock_skel(&lock_skel, lock, pool));
92 svn_fs_base__trail_debug(trail, "lock", "add");
93 return BDB_WRAP(fs, N_("storing lock record"),
116 return BDB_WRAP(fs, N_("deleting lock from 'locks' table"), db_err);
132 svn_lock_t *lock; local
134 svn_fs_base__trail_debug(trail, "lock", "get");
143 SVN_ERR(BDB_WRAP(fs, N_("reading lock"), db_err));
151 SVN_ERR(svn_fs_base__parse_lock_skel(&lock, ske
77 svn_fs_bdb__lock_add(svn_fs_t *fs, const char *lock_token, svn_lock_t *lock, trail_t *trail, apr_pool_t *pool) argument
208 svn_lock_t *lock; local
[all...]
/freebsd-11-stable/sys/dev/sfxge/
sfxge.h
167 struct mtx lock; member in struct:sfxge_evq
225 struct mtx lock; member in struct:sfxge_mcdi
248 struct mtx lock; member in struct:sfxge_port
438 mtx_init(&(__port)->lock, (__port)->lock_name, \
442 mtx_destroy(&(_port)->lock)
444 mtx_lock(&(_port)->lock)
446 mtx_unlock(&(_port)->lock)
448 mtx_assert(&(_port)->lock, MA_OWNED)
457 mtx_init(&(__mcdi)->lock, (__mcdi)->lock_name, \
461 mtx_destroy(&(_mcdi)->lock)
[all...]
/freebsd-11-stable/contrib/unbound/services/
rpz.c
315 /* must hold write lock on auth_zone */
330 lock_rw_wrlock(&r->respip_set->lock);
332 lock_rw_unlock(&r->respip_set->lock);
488 lock_rw_wrlock(&r->local_zones->lock);
498 lock_rw_unlock(&r->local_zones->lock);
505 lock_rw_unlock(&r->local_zones->lock);
513 lock_rw_unlock(&r->local_zones->lock);
525 lock_rw_unlock(&r->local_zones->lock);
528 lock_rw_wrlock(&z->lock);
531 lock_rw_unlock(&z->lock);
[all...]
/freebsd-11-stable/sys/kern/
subr_witness.c
36 * Implementation of the `witness' lock verifier. Originally implemented for
37 * mutexes in BSD/OS. Extended to handle generic lock objects and lock
62 * Special rules concerning Giant and lock orders:
67 * 2) Giant must be released when blocking on a sleepable lock.
71 * Giant. When a thread blocks on a sleepable lock, it sleeps. Hence rule
77 * a sleepable lock because it is a non-sleepable lock and non-sleepable
78 * locks may always be acquired while holding a sleepable lock. The second
79 * case, Giant before a sleepable lock, follow
752 struct lock_object *lock; local
837 witness_init(struct lock_object *lock, const char *type) argument
878 witness_destroy(struct lock_object *lock) argument
1071 witness_checkorder(struct lock_object *lock, int flags, const char *file, int line, struct lock_object *interlock) argument
1418 witness_lock(struct lock_object *lock, int flags, const char *file, int line) argument
1477 witness_upgrade(struct lock_object *lock, int flags, const char *file, int line) argument
1522 witness_downgrade(struct lock_object *lock, int flags, const char *file, int line) argument
1568 witness_unlock(struct lock_object *lock, int flags, const char *file, int line) argument
1710 witness_warn(int flags, struct lock_object *lock, const char *fmt, ...) argument
1782 witness_file(struct lock_object *lock) argument
1793 witness_line(struct lock_object *lock) argument
2156 find_instance(struct lock_list_entry *list, const struct lock_object *lock) argument
2175 struct lock_object *lock; local
2265 witness_display_spinlock(struct lock_object *lock, struct thread *owner, int (*prnt)(const char *fmt, ...)) argument
2280 witness_save(struct lock_object *lock, const char **filep, int *linep) argument
2315 witness_restore(struct lock_object *lock, const char *file, int line) argument
2352 witness_assert(const struct lock_object *lock, int flags, const char *file, int line) argument
2425 witness_setflag(struct lock_object *lock, int flag, int set) argument
2455 witness_norelease(struct lock_object *lock) argument
2462 witness_releaseok(struct lock_object *lock) argument
[all...]
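
witness(4) verifies lock ordering at run time: every acquisition is checked against the orders already observed, and a reversal is reported. A much-simplified illustration of that bookkeeping, in which a fixed matrix of invented lock classes stands in for witness's graph of witness structs:

    #include <stdbool.h>
    #include <stdio.h>

    #define NCLASSES 8
    static bool acquired_before[NCLASSES][NCLASSES];

    /* Called for every acquisition, with the classes currently held. */
    static void
    check_order(const int *held, int nheld, int acquiring)
    {
            for (int i = 0; i < nheld; i++) {
                    if (acquired_before[acquiring][held[i]]) {
                            printf("lock order reversal: class %d before %d\n",
                                held[i], acquiring);
                            continue;
                    }
                    /* Record the order actually observed. */
                    acquired_before[held[i]][acquiring] = true;
            }
    }
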
subr_turnstile.c
36 * turnstile queue's are assigned to a lock held by an owning thread. Thus,
44 * in a hash table based on the address of the lock. Each entry in the
50 * and attached to that thread. When a thread blocks on a lock, if it is the
51 * first thread to block, it lends its turnstile to the lock. If the lock
52 * already has a turnstile, then it gives its turnstile to the lock's
55 * blocked on the lock, then it reclaims the turnstile associated with the lock
71 #include <sys/lock.h>
97 #define TC_HASH(lock) (((uintptr_
536 turnstile_chain_lock(struct lock_object *lock) argument
545 turnstile_trywait(struct lock_object *lock) argument
571 struct lock_object *lock; local
589 turnstile_lookup(struct lock_object *lock) argument
608 turnstile_chain_unlock(struct lock_object *lock) argument
678 struct lock_object *lock; local
1051 struct lock_object *lock; local
1101 struct lock_object *lock; local
1263 struct lock_object *lock; local
1285 struct lock_object *lock; local
[all...]
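
Turnstiles are looked up by hashing the address of the blocked-on lock into a table of chains, each protected by its own spin lock. A sketch of that lookup; the constants are placeholders, and the real TC_HASH, TC_MASK and chain layout live in subr_turnstile.c:

    #include <stdint.h>

    #define TC_TABLESIZE    128     /* must be a power of two */
    #define TC_MASK         (TC_TABLESIZE - 1)
    #define TC_SHIFT        8       /* drop low bits shared by nearby locks */
    #define TC_HASH(lock)   ((((uintptr_t)(lock)) >> TC_SHIFT) & TC_MASK)

    struct turnstile_chain {
            /* In the kernel each chain holds a spin lock and a list of
             * turnstiles; a counter stands in for both here. */
            int tc_blocked;
    };

    static struct turnstile_chain turnstile_chains[TC_TABLESIZE];

    static struct turnstile_chain *
    tc_lookup(const void *lock)
    {
            return &turnstile_chains[TC_HASH(lock)];
    }
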
kern_synch.c
49 #include <sys/lock.h>
107 * vmem tries to lock the sleepq mutexes when free'ing kva, so make sure
123 * The lock argument is unlocked before the caller is suspended, and
125 * flag the lock is not re-locked before returning.
128 _sleep(void *ident, struct lock_object *lock, int priority, argument
144 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, lock,
146 KASSERT(sbt != 0 || mtx_owned(&Giant) || lock != NULL,
147 ("sleeping without a lock"));
151 KASSERT(lock != NULL && lock !
[all...]
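
_sleep() is the engine behind msleep(9) and friends: the interlock passed in is released while the thread sleeps and, unless PDROP is given, re-acquired before return, so the wake-up condition can be re-tested safely. A typical driver-style wait loop built on that contract; the softc layout here is invented:

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/kernel.h>
    #include <sys/lock.h>
    #include <sys/mutex.h>

    struct mydev_softc {
            struct mtx      mtx;
            int             ready;
    };

    static int
    mydev_wait_ready(struct mydev_softc *sc)
    {
            int error = 0;

            mtx_lock(&sc->mtx);
            while (!sc->ready && error == 0) {
                    /* Drops sc->mtx atomically with going to sleep on
                     * &sc->ready; the lock is held again on return. */
                    error = msleep(&sc->ready, &sc->mtx, PCATCH, "mywait", hz);
            }
            mtx_unlock(&sc->mtx);
            return (error);
    }
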
/freebsd-11-stable/contrib/jemalloc/src/
mutex.c
88 InitializeSRWLock(&mutex->lock);
90 if (!InitializeCriticalSectionAndSpinCount(&mutex->lock,
95 mutex->lock = 0;
101 if (_pthread_mutex_init_calloc_cb(&mutex->lock,
111 if (pthread_mutex_init(&mutex->lock, &attr) != 0) {
160 if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
/freebsd-11-stable/sys/contrib/octeon-sdk/
cvmx-zone.c
86 zone->lock.value = CVMX_SPINLOCK_UNLOCKED_VAL;
117 zone->lock.value = CVMX_SPINLOCK_UNLOCKED_VAL;
144 cvmx_spinlock_lock(&zone->lock);
156 cvmx_spinlock_unlock(&zone->lock);
167 cvmx_spinlock_lock(&zone->lock);
170 cvmx_spinlock_unlock(&zone->lock);
/freebsd-11-stable/sys/cddl/contrib/opensolaris/common/atomic/i386/
opensolaris_atomic.S
52 lock
81 lock
98 lock
109 lock
122 lock
130 lock
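
The bare lock lines above are the x86 LOCK prefix, placed in front of the next instruction to make its read-modify-write atomic. Compilers emit the same prefix for C11 atomic operations; a minimal illustration:

    #include <stdatomic.h>

    static atomic_int counter;

    void
    bump(void)
    {
            /* On i386/amd64 this typically compiles to "lock xaddl"
             * (or "lock addl" when the old value is unused). */
            atomic_fetch_add_explicit(&counter, 1, memory_order_seq_cst);
    }
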
/freebsd-11-stable/sys/dev/cxgbe/iw_cxgbe/
resource.c
110 mutex_lock(&uctx->lock);
121 mutex_lock(&rdev->stats.lock);
123 mutex_unlock(&rdev->stats.lock);
150 mutex_unlock(&uctx->lock);
152 mutex_lock(&rdev->stats.lock);
155 mutex_unlock(&rdev->stats.lock);
169 mutex_lock(&uctx->lock);
171 mutex_unlock(&uctx->lock);
180 mutex_lock(&uctx->lock);
191 mutex_lock(&rdev->stats.lock);
[all...]
/freebsd-11-stable/sys/dev/cxgbe/tom/
t4_tom_l2t.c
39 #include <sys/lock.h>
141 * Must be called with the entry's lock held.
146 mtx_assert(&e->lock, MA_OWNED);
156 mtx_assert(&e->lock, MA_OWNED);
169 mtx_assert(&e->lock, MA_OWNED);
183 mtx_assert(&e->lock, MA_OWNED);
256 mtx_lock(&e->lock);
258 mtx_unlock(&e->lock);
284 mtx_lock(&e->lock);
288 mtx_unlock(&e->lock);
[all...]
/freebsd-11-stable/sys/dev/ocs_fc/
ocs_drv_fc.h
58 ocs_rlock_t lock; /*>> Device wide lock */ member in struct:ocs_s
160 ocs_rlock_init(ocs, &ocs->lock, "ocsdevicelock");
165 ocs_rlock_free(&ocs->lock);
170 return ocs_rlock_try(&ocs->lock);
175 ocs_rlock_acquire(&ocs->lock);
180 ocs_rlock_release(&ocs->lock);
/freebsd-11-stable/contrib/unbound/util/
tcp_conn_limit.c
69 lock_quick_destroy(&n->lock);
95 lock_quick_init(&node->lock);
159 lock_quick_lock(&tcl->lock);
164 lock_quick_unlock(&tcl->lock);
174 lock_quick_lock(&tcl->lock);
177 lock_quick_unlock(&tcl->lock);
/freebsd-11-stable/sys/dev/sound/pci/
spicds.c
52 struct mtx *lock; member in struct:spicds_info
152 codec->lock = snd_mtxcreate(codec->name, codec->name);
168 snd_mtxfree(codec->lock);
175 snd_mtxlock(codec->lock);
177 snd_mtxunlock(codec->lock);
183 snd_mtxlock(codec->lock);
185 snd_mtxunlock(codec->lock);
191 snd_mtxlock(codec->lock);
193 snd_mtxunlock(codec->lock);
199 snd_mtxlock(codec->lock);
[all...]
/freebsd-11-stable/contrib/llvm-project/openmp/runtime/src/
kmp_itt.h
100 __kmp_inline void __kmp_itt_lock_creating(kmp_user_lock_p lock,
103 __kmp_inline void __kmp_itt_lock_creating(kmp_user_lock_p lock);
105 __kmp_inline void __kmp_itt_lock_acquiring(kmp_user_lock_p lock);
106 __kmp_inline void __kmp_itt_lock_acquired(kmp_user_lock_p lock);
107 __kmp_inline void __kmp_itt_lock_releasing(kmp_user_lock_p lock);
108 __kmp_inline void __kmp_itt_lock_cancelled(kmp_user_lock_p lock);
109 __kmp_inline void __kmp_itt_lock_destroyed(kmp_user_lock_p lock);
113 __kmp_inline void __kmp_itt_critical_creating(kmp_user_lock_p lock,
116 __kmp_inline void __kmp_itt_critical_creating(kmp_user_lock_p lock);
118 __kmp_inline void __kmp_itt_critical_acquiring(kmp_user_lock_p lock);
[all...]
/freebsd-11-stable/contrib/subversion/subversion/libsvn_wc/
lock.c
2 * lock.c: routines for locking working copy subdirectories.
38 #include "lock.h"
81 #define IS_MISSING(lock) ((lock) == &missing)
207 add_to_shared(svn_wc_adm_access_t *lock, apr_pool_t *scratch_pool)
209 /* ### sometimes we replace &missing with a now-valid lock. */
211 svn_wc_adm_access_t *prior = svn_wc__db_temp_get_access(lock->db,
212 lock->abspath,
215 SVN_ERR(svn_wc__db_temp_close_access(lock->db, lock
206 add_to_shared(svn_wc_adm_access_t *lock, apr_pool_t *scratch_pool) argument
306 svn_wc_adm_access_t *lock = p; local
381 svn_wc_adm_access_t *lock = data; local
413 svn_wc_adm_access_t *lock = p; local
435 svn_wc_adm_access_t *lock = apr_palloc(result_pool, sizeof(*lock)); local
556 svn_wc_adm_access_t *lock; local
638 svn_wc_adm_access_t *lock; local
733 svn_wc_adm_access_t *lock = APR_ARRAY_IDX(rollback, i, local
763 svn_wc_adm_access_t *lock; local
[all...]

Completed in 149 milliseconds
