Searched refs:lock (Results 51 - 75 of 2780) sorted by relevance

/freebsd-11-stable/contrib/llvm-project/lldb/source/Expression/
Expression.cpp
20 assert(m_target_wp.lock());
27 assert(m_target_wp.lock());
/freebsd-11-stable/contrib/ntp/sntp/libevent/include/event2/
thread.h
35 lock its data structures.
58 @name Flags passed to lock functions
62 /** A flag passed to a locking callback when the lock was allocated as a
63 * read-write lock, and we want to acquire or release the lock for writing. */
65 /** A flag passed to a locking callback when the lock was allocated as a
66 * read-write lock, and we want to acquire or release the lock for reading. */
69 * for the lock; if we can't get the lock immediatel
115 int (*lock)(unsigned mode, void *lock); member in struct:evthread_lock_callbacks
[all...]
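
The flags quoted above (EVTHREAD_WRITE, EVTHREAD_READ, EVTHREAD_TRY) are the `mode` argument handed to whatever lock callbacks the application registers through struct evthread_lock_callbacks. As a rough illustration of how those pieces fit together, here is a hedged sketch backing the callbacks with pthread mutexes; on POSIX systems evthread_use_pthreads() already does essentially this, and the helper name setup_libevent_locking is invented for the example.

/* Sketch: plug pthread mutexes into libevent's lock-callback interface.
 * Assumes a POSIX system; error handling is kept minimal. */
#include <stdlib.h>
#include <pthread.h>
#include <event2/thread.h>

static void *lock_alloc(unsigned locktype)
{
	pthread_mutexattr_t attr;
	pthread_mutex_t *m = malloc(sizeof(*m));

	if (m == NULL)
		return NULL;
	pthread_mutexattr_init(&attr);
	if (locktype & EVTHREAD_LOCKTYPE_RECURSIVE)
		pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
	if (pthread_mutex_init(m, &attr) != 0) {
		free(m);
		m = NULL;
	}
	pthread_mutexattr_destroy(&attr);
	return m;
}

static void lock_free(void *lock, unsigned locktype)
{
	(void)locktype;
	pthread_mutex_destroy(lock);
	free(lock);
}

static int lock_lock(unsigned mode, void *lock)
{
	/* EVTHREAD_TRY asks for a non-blocking attempt; EVTHREAD_READ and
	 * EVTHREAD_WRITE only matter for read-write locks, which this
	 * sketch does not advertise. */
	if (mode & EVTHREAD_TRY)
		return pthread_mutex_trylock(lock);
	return pthread_mutex_lock(lock);
}

static int lock_unlock(unsigned mode, void *lock)
{
	(void)mode;
	return pthread_mutex_unlock(lock);
}

int setup_libevent_locking(void)
{
	struct evthread_lock_callbacks cbs = {
		.lock_api_version = EVTHREAD_LOCK_API_VERSION,
		.supported_locktypes = EVTHREAD_LOCKTYPE_RECURSIVE,
		.alloc = lock_alloc,
		.free = lock_free,
		.lock = lock_lock,
		.unlock = lock_unlock,
	};

	return evthread_set_lock_callbacks(&cbs);
}
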
/freebsd-11-stable/contrib/netbsd-tests/kernel/
t_lock.c
36 #include <machine/lock.h>
53 ATF_TC(lock); variable
54 ATF_TC_HEAD(lock, tc)
60 ATF_TC_BODY(lock, tc)
84 ATF_TP_ADD_TC(tp, lock);
/freebsd-11-stable/contrib/llvm-project/lldb/include/lldb/Host/
ProcessRunLock.h
42 // Try to lock the read lock, but only do so if there are no writers.
43 bool TryLock(ProcessRunLock *lock) { argument
45 if (m_lock == lock)
46 return true; // We already have this lock locked
50 if (lock) {
51 if (lock->ReadTryLock()) {
52 m_lock = lock;
/freebsd-11-stable/contrib/ntp/lib/isc/
quota.c
34 return (isc_mutex_init(&quota->lock));
43 DESTROYLOCK(&quota->lock);
48 LOCK(&quota->lock);
50 UNLOCK(&quota->lock);
55 LOCK(&quota->lock);
57 UNLOCK(&quota->lock);
63 LOCK(&quota->lock);
72 UNLOCK(&quota->lock);
78 LOCK(&quota->lock);
81 UNLOCK(&quota->lock);
[all...]
/freebsd-11-stable/contrib/ntp/sntp/libevent/
evthread_win32.c
53 CRITICAL_SECTION *lock = mm_malloc(sizeof(CRITICAL_SECTION)); local
54 if (!lock)
56 if (InitializeCriticalSectionAndSpinCount(lock, SPIN_COUNT) == 0) {
57 mm_free(lock);
60 return lock;
66 CRITICAL_SECTION *lock = lock_; local
67 DeleteCriticalSection(lock);
68 mm_free(lock);
74 CRITICAL_SECTION *lock = lock_; local
76 return ! TryEnterCriticalSection(lock);
86 CRITICAL_SECTION *lock = lock_; local
160 CRITICAL_SECTION *lock = lock_; local
183 CRITICAL_SECTION lock; member in struct:evthread_win32_cond
236 CRITICAL_SECTION *lock = lock_; local
[all...]
/freebsd-11-stable/libexec/rtld-elf/
rtld_lock.c
32 * We use the "simple, non-scalable reader-preference lock" from:
38 * In this algorithm the lock is a single word. Its low-order bit is
39 * set when a writer holds the lock. The remaining high-order bits
40 * contain a count of readers desiring the lock. The algorithm requires
58 #define WAFLAG 0x1 /* A writer holds the lock */
59 #define RC_INCR 0x2 /* Adjusts count of readers desiring lock */
62 volatile u_int lock; member in struct:Struct_Lock
78 * Arrange for the lock to occupy its own cache line. First, we
95 l->lock = 0;
100 def_lock_destroy(void *lock) argument
108 def_rlock_acquire(void *lock) argument
118 def_wlock_acquire(void *lock) argument
135 def_lock_release(void *lock) argument
195 rlock_acquire(rtld_lock_t lock, RtldLockState *lockstate) argument
211 wlock_acquire(rtld_lock_t lock, RtldLockState *lockstate) argument
227 lock_release(rtld_lock_t lock, RtldLockState *lockstate) argument
247 lock_upgrade(rtld_lock_t lock, RtldLockState *lockstate) argument
[all...]
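
The comment quoted above describes the whole algorithm: the lock is a single word whose low-order bit (WAFLAG) marks a writer and whose remaining bits count the readers desiring the lock, stepped by RC_INCR. A minimal sketch of that scheme using C11 atomics, with rtld's signal blocking and spin-then-sleep strategy left out and the rp_* names invented for the example:

/* Sketch of the single-word reader-preference lock described above.
 * WAFLAG and RC_INCR mirror the constants quoted from rtld_lock.c;
 * plain busy waits stand in for rtld's real waiting strategy. */
#include <stdatomic.h>

#define WAFLAG	0x1u	/* a writer holds the lock */
#define RC_INCR	0x2u	/* adjusts the count of readers desiring the lock */

typedef struct {
	_Atomic unsigned int word;
} rp_lock;

static void
rp_rlock(rp_lock *l)
{
	/* Register the reader first (this is what gives readers preference),
	 * then wait for any writer to drain. */
	atomic_fetch_add_explicit(&l->word, RC_INCR, memory_order_acquire);
	while (atomic_load_explicit(&l->word, memory_order_acquire) & WAFLAG)
		;	/* spin */
}

static void
rp_runlock(rp_lock *l)
{
	atomic_fetch_sub_explicit(&l->word, RC_INCR, memory_order_release);
}

static void
rp_wlock(rp_lock *l)
{
	unsigned int idle = 0;

	/* A writer may enter only when no readers are registered and no
	 * other writer holds the lock, i.e. the word is exactly 0. */
	while (!atomic_compare_exchange_weak_explicit(&l->word, &idle, WAFLAG,
	    memory_order_acquire, memory_order_relaxed))
		idle = 0;	/* retry from the idle state */
}

static void
rp_wunlock(rp_lock *l)
{
	atomic_fetch_and_explicit(&l->word, ~WAFLAG, memory_order_release);
}
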
/freebsd-11-stable/contrib/jemalloc/include/jemalloc/internal/
mutex.h
34 SRWLOCK lock; member in struct:malloc_mutex_s
36 CRITICAL_SECTION lock;
39 OSSpinLock lock;
41 pthread_mutex_t lock;
44 pthread_mutex_t lock;
85 AcquireSRWLockExclusive(&mutex->lock);
87 EnterCriticalSection(&mutex->lock);
90 OSSpinLockLock(&mutex->lock);
92 pthread_mutex_lock(&mutex->lock);
106 ReleaseSRWLockExclusive(&mutex->lock);
[all...]
/freebsd-11-stable/sys/dev/drm2/ttm/
ttm_bo_manager.c
41 * Currently we use a spinlock for the lock, but a mutex *may* be
48 struct mtx lock; member in struct:ttm_range_manager
72 mtx_lock(&rman->lock);
77 mtx_unlock(&rman->lock);
84 mtx_unlock(&rman->lock);
98 mtx_lock(&rman->lock);
100 mtx_unlock(&rman->lock);
118 mtx_init(&rman->lock, "ttmrman", NULL, MTX_DEF);
128 mtx_lock(&rman->lock);
131 mtx_unlock(&rman->lock);
[all...]
ttm_memory.h
56 * @lock: Lock to protect the @shrink - and the memory accounting members,
75 struct mtx lock; member in struct:ttm_mem_global
108 mtx_lock(&glob->lock);
110 mtx_unlock(&glob->lock);
114 mtx_unlock(&glob->lock);
129 mtx_lock(&glob->lock);
132 mtx_unlock(&glob->lock);
/freebsd-11-stable/sys/compat/linuxkpi/common/include/linux/
spinlock.h
36 #include <sys/lock.h>
136 #define spin_lock_init(lock) linux_spin_lock_init(lock, spin_lock_name("lnxspin"))
139 linux_spin_lock_init(spinlock_t *lock, const char *name) argument
142 memset(lock, 0, sizeof(*lock));
143 mtx_init(&lock->m, name, NULL, MTX_DEF | MTX_NOWITNESS);
147 spin_lock_destroy(spinlock_t *lock) argument
150 mtx_destroy(&lock->m);
153 #define DEFINE_SPINLOCK(lock) \
[all...]
/freebsd-11-stable/contrib/llvm-project/libunwind/src/
RWMutex.hpp
33 bool lock() { return true; } function in class:libunwind::RWMutex
49 bool lock() {
68 bool lock() { return pthread_rwlock_wrlock(&_lock) == 0; }
81 pthread_rwlock_rdlock(pthread_rwlock_t *lock);
83 pthread_rwlock_wrlock(pthread_rwlock_t *lock);
85 pthread_rwlock_unlock(pthread_rwlock_t *lock);
99 bool lock() {
/freebsd-11-stable/contrib/unbound/util/storage/
lruhash.c
55 lock_quick_init(&array[i].lock);
56 lock_protect(&array[i].lock, &array[i],
71 lock_quick_init(&table->lock);
86 lock_quick_destroy(&table->lock);
91 lock_protect(&table->lock, table, sizeof(*table));
92 lock_protect(&table->lock, table->array,
104 lock_quick_destroy(&bin->lock);
133 lock_quick_lock(&table->array[i].lock);
135 /* lock both destination bins */
136 lock_quick_lock(&newa[i].lock);
[all...]
/freebsd-11-stable/contrib/subversion/subversion/libsvn_subr/
lock.c
2 * lock.c: routines for svn_lock_t objects.
46 svn_lock_dup(const svn_lock_t *lock, apr_pool_t *pool)
50 if (lock == NULL)
54 *new_l = *lock;
44 svn_lock_dup(const svn_lock_t *lock, apr_pool_t *pool) argument
/freebsd-11-stable/sys/dev/smbus/
smbconf.c
33 #include <sys/lock.h>
51 mtx_lock(&sc->lock);
54 mtx_unlock(&sc->lock);
93 error = msleep(sc, &sc->lock, SMBPRI|PCATCH, "smbreq", 0);
97 error = msleep(sc, &sc->lock, SMBPRI, "smbreq", 0);
124 mtx_lock(&sc->lock);
126 mtx_unlock(&sc->lock);
128 mtx_lock(&sc->lock);
144 mtx_unlock(&sc->lock);
149 mtx_unlock(&sc->lock);
[all...]
/freebsd-11-stable/sys/kern/
kern_lockf.c
71 #include <sys/lock.h>
175 * This structure is used to keep track of both local and remote lock
177 * the lock owner structure. Each possible lock owner (local proc for
182 * If a lock owner has a lock that blocks some other lock or a lock
183 * that is waiting for some other lock, it also has a vertex in the
200 pid_t lo_pid; /* (c) Process Id of the lock owne
353 lf_free_lock(struct lockf_entry *lock) argument
415 struct lockf_entry *lock; local
781 struct lockf_entry *lock, *nlock; local
900 lf_alloc_vertex(struct lockf_entry *lock) argument
994 lf_add_outgoing(struct lockf *state, struct lockf_entry *lock) argument
1062 lf_add_incoming(struct lockf *state, struct lockf_entry *lock) argument
1095 lf_insert_lock(struct lockf *state, struct lockf_entry *lock) argument
1149 lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all, struct lockf_entry_list *granted) argument
1174 lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start, struct lockf_entry_list *granted) argument
1190 lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end, struct lockf_entry_list *granted) argument
1215 lf_activate_lock(struct lockf *state, struct lockf_entry *lock) argument
1328 lf_cancel_lock(struct lockf *state, struct lockf_entry *lock) argument
1379 lf_setlock(struct lockf *state, struct lockf_entry *lock, struct vnode *vp, void **cookiep) argument
1578 lf_getlock(struct lockf *state, struct lockf_entry *lock, struct flock *fl) argument
1607 lf_cancel(struct lockf *state, struct lockf_entry *lock, void *cookie) argument
1660 lf_getblock(struct lockf *state, struct lockf_entry *lock) argument
1704 lf_findoverlap(struct lockf_entry **overlap, struct lockf_entry *lock, int type) argument
2497 lf_print(char *tag, struct lockf_entry *lock) argument
2523 lf_printlist(char *tag, struct lockf_entry *lock) argument
[all...]
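
The comment above sketches the deadlock machinery: every lock owner that blocks, or is blocked by, another owner gets a vertex in a graph, and edges record who waits on whom. As a loose illustration only (the kernel maintains this graph incrementally and with far more state), here is a toy wait-for graph with a DFS check that refuses an edge which would close a cycle; all names are invented:

/* Toy wait-for graph: edge[a][b] means "owner a waits on a lock held by
 * owner b".  A new wait edge is rejected when it would create a cycle,
 * which is the condition the kernel reports as a deadlock (EDEADLK). */
#include <stdbool.h>

#define MAX_OWNERS	64

static bool edge[MAX_OWNERS][MAX_OWNERS];

/* Is `dst` reachable from `src` by following wait-for edges? */
static bool
reachable(int src, int dst, bool seen[MAX_OWNERS])
{
	if (src == dst)
		return true;
	seen[src] = true;
	for (int next = 0; next < MAX_OWNERS; next++)
		if (edge[src][next] && !seen[next] && reachable(next, dst, seen))
			return true;
	return false;
}

/* Try to record "waiter waits on holder"; fail if that would deadlock. */
static bool
add_wait(int waiter, int holder)
{
	bool seen[MAX_OWNERS] = { false };

	/* A path holder -> ... -> waiter plus the new edge closes a cycle. */
	if (reachable(holder, waiter, seen))
		return false;
	edge[waiter][holder] = true;
	return true;
}
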
/freebsd-11-stable/contrib/gcc/
gthr-gnat.h
41 extern void __gnat_install_locks (void (*lock) (void), void (*unlock) (void));
/freebsd-11-stable/sys/contrib/octeon-sdk/
cvmx-coremask.c
72 cvmx_spinlock_t lock; /**< mutex spinlock */ member in struct:__anon8453
104 cvmx_spinlock_lock(&state.lock);
122 cvmx_spinlock_unlock(&state.lock);
132 cvmx_spinlock_unlock(&state.lock);
/freebsd-11-stable/sys/dev/cs/
if_csvar.h
64 struct mtx lock; member in struct:cs_softc
69 #define CS_LOCK(sc) mtx_lock(&(sc)->lock)
70 #define CS_UNLOCK(sc) mtx_unlock(&(sc)->lock)
71 #define CS_ASSERT_LOCKED(sc) mtx_assert(&(sc)->lock, MA_OWNED)
/freebsd-11-stable/sys/cddl/contrib/opensolaris/common/atomic/amd64/
opensolaris_atomic.S
33 lock
44 lock
53 lock
60 lock
/freebsd-11-stable/sys/sys/
_lockmgr.h
38 struct lock { struct
rangelock.h
42 * The structure representing the range lock. Caller may request
44 * all existing lock owners are compatible with the request. Two lock
51 * rl_waiters is the queue containing in order (a) granted write lock
52 * requests, (b) granted read lock requests, and (c) in order of arrival,
53 * lock requests which cannot be granted yet.
55 * rl_currdep is the first lock request that cannot be granted now due
68 void rangelock_init(struct rangelock *lock);
69 void rangelock_destroy(struct rangelock *lock);
70 void rangelock_unlock(struct rangelock *lock, voi
[all...]
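
The comment above states the granting rule: a request (read or write, over a byte range) is granted only when it is compatible with every current owner. A hedged sketch of the usual compatibility predicate follows, with the struct and helpers invented for illustration; the kernel's own queue entries carry more state than this.

/* Two range-lock requests may be held concurrently unless their byte
 * ranges overlap and at least one of them asks for write access. */
#include <stdbool.h>
#include <sys/types.h>		/* off_t */

struct range_req {
	off_t	start;		/* first byte covered, inclusive */
	off_t	end;		/* last byte covered, exclusive */
	bool	write;		/* true for a write (exclusive) request */
};

static bool
ranges_overlap(const struct range_req *a, const struct range_req *b)
{
	return (a->start < b->end && b->start < a->end);
}

static bool
compatible(const struct range_req *a, const struct range_req *b)
{
	/* Non-overlapping requests never conflict; overlapping ones are
	 * fine only when both are read requests. */
	return (!ranges_overlap(a, b) || (!a->write && !b->write));
}
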
/freebsd-11-stable/sys/dev/cxgbe/
t4_smt.h
56 struct mtx lock; member in struct:smt_entry
60 struct rwlock lock; member in struct:smt_data
80 mtx_lock(&e->lock);
82 mtx_unlock(&e->lock);
/freebsd-11-stable/contrib/ofed/opensm/include/complib/
cl_passivelock.h
38 * This file contains the passive lock, which synchronizes passive threads.
39 * The passive lock allows multiple readers to access a resource
64 * are sharing the lock with a single thread holding the lock exclusively.
66 * Passive lock works exclusively between threads and cannot be used in
69 * The passive lock functions operate a cl_plock_t structure which should
96 pthread_rwlock_t lock; member in struct:_cl_plock
101 * lock
105 * Records the current state of the lock, such as initialized,
118 * passive lock
[all...]
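
The passive lock described above is a readers-writer lock built on the pthread_rwlock_t member shown at line 96, plus tracking of the lock's state (e.g. whether it is initialized). A minimal sketch of the reader/writer pattern it provides, written directly against pthread_rwlock for illustration:

/* Many readers may hold the lock at once; a writer holds it exclusively. */
#include <pthread.h>

static pthread_rwlock_t plock = PTHREAD_RWLOCK_INITIALIZER;

static void
reader_path(void)
{
	pthread_rwlock_rdlock(&plock);	/* shared with other readers */
	/* ... read the shared resource ... */
	pthread_rwlock_unlock(&plock);
}

static void
writer_path(void)
{
	pthread_rwlock_wrlock(&plock);	/* excludes readers and writers */
	/* ... modify the shared resource ... */
	pthread_rwlock_unlock(&plock);
}
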
/freebsd-11-stable/lib/libthr/thread/
thr_rtld.c
56 struct urwlock lock; member in struct:rtld_lock
81 l->lock.rw_flags = URWLOCK_PREFER_READER;
86 _thr_rtld_lock_destroy(void *lock) argument
91 locki = (struct rtld_lock *)lock - &lock_place[0];
93 ((char *)lock)[i] = 0;
112 _thr_rtld_rlock_acquire(void *lock) argument
120 l = (struct rtld_lock *)lock;
123 while (_thr_rwlock_rdlock(&l->lock, 0, NULL) != 0)
130 _thr_rtld_wlock_acquire(void *lock) argument
138 l = (struct rtld_lock *)lock;
147 _thr_rtld_lock_release(void *lock) argument
[all...]

Completed in 310 milliseconds
