Searched refs:locks (Results 1 - 25 of 87) sorted by last modified time


/linux-master/fs/btrfs/
qgroup.c
1145 /* Release locks on tree_root before we access quota_root */
2128 btrfs_tree_unlock_rw(eb, path->locks[level]);
2129 path->locks[level] = 0;
2233 src_path->locks[root_level] = 0;
2256 src_path->locks[cur_level] = BTRFS_READ_LOCK;
2386 dst_path->locks[cur_level] = BTRFS_READ_LOCK;
2419 dst_path->locks[cur_level]);
2423 dst_path->locks[cur_level] = 0;
2466 dst_path->locks[level] = 0;
2559 path->locks[root_leve
[all...]
backref.c
604 * If we're search_commit_root we could possibly be holding locks on
2175 path->locks[0] = 0;
extent-tree.c
3909 * Return the error after taking the locks.
3920 * Return the error after taking the locks.
5369 BUG_ON(!path->locks[level]);
5385 if (path->locks[level] && !wc->keep_locks) {
5386 btrfs_tree_unlock_rw(eb, path->locks[level]);
5387 path->locks[level] = 0;
5394 BUG_ON(!path->locks[level]);
5408 if (path->locks[level] && level > 0) {
5409 btrfs_tree_unlock_rw(eb, path->locks[level]);
5410 path->locks[leve
[all...]
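Both qgroup.c and extent-tree.c above follow the same pattern: the btrfs path remembers, per tree level, which lock it holds on that level's extent buffer in path->locks[level], releases it with btrfs_tree_unlock_rw(), and then zeroes the slot. Below is a minimal userspace sketch of that "per-level lock slot" bookkeeping; the names and types are illustrative stand-ins, not the btrfs API.

/*
 * Userspace sketch of the per-level lock-slot pattern seen above:
 * locks[level] records which lock (if any) is held on that level so a
 * generic unwind loop can release them all. Illustrative names only,
 * not the kernel API. Build with -lpthread.
 */
#include <pthread.h>

#define MAX_LEVEL 8
#define NO_LOCK    0
#define READ_LOCK  1
#define WRITE_LOCK 2

struct walk_path {
    pthread_rwlock_t *nodes[MAX_LEVEL]; /* one rwlock per tree level */
    int locks[MAX_LEVEL];               /* which lock we hold, or NO_LOCK */
};

/* Mirrors "btrfs_tree_unlock_rw(eb, path->locks[level]); path->locks[level] = 0;" */
static void release_level(struct walk_path *path, int level)
{
    if (path->locks[level] == NO_LOCK)
        return;
    pthread_rwlock_unlock(path->nodes[level]); /* same call for read or write locks */
    path->locks[level] = NO_LOCK;
}

static void release_path(struct walk_path *path)
{
    for (int level = 0; level < MAX_LEVEL; level++)
        release_level(path, level);
}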
/linux-master/fs/smb/server/
smb2pdu.c
7190 lock_ele = req->locks;
7236 /* Check conflict locks in one request */
7242 pr_err("conflict two locks in one request\n");
7284 /* check locks in connection list */
smb2misc.c
175 *off = offsetof(struct smb2_lock_req, locks);
226 * regardless of number of locks. Subtract single
/linux-master/fs/smb/common/
smb2pdu.h
854 DECLARE_FLEX_ARRAY(struct smb2_lock_element, locks);
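The server side reads the lock elements straight off the request's flexible array (lock_ele = req->locks) and, in smb2misc.c, validates the variable part of the PDU against offsetof(struct smb2_lock_req, locks). A standalone sketch of that layout and length check follows; the field layout is simplified for illustration and is not the real SMB2 wire format.

/*
 * Standalone sketch of a request ending in a flexible array of lock
 * elements, plus the offsetof()-based length check used to validate
 * it. Simplified structs, not the real SMB2 wire format.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

struct lock_element {
    uint64_t offset;
    uint64_t length;
    uint32_t flags;
    uint32_t reserved;
};

struct lock_req {
    uint16_t struct_size;
    uint16_t lock_count;
    uint64_t file_id;
    struct lock_element locks[];   /* flexible array member */
};

/* Reject a buffer that cannot hold the fixed header plus the number
 * of lock elements the header claims. */
static bool lock_req_len_ok(const struct lock_req *req, size_t buf_len)
{
    size_t fixed = offsetof(struct lock_req, locks);

    if (buf_len < fixed)
        return false;
    return (buf_len - fixed) / sizeof(struct lock_element) >= req->lock_count;
}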
/linux-master/fs/smb/client/
cifsglob.h
492 /* unlock range of mandatory locks */
1359 * This is used to track byte stream locks on the file
1363 struct list_head blist; /* pointer to locks blocked on this */
1423 struct cifsFileInfo *cfile; /* fid that owns locks */
1424 struct list_head locks; /* locks held by fid above */ member in struct:cifs_fid_locks
1574 struct list_head llist; /* locks held by this inode */
1676 * - This will be called by cifsd, with no locks held.
1691 * - it will be called by cifsd, with no locks held
1958 * Here are all the locks (spinloc
[all...]
file.c
445 if (!list_empty(&cur->locks)) {
495 INIT_LIST_HEAD(&fdlocks->locks);
521 cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
577 list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
921 * Try to reacquire byte range locks that were released when session
936 /* can cache locks - no need to relock */
1128 cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
1342 list_for_each_entry(li, &fdlocks->locks, llist) {
1432 list_add_tail(&lock->llist, &cfile->llist->locks);
1439 * 2) 1, if no locks preven
[all...]
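The cifs client tracks byte-range locks with intrusive lists: each open fid owns a cifs_fid_locks with a locks list head, every lock embeds an llist node that is added with list_add_tail(), and teardown walks the list with list_for_each_entry_safe(). The sketch below reproduces that bookkeeping in userspace with minimal list helpers standing in for <linux/list.h>; all names are illustrative.

/*
 * Userspace sketch of the intrusive-list pattern cifs uses to track
 * byte-range locks per open file: each lock embeds a node that is
 * linked onto the owning fid's "locks" list.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct list_node { struct list_node *prev, *next; };

static void list_init(struct list_node *h) { h->prev = h->next = h; }
static void list_add_tail(struct list_node *n, struct list_node *h)
{
    n->prev = h->prev; n->next = h;
    h->prev->next = n; h->prev = n;
}

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct fid_locks {
    struct list_node locks;        /* locks held under this fid */
};

struct byte_range_lock {
    uint64_t offset, length;
    int type;
    struct list_node llist;        /* entry in fid_locks.locks */
};

int main(void)
{
    struct fid_locks fl;
    struct byte_range_lock lk = { .offset = 0, .length = 4096, .type = 1 };

    list_init(&fl.locks);
    list_add_tail(&lk.llist, &fl.locks);   /* mirrors the file.c add */

    /* walk the list the way list_for_each_entry() would */
    for (struct list_node *p = fl.locks.next; p != &fl.locks; p = p->next) {
        struct byte_range_lock *l = container_of(p, struct byte_range_lock, llist);
        printf("lock %llu+%llu type %d\n",
               (unsigned long long)l->offset, (unsigned long long)l->length, l->type);
    }
    return 0;
}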
/linux-master/net/unix/
af_unix.c
163 spin_lock(&net->unx.table.locks[hash1]);
170 spin_lock(&net->unx.table.locks[hash1]);
171 spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING);
178 spin_unlock(&net->unx.table.locks[hash1]);
182 spin_unlock(&net->unx.table.locks[hash1]);
183 spin_unlock(&net->unx.table.locks[hash2]);
337 spin_lock(&net->unx.table.locks[sk->sk_hash]);
339 spin_unlock(&net->unx.table.locks[sk->sk_hash]);
344 spin_lock(&net->unx.table.locks[sk->sk_hash]);
346 spin_unlock(&net->unx.table.locks[s
[all...]
diag.c
214 spin_lock(&net->unx.table.locks[slot]);
224 spin_unlock(&net->unx.table.locks[slot]);
230 spin_unlock(&net->unx.table.locks[slot]);
245 spin_lock(&net->unx.table.locks[i]);
249 spin_unlock(&net->unx.table.locks[i]);
253 spin_unlock(&net->unx.table.locks[i]);
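af_unix keeps one spinlock per hash bucket (net->unx.table.locks[hash]); when a socket has to move between two buckets both locks are taken, with spin_lock_nested(..., SINGLE_DEPTH_NESTING) annotating the second acquisition for lockdep, while the diag code walks the buckets one lock at a time. Below is a userspace sketch of the "lock two buckets in a fixed order" idea that makes such double locking deadlock-free; mutexes stand in for the spinlocks, and the lower-index-first ordering rule is an assumption for illustration.

/*
 * Sketch of per-bucket locking with a fixed order when two buckets
 * must be held at once (take the lower-numbered bucket first to avoid
 * ABBA deadlock). Illustrative names only. Build with -lpthread.
 */
#include <pthread.h>

#define NR_BUCKETS 256

static pthread_mutex_t bucket_lock[NR_BUCKETS];

static void buckets_init(void)
{
    for (int i = 0; i < NR_BUCKETS; i++)
        pthread_mutex_init(&bucket_lock[i], NULL);
}

static void double_lock(unsigned int h1, unsigned int h2)
{
    if (h1 == h2) {
        pthread_mutex_lock(&bucket_lock[h1]);
        return;
    }
    if (h1 > h2) {                       /* always take the lower bucket first */
        unsigned int tmp = h1; h1 = h2; h2 = tmp;
    }
    pthread_mutex_lock(&bucket_lock[h1]);
    pthread_mutex_lock(&bucket_lock[h2]);
}

static void double_unlock(unsigned int h1, unsigned int h2)
{
    pthread_mutex_unlock(&bucket_lock[h1]);
    if (h1 != h2)
        pthread_mutex_unlock(&bucket_lock[h2]);
}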
/linux-master/arch/x86/include/asm/
alternative.h
139 void *locks, void *locks_end,
147 void *locks, void *locks_end,
146 alternatives_smp_module_add(struct module *mod, char *name, void *locks, void *locks_end, void *text, void *text_end) argument
/linux-master/fs/dlm/
lock.c
39 Stage 2, xxxx_lock(), just finds and locks the relevant rsb which is
498 * NL locks on an rsb, but without the guarantee that the cached master value
500 * to keep NL locks on an rsb that they may lock again shortly; this can lead
636 /* Because we have held no locks on this rsb,
712 /* During recovery, other nodes can send us new MSTCPY locks (from
1355 /* convention says granted locks kept in order of grmode */
1938 compatible with other granted locks */
2024 * between locks on the convert queue while they couldn't be granted anyway.
2032 * be zero, i.e. there will never be conv-deadlk between two locks that are
2089 * mode locks
[all...]
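The lock.c comment at 1355 notes that granted locks are kept on the rsb's grant queue in order of grant mode. A minimal sketch of one way to maintain that invariant, inserting a new lock before the first entry with a lower mode, is shown below; the singly-linked queue and all names are illustrative, not the dlm data structures.

/*
 * Sketch of keeping a grant queue ordered by grant mode: walk past
 * entries whose mode is at least as strong as the new lock's, then
 * splice the new lock in. Illustrative only.
 */
#include <stddef.h>

enum mode { MODE_NL, MODE_CR, MODE_CW, MODE_PR, MODE_PW, MODE_EX };

struct lkb_sketch {
    enum mode grmode;
    struct lkb_sketch *next;
};

static void add_to_grantqueue(struct lkb_sketch **head, struct lkb_sketch *lkb)
{
    struct lkb_sketch **pos = head;

    while (*pos && (*pos)->grmode >= lkb->grmode)
        pos = &(*pos)->next;

    lkb->next = *pos;
    *pos = lkb;
}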
user.c
172 An EOL lock needs to be removed from the process's list of locks.
653 hanging off the open file that's used to keep track of locks owned by the
673 INIT_LIST_HEAD(&proc->locks);
dlm_internal.h
255 struct list_head lkb_ownqueue; /* list of locks for a process */
697 the process's locks point back to it*/
710 /* locks list is kept so we can remove all a process's locks when it
718 struct list_head locks; member in struct:dlm_user_proc
/linux-master/arch/sparc/lib/
Makefile
14 lib-$(CONFIG_SPARC32) += copy_user.o locks.o
/linux-master/arch/powerpc/lib/
Makefile
60 obj64-$(CONFIG_SMP) += locks.o
/linux-master/tools/perf/util/
mem-events.c
608 if (lock & P(LOCK, LOCKED)) stats->locks++;
715 stats->locks += add->locks;
mem-events.h
63 u32 locks; /* count of 'lock' transactions */ member in struct:c2c_stats
/linux-master/tools/perf/
builtin-c2c.c
2385 fprintf(out, " Locked Load/Store Operations : %10d\n", stats->locks);
2436 fprintf(out, " Locked Access on shared lines : %10d\n", stats->locks);
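perf c2c bumps c2c_stats.locks once per sample whose data source carries the LOCKED flag, sums the per-entry counters when merging stats, and prints the total in the report. The sketch below reproduces that count-then-merge flow with the flag test reduced to a plain bitmask; the real code decodes the perf_mem_data_src encoding, so the flag name here is illustrative.

/*
 * Minimal sketch of the "count locked accesses, then merge stats"
 * pattern from perf c2c. The flag bit is illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define SAMPLE_FLAG_LOCKED (1u << 0)   /* illustrative flag bit */

struct c2c_stats_sketch {
    uint32_t locks;                    /* count of 'lock' transactions */
};

static void account_sample(struct c2c_stats_sketch *stats, uint32_t flags)
{
    if (flags & SAMPLE_FLAG_LOCKED)
        stats->locks++;
}

static void merge_stats(struct c2c_stats_sketch *dst,
                        const struct c2c_stats_sketch *add)
{
    dst->locks += add->locks;          /* mirrors "stats->locks += add->locks" */
}

int main(void)
{
    struct c2c_stats_sketch a = {0}, b = {0}, total = {0};

    account_sample(&a, SAMPLE_FLAG_LOCKED);
    account_sample(&b, SAMPLE_FLAG_LOCKED);
    account_sample(&b, 0);

    merge_stats(&total, &a);
    merge_stats(&total, &b);
    printf("Locked Load/Store Operations : %10u\n", total.locks);
    return 0;
}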
/linux-master/drivers/md/dm-vdo/
slab-depot.c
51 return &journal->locks[sequence_number % journal->size];
491 if (journal->reap_lock == &journal->locks[journal->size])
492 journal->reap_lock = &journal->locks[0];
501 * reference block write which released the locks allowing the slab journal to reap may not
605 * function will not change locks during replay.)
669 /* Ensure no locks are spuriously held on an empty journal. */
763 * Release the per-entry locks for any unused entries in the block we are about to
1063 * locks, and return its VIO to the pool.
1829 "New block has locks, but journal is not full");
1833 * block has locks; i
[all...]
slab-depot.h
115 /* The recovery journal of the VDO (slab journal holds locks on it) */
147 /* The locks for each on disk block */
148 struct journal_lock *locks; member in struct:slab_journal
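The slab journal keeps one journal_lock per on-disk block (the locks array in struct slab_journal), maps a sequence number to its lock with sequence_number % journal->size, and wraps the reap cursor back to locks[0] when it runs off the end of the array. A hedged sketch of that circular mapping, with simplified types and illustrative names:

/*
 * Sketch of the circular "one lock per on-disk journal block" mapping:
 * a sequence number selects its lock slot by modulo, and the reap
 * cursor wraps at the end of the array. Simplified for illustration.
 */
#include <stdint.h>
#include <stddef.h>

struct journal_lock_sketch {
    uint32_t count;                            /* outstanding references on this block */
};

struct slab_journal_sketch {
    size_t size;                               /* blocks in the on-disk journal */
    struct journal_lock_sketch *locks;         /* one lock per block */
    struct journal_lock_sketch *reap_lock;     /* next block eligible to reap */
};

static struct journal_lock_sketch *
get_lock(struct slab_journal_sketch *j, uint64_t sequence_number)
{
    return &j->locks[sequence_number % j->size];
}

static void advance_reap_cursor(struct slab_journal_sketch *j)
{
    j->reap_lock++;
    if (j->reap_lock == &j->locks[j->size])    /* wrap past the last slot */
        j->reap_lock = &j->locks[0];
}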
recovery-journal.c
40 * A lock_counter is intended to keep all of the locks for the blocks in the recovery journal. The
42 * is at index 0, zone 0's lock 1 is at index 1, and zone 1's lock 0 is at index 'locks'. This
45 * The locks are implemented as a single object instead of as a lock counter per lock both to
90 block_count_t zone_counter = (counter->locks * zone_id) + lock_number;
487 /* Try reaping again in case more locks were released while flush was out. */
553 * Attempts to reap the journal now that all the locks on some journal block have been released.
630 counter->locks = journal->size;
1150 /* Release any unused entry locks. */
recovery-journal.h
62 * counters are used as locks to prevent premature reaping of journal blocks. Each time a new
94 /* The number of logical zones which may hold locks */
96 /* The number of physical zones which may hold locks */
98 /* The number of locks */
99 block_count_t locks; member in struct:lock_counter
148 /* The slab depot which can hold locks on this journal */
150 /* The block map which can hold locks on this journal */
214 /* The locks for each on-disk block */
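The recovery journal's lock_counter flattens its per-zone, per-block counts into one array: zone 0's lock 0 sits at index 0, zone 0's lock 1 at index 1, and zone 1's lock 0 at index locks, i.e. index = locks * zone_id + lock_number. The sketch below shows that layout for a single zone type, with plain integers standing in for the kernel's atomic counters; names are illustrative.

/*
 * Sketch of the flattened (zone_id, lock_number) -> index layout used
 * by the recovery journal lock counter: all of zone 0's per-block
 * counts come first, then zone 1's, and so on.
 */
#include <stdint.h>
#include <stdlib.h>

struct lock_counter_sketch {
    uint64_t locks;        /* number of journal blocks (locks per zone) */
    uint64_t zones;        /* number of zones that may hold locks */
    uint32_t *counters;    /* locks * zones entries, zone-major */
};

static uint32_t *get_counter(struct lock_counter_sketch *c,
                             uint64_t zone_id, uint64_t lock_number)
{
    /* mirrors "(counter->locks * zone_id) + lock_number" */
    return &c->counters[(c->locks * zone_id) + lock_number];
}

static int init_lock_counter(struct lock_counter_sketch *c,
                             uint64_t journal_size, uint64_t zones)
{
    c->locks = journal_size;   /* mirrors "counter->locks = journal->size" */
    c->zones = zones;
    c->counters = calloc(journal_size * zones, sizeof(*c->counters));
    return c->counters ? 0 : -1;
}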
physical-zone.c
86 "must not downgrade block map write locks");
91 * data_vio write locks are downgraded in place--the writer retains the hold on the lock.
113 * Claim the next free reference atomically since hash locks from multiple hash zone
178 * Unused (idle) PBN locks are kept in a list. Just like in a malloc implementation, the lock
183 /** @entry: Only used while locks are in the pool. */
185 /** @lock: Only used while locks are not in the pool. */
190 * struct pbn_lock_pool - list of PBN locks.
192 * The lock pool is little more than the memory allocated for the locks.
195 /** @capacity: The number of locks allocated for the pool. */
197 /** @borrowed: The number of locks currentl
202 idle_pbn_lock locks[]; member in struct:pbn_lock_pool
[all...]
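physical-zone.c keeps all PBN locks preallocated in a pool: the pool owns the memory for every lock, idle locks wait on a list, capacity records how many exist and borrowed how many are currently handed out. The sketch below captures that borrow/return scheme with a pointer stack standing in for the kernel's idle list; fields and names are illustrative.

/*
 * Sketch of the PBN lock pool idea: every lock is allocated up front,
 * idle ones are kept aside, and callers borrow/return them instead of
 * allocating. Illustrative only.
 */
#include <stdlib.h>

struct pbn_lock_sketch {
    int type;                        /* only meaningful while borrowed */
};

struct pbn_lock_pool_sketch {
    size_t capacity;                 /* locks allocated for the pool */
    size_t borrowed;                 /* locks currently handed out */
    struct pbn_lock_sketch *locks;   /* backing storage for every lock */
    struct pbn_lock_sketch **idle;   /* stack of idle locks */
    size_t idle_count;
};

static int pool_init(struct pbn_lock_pool_sketch *p, size_t capacity)
{
    p->capacity = capacity;
    p->borrowed = 0;
    p->idle_count = capacity;
    p->locks = calloc(capacity, sizeof(*p->locks));
    p->idle = calloc(capacity, sizeof(*p->idle));
    if (!p->locks || !p->idle)
        return -1;
    for (size_t i = 0; i < capacity; i++)
        p->idle[i] = &p->locks[i];
    return 0;
}

static struct pbn_lock_sketch *borrow_lock(struct pbn_lock_pool_sketch *p)
{
    if (p->idle_count == 0)
        return NULL;                 /* pool exhausted */
    p->borrowed++;
    return p->idle[--p->idle_count];
}

static void return_lock(struct pbn_lock_pool_sketch *p,
                        struct pbn_lock_sketch *lock)
{
    p->idle[p->idle_count++] = lock;
    p->borrowed--;
}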
/linux-master/arch/x86/kernel/
alternative.c
1441 const s32 *locks; member in struct:smp_alt_module
1455 void *locks, void *locks_end,
1475 smp->locks = locks;
1479 DPRINTK(SMP, "locks %p -> %p, text %p -> %p, name %s\n",
1480 smp->locks, smp->locks_end,
1485 alternatives_smp_unlock(locks, locks_end, text, text_end);
1520 alternatives_smp_lock(mod->locks, mod->locks_end,
1543 for (poff = mod->locks; poff < mod->locks_end; poff++) {
1453 alternatives_smp_module_add(struct module *mod, char *name, void *locks, void *locks_end, void *text, void *text_end) argument
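alternatives_smp_module_add() records, per module, the [locks, locks_end) range of 32-bit offsets naming its LOCK-prefix sites together with the module's text range, so the kernel can later walk those offsets (for (poff = mod->locks; poff < mod->locks_end; poff++)) and patch LOCK prefixes in or out when switching between UP and SMP. The sketch below shows only that bookkeeping and walk; the offset-to-address arithmetic and all names are assumptions for illustration, not the kernel implementation.

/*
 * Sketch of the bookkeeping behind alternatives_smp_module_add(): each
 * module registers the offsets of its LOCK-prefix sites plus its text
 * range, and a later pass walks those offsets and patches each site.
 */
#include <stddef.h>
#include <stdint.h>

struct smp_alt_module_sketch {
    const char *name;
    const int32_t *locks, *locks_end;   /* offsets of LOCK-prefix sites */
    uint8_t *text, *text_end;           /* code range the offsets point into */
};

#define MAX_MODULES 16
static struct smp_alt_module_sketch modules[MAX_MODULES];
static size_t nr_modules;

static int module_add(const char *name,
                      const int32_t *locks, const int32_t *locks_end,
                      uint8_t *text, uint8_t *text_end)
{
    if (nr_modules == MAX_MODULES)
        return -1;
    modules[nr_modules++] = (struct smp_alt_module_sketch){
        .name = name, .locks = locks, .locks_end = locks_end,
        .text = text, .text_end = text_end,
    };
    return 0;
}

/* Walk every registered LOCK-prefix site, mirroring the
 * "for (poff = mod->locks; poff < mod->locks_end; poff++)" loop. */
static void for_each_lock_site(void (*patch)(uint8_t *site))
{
    for (size_t m = 0; m < nr_modules; m++) {
        const struct smp_alt_module_sketch *mod = &modules[m];

        for (const int32_t *poff = mod->locks; poff < mod->locks_end; poff++) {
            uint8_t *site = (uint8_t *)poff + *poff; /* offset relative to its own location (assumed) */

            if (site >= mod->text && site < mod->text_end)
                patch(site);
        }
    }
}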

Completed in 520 milliseconds
