Searched refs:readers (Results 1 - 25 of 43) sorted by last modified time


/linux-master/fs/bcachefs/
btree_update_interior.c:269 struct prealloc_nodes *p = &as->prealloc_nodes[b->c.lock.readers != NULL];
btree_key_cache.c:92 if (ck->c.lock.readers) {
134 if (!ck->c.lock.readers) {
btree_cache.c:50 if (b->c.lock.readers)
debug.c:513 prt_printf(out, "%u", b->c.lock.readers != NULL);
btree_locking.c:419 int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read]; local
428 six_lock_readers_add(&b->lock, -readers);
431 six_lock_readers_add(&b->lock, readers);
six.c:104 read_count += *per_cpu_ptr(lock->readers, cpu);
154 if (type == SIX_LOCK_read && lock->readers) {
156 this_cpu_inc(*lock->readers); /* signal that we own lock */
163 this_cpu_sub(*lock->readers, !ret);
171 } else if (type == SIX_LOCK_write && lock->readers) {
570 lock->readers) {
572 this_cpu_dec(*lock->readers);
658 if (!lock->readers) {
666 if (lock->readers)
667 this_cpu_dec(*lock->readers);
[all...]
six.h:18 * not with readers, we can take intent locks at the start of the operation,
141 unsigned __percpu *readers; member in struct:six_lock
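
The six.c and six.h hits above show the per-cpu variant of bcachefs's SIX locks: when lock->readers is non-NULL, taking a read lock only bumps the local CPU's counter (six.c:156), and the total reader count is recovered by summing every CPU's slot (six.c:104). Below is a minimal sketch of that per-cpu counting pattern using the kernel percpu API; the names are illustrative, not the actual six.c code.

    /* Sketch of per-cpu reader counting in the style of six.c; names are illustrative. */
    #include <linux/percpu.h>
    #include <linux/cpumask.h>

    struct pcpu_rlock {
        unsigned __percpu *readers;     /* allocated with alloc_percpu(unsigned) */
    };

    static void pcpu_rlock_read_acquire(struct pcpu_rlock *lock)
    {
        /*
         * The fast path touches only the local CPU's counter; the real six.c
         * additionally issues a full barrier, re-checks for a writer and backs
         * the increment out on conflict (six.c:163).
         */
        this_cpu_inc(*lock->readers);
    }

    static void pcpu_rlock_read_release(struct pcpu_rlock *lock)
    {
        this_cpu_dec(*lock->readers);
    }

    /* Writers pay the cost: summing all per-CPU slots yields the reader count. */
    static unsigned pcpu_rlock_read_count(struct pcpu_rlock *lock)
    {
        unsigned read_count = 0;
        int cpu;

        for_each_possible_cpu(cpu)
            read_count += *per_cpu_ptr(lock->readers, cpu);
        return read_count;
    }

The trade-off the hits illustrate: read lock/unlock never bounce a shared cacheline, while the write path (and debug output such as debug.c:513) has to walk all CPUs to learn whether readers exist.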
/linux-master/fs/btrfs/
extent_io.c:3336 if (atomic_read(&subpage->readers))
subpage.c:175 atomic_set(&ret->readers, 0);
271 atomic_add(nbits, &subpage->readers);
292 ASSERT(atomic_read(&subpage->readers) >= nbits);
295 last = atomic_sub_and_test(nbits, &subpage->readers);
339 ASSERT(atomic_read(&subpage->readers) == 0);
subpage.h:64 * Both data and metadata needs to track how many readers are for the
66 * Data relies on @readers to unlock the page when last reader finished.
70 atomic_t readers; member in struct:btrfs_subpage
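
The subpage.c hits trace one lifetime of an atomic reader count: it starts at zero (subpage.c:175), each read in flight adds the number of covered bits (271), and the reader that brings the count back to zero, detected via atomic_sub_and_test() (295), is the one that performs the unlock. Below is a generic "last reader cleans up" sketch of that pattern, with hypothetical names rather than the btrfs_subpage code.

    #include <linux/atomic.h>
    #include <linux/bug.h>

    /* Illustrative "last reader unlocks" bookkeeping; not the btrfs_subpage code. */
    struct range_readers {
        atomic_t readers;               /* number of outstanding read units */
    };

    static void range_readers_begin(struct range_readers *rr, unsigned int nbits)
    {
        atomic_add(nbits, &rr->readers);
    }

    /* Returns true when the caller dropped the last unit and must do the unlock. */
    static bool range_readers_end(struct range_readers *rr, unsigned int nbits)
    {
        WARN_ON(atomic_read(&rr->readers) < nbits);
        return atomic_sub_and_test(nbits, &rr->readers);
    }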
locking.c:125 * - try-lock semantics for readers and writers
336 * if there are pending readers no new writers would be allowed to come in and
342 atomic_set(&lock->readers, 0);
351 if (atomic_read(&lock->readers))
356 /* Ensure writers count is updated before we check for pending readers */
358 if (atomic_read(&lock->readers)) {
371 wait_event(lock->pending_writers, !atomic_read(&lock->readers));
383 atomic_inc(&lock->readers);
402 if (atomic_dec_and_test(&lock->readers))
locking.h:102 * the threads that hold the lock as readers signal the condition for the wait
119 * the condition and do the signaling acquire the lock as readers (shared
201 atomic_t readers; member in struct:btrfs_drew_lock
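
The locking.c/locking.h hits outline the btrfs "drew" lock protocol: any number of readers or any number of writers may hold it, but never both. A writer advertises itself, issues a barrier, re-checks the reader count, and either backs out or waits on pending_writers until the readers drain (locking.c:356-371); the last reader's atomic_dec_and_test() wakes it (402). The following is a hedged sketch of that handshake with illustrative names, not the btrfs implementation.

    #include <linux/atomic.h>
    #include <linux/wait.h>

    /*
     * Fields mirror the pattern in the hits; initialize with atomic_set(..., 0)
     * and init_waitqueue_head() as in locking.c:342. Illustrative only.
     */
    struct drew_sketch {
        atomic_t readers;
        atomic_t writers;
        wait_queue_head_t pending_writers;
        wait_queue_head_t pending_readers;
    };

    static void drew_write_unlock(struct drew_sketch *lock)
    {
        atomic_dec(&lock->writers);
        wake_up(&lock->pending_readers);
    }

    static bool drew_try_write_lock(struct drew_sketch *lock)
    {
        if (atomic_read(&lock->readers))
            return false;

        atomic_inc(&lock->writers);

        /* Make the writers count visible before re-checking for readers. */
        smp_mb__after_atomic();
        if (atomic_read(&lock->readers)) {
            /* A reader raced in; readers win, so back out. */
            drew_write_unlock(lock);
            return false;
        }
        return true;
    }

    static void drew_write_lock(struct drew_sketch *lock)
    {
        while (!drew_try_write_lock(lock))
            wait_event(lock->pending_writers, !atomic_read(&lock->readers));
    }

    static void drew_read_lock(struct drew_sketch *lock)
    {
        atomic_inc(&lock->readers);
        smp_mb__after_atomic();
        /* Block while any writer holds the lock. */
        wait_event(lock->pending_readers, !atomic_read(&lock->writers));
    }

    static void drew_read_unlock(struct drew_sketch *lock)
    {
        /* The last reader lets blocked writers re-check the count. */
        if (atomic_dec_and_test(&lock->readers))
            wake_up(&lock->pending_writers);
    }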
/linux-master/include/linux/
rwbase_rt.h:12 atomic_t readers; member in struct:rwbase_rt
18 .readers = ATOMIC_INIT(READER_BIAS), \
25 atomic_set(&(rwbase)->readers, READER_BIAS); \
31 return atomic_read(&rwb->readers) != READER_BIAS;
36 return atomic_read(&rwb->readers) == WRITER_BIAS;
41 return atomic_read(&rwb->readers) > 0;
/linux-master/drivers/mtd/ubi/
ubi.h:281 * @readers: number of users holding this volume in read-only mode
336 int readers; member in struct:ubi_volume
447 * @vol->readers, @vol->writers, @vol->exclusive,
kapi.c:106 * readers and one writer at a time.
163 vol->readers += 1;
173 if (vol->exclusive || vol->writers || vol->readers ||
357 vol->readers -= 1;
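
The ubi.h and kapi.c hits show per-volume open-mode accounting: a read-only open bumps vol->readers (kapi.c:163), a close drops it (357), and an exclusive open is refused while any readers, writers or another exclusive user exist (173), with the comment fragment at 106 indicating multiple readers but one writer at a time. A simplified sketch of that kind of accounting under a lock, using hypothetical names rather than the UBI code:

    #include <linux/spinlock.h>
    #include <linux/errno.h>

    /* Illustrative open-mode accounting in the spirit of the ubi_volume hits. */
    enum open_mode { MODE_READONLY, MODE_READWRITE, MODE_EXCLUSIVE };

    struct vol_sketch {
        spinlock_t lock;
        int readers;    /* read-only openers */
        int writers;    /* read-write openers (at most one) */
        int exclusive;  /* exclusive opener (0 or 1) */
    };

    static int vol_sketch_open(struct vol_sketch *vol, enum open_mode mode)
    {
        int err = 0;

        spin_lock(&vol->lock);
        switch (mode) {
        case MODE_READONLY:
            if (vol->exclusive)
                err = -EBUSY;
            else
                vol->readers += 1;
            break;
        case MODE_READWRITE:
            if (vol->exclusive || vol->writers)
                err = -EBUSY;
            else
                vol->writers += 1;
            break;
        case MODE_EXCLUSIVE:
            /* Exclusive access requires that nobody else holds the volume. */
            if (vol->exclusive || vol->writers || vol->readers)
                err = -EBUSY;
            else
                vol->exclusive = 1;
            break;
        }
        spin_unlock(&vol->lock);
        return err;
    }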
/linux-master/drivers/media/dvb-core/
dvbdev.c:136 if (!dvbdev->readers)
138 dvbdev->readers--;
158 dvbdev->readers++;
dvb_frontend.c:3011 .readers = (~0) - 1,
/linux-master/tools/perf/util/
session.c:2519 int i, ret, readers, nr_readers; local
2552 readers = 1;
2557 rd[readers] = (struct reader) {
2565 ret = reader__init(&rd[readers], NULL);
2568 ret = reader__mmap(&rd[readers], session);
2571 readers++;
2575 while (readers) {
2585 readers--;
/linux-master/drivers/md/dm-vdo/indexer/
volume-index.c:806 struct buffered_reader **readers,
820 result = uds_read_from_buffered_reader(readers[i], buffer,
873 result = uds_read_from_buffered_reader(readers[i], decoded,
891 result = uds_start_restoring_delta_index(&sub_index->delta_index, readers,
983 struct buffered_reader **readers, unsigned int reader_count)
988 result = start_restoring_volume_index(volume_index, readers, reader_count);
992 result = finish_restoring_volume_index(volume_index, readers, reader_count);
999 result = uds_check_guard_delta_lists(readers, reader_count);
805 start_restoring_volume_sub_index(struct volume_sub_index *sub_index, struct buffered_reader **readers, unsigned int reader_count) argument
982 uds_load_volume_index(struct volume_index *volume_index, struct buffered_reader **readers, unsigned int reader_count) argument
volume-index.h:183 struct buffered_reader **readers,
index-layout.c:914 struct buffered_reader *readers[MAX_ZONES]; local
924 result = open_region_reader(layout, &isl->open_chapter, &readers[0]);
928 result = uds_load_open_chapter(index, readers[0]);
929 uds_free_buffered_reader(readers[0]);
935 &readers[zone]);
938 uds_free_buffered_reader(readers[zone - 1]);
944 result = uds_load_volume_index(index->volume_index, readers, isl->zone_count);
946 uds_free_buffered_reader(readers[zone]);
950 result = open_region_reader(layout, &isl->index_page_map, &readers[0]);
954 result = uds_read_index_page_map(index->volume->index_page_map, readers[
[all...]
/linux-master/kernel/locking/
rwsem.c:38 * - Bit 0: RWSEM_READER_OWNED - rwsem may be owned by readers (just a hint)
55 * is involved. Ideally we would like to track all the readers that own
109 * 1) rwsem_mark_wake() for readers -- set, clear
296 * The lock is owned by readers when
301 * Having some reader bits set is not enough to guarantee a readers owned
302 * lock as the readers may be in the process of backing out from the count
350 RWSEM_WAKE_READERS, /* Wake readers only */
362 * Magic number to batch-wakeup waiting readers, even when writers are
409 * Implies rwsem_del_waiter() for all woken readers.
451 * We prefer to do the first reader grant before counting readers
809 int readers = count >> RWSEM_READER_SHIFT; local
[all...]
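
The rwsem.c hits point at the count-word encoding: the reader count lives in the upper bits and is extracted with count >> RWSEM_READER_SHIFT (rwsem.c:809), while the low bits carry writer/waiter flags, and the separate owner field's bit 0 (RWSEM_READER_OWNED) is only a hint. Below is a tiny, self-contained illustration of that packing; the constant values are assumptions for the example, not necessarily the kernel's.

    /*
     * Illustrative packing of a reader count above a few flag bits, as the
     * rwsem.c hits suggest; the actual kernel constants may differ.
     */
    #include <stdio.h>

    #define SKETCH_WRITER_LOCKED    (1UL << 0)
    #define SKETCH_FLAG_WAITERS     (1UL << 1)
    #define SKETCH_READER_SHIFT     8
    #define SKETCH_READER_BIAS      (1UL << SKETCH_READER_SHIFT)

    int main(void)
    {
        unsigned long count = 0;

        count += 3 * SKETCH_READER_BIAS;    /* three readers acquired the lock */
        count |= SKETCH_FLAG_WAITERS;       /* a writer is queued behind them */

        printf("readers = %lu, waiters = %d, writer = %d\n",
               count >> SKETCH_READER_SHIFT,
               !!(count & SKETCH_FLAG_WAITERS),
               !!(count & SKETCH_WRITER_LOCKED));
        return 0;
    }

Running it prints "readers = 3, waiters = 1, writer = 0", which is the kind of decoding the hit at rwsem.c:809 performs on the real count word.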
/linux-master/fs/
pipe.c:72 * FIFOs and Pipes now generate SIGIO for both readers and writers.
424 !READ_ONCE(pipe->readers);
457 if (!pipe->readers) {
498 if (!pipe->readers) {
571 * space. We wake up any readers if necessary, and then
700 if (!pipe->readers)
729 pipe->readers--;
734 if (!pipe->readers != !pipe->writers) {
896 pipe->readers = pipe->writers = 1;
1053 * but that requires that we wake up any other readers/writer
[all...]
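
The pipe.c hits show the kernel-side bookkeeping: pipe->readers counts open read ends (set to 1 alongside writers when the pipe is set up, pipe.c:896), writes are refused once it reaches zero (498, 700), and the coredump.c hits bump and later drop the count around a wait (472-485). From userspace the same counter is observable as SIGPIPE/EPIPE after the last read end closes; a small runnable POSIX demonstration (not kernel code):

    #include <errno.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        int fds[2];
        char c = 'x';

        /* Ignore SIGPIPE so the failed write is reported via errno instead. */
        signal(SIGPIPE, SIG_IGN);

        if (pipe(fds) != 0) {
            perror("pipe");
            return 1;
        }

        /* Closing the only read end drops the kernel's reader count to zero. */
        close(fds[0]);

        if (write(fds[1], &c, 1) < 0)
            printf("write failed: %s (no readers left)\n", strerror(errno));

        close(fds[1]);
        return 0;
    }

With the read end closed first, the write fails with "Broken pipe", which corresponds to the !pipe->readers checks visible in the hits above.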
coredump.c:472 pipe->readers++;
482 wait_event_interruptible(pipe->rd_wait, pipe->readers == 1);
485 pipe->readers--;
/linux-master/tools/testing/radix-tree/
maple.c:34724 pthread_t readers[RCU_RANGE_COUNT / 5]; local
34944 pthread_t readers[20]; local
34993 pthread_t readers[20]; local
35041 pthread_t readers[20]; local
[all...]

Completed in 289 milliseconds
