Lines Matching refs:reader

31 * Big reader spinlocks provide cache-local contention-free read
38 * of larger reader objects due to necessary linkage overhead. In
88 /* The reader list is protected under the writer br. */
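
The matched lines reference a per-reader nesting count (n_readers), previous/next linkage, a list head (br->readers), and a writer flag. A minimal sketch of the two structures this implies, reconstructed from these fragments rather than copied from ck_brlock.h (the real header may add padding or cache-line alignment):

/* Sketch only: field names taken from the matched lines. */
struct ck_brlock_reader {
	unsigned int n_readers;            /* nesting depth of this reader's acquisitions */
	struct ck_brlock_reader *previous; /* doubly-linked list of registered readers */
	struct ck_brlock_reader *next;
};

struct ck_brlock {
	struct ck_brlock_reader *readers;  /* head of the reader list (line 88) */
	unsigned int writer;               /* non-zero while a writer holds the lock */
};
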
142 ck_brlock_read_register(struct ck_brlock *br, struct ck_brlock_reader *reader)
145 reader->n_readers = 0;
146 reader->previous = NULL;
151 reader->next = ck_pr_load_ptr(&br->readers);
152 if (reader->next != NULL)
153 reader->next->previous = reader;
154 ck_pr_store_ptr(&br->readers, reader);
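
Lines 142-154 splice the new reader onto the head of the doubly-linked reader list. Per the comment at line 88, the surrounding (unmatched) lines presumably bracket the splice with the writer lock; a sketch under that assumption:

/* Registration path implied by lines 142-154; the write_lock/unlock
 * bracketing is assumed from the comment at line 88. */
reader->n_readers = 0;
reader->previous = NULL;

ck_brlock_write_lock(br);               /* serialize against other list updates */
reader->next = ck_pr_load_ptr(&br->readers);
if (reader->next != NULL)
	reader->next->previous = reader;    /* old head points back at the new reader */
ck_pr_store_ptr(&br->readers, reader);  /* publish the new list head */
ck_brlock_write_unlock(br);
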
161 ck_brlock_read_unregister(struct ck_brlock *br, struct ck_brlock_reader *reader)
166 if (reader->next != NULL)
167 reader->next->previous = reader->previous;
169 if (reader->previous != NULL)
170 reader->previous->next = reader->next;
172 br->readers = reader->next;
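
Note that line 172 is not unconditional: it presumably sits in the else branch of the test at line 169 and only runs when the departing reader was the list head. A sketch of the unlink under that assumption, again with the writer lock held per line 88:

/* Unlink implied by lines 161-172; the else on the line before 172 is assumed. */
ck_brlock_write_lock(br);
if (reader->next != NULL)
	reader->next->previous = reader->previous;
if (reader->previous != NULL)
	reader->previous->next = reader->next;
else
	br->readers = reader->next;         /* reader was the head of the list */
ck_brlock_write_unlock(br);
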
179 ck_brlock_read_lock(struct ck_brlock *br, struct ck_brlock_reader *reader)
182 if (reader->n_readers >= 1) {
183 ck_pr_store_uint(&reader->n_readers, reader->n_readers + 1);
192 ck_pr_fas_uint(&reader->n_readers, 1);
195 * Serialize reader counter update with respect to load of
200 ck_pr_store_uint(&reader->n_readers, 1);
203 * Serialize reader counter update with respect to load of
212 ck_pr_store_uint(&reader->n_readers, 0);
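
Lines 179-212 are the read-lock path. Lines 182-183 are the re-entrancy fast path: the count is already non-zero, so only the nesting depth is bumped. The remaining fragments belong to an acquire loop that announces the reader, fences, and re-checks the writer flag, backing off if a writer slipped in between; the two counter updates (ck_pr_fas_uint at line 192 versus ck_pr_store_uint at line 200) presumably correspond to architecture-specific fence choices. A sketch of the loop using the plain-store variant:

/* Acquire loop implied by lines 179-212 (plain-store path; the fence call
 * shown here is assumed to be ck_pr_fence_store_load). */
if (reader->n_readers >= 1) {
	ck_pr_store_uint(&reader->n_readers, reader->n_readers + 1);
	return;                                  /* nested acquisition */
}

for (;;) {
	while (ck_pr_load_uint(&br->writer) == true)
		ck_pr_stall();                       /* wait out an active writer */

	ck_pr_store_uint(&reader->n_readers, 1); /* announce this reader */
	ck_pr_fence_store_load();                /* order the announcement before the re-check */

	if (ck_pr_load_uint(&br->writer) == false)
		break;                               /* no writer raced us; read lock held */

	ck_pr_store_uint(&reader->n_readers, 0); /* a writer appeared; retreat and retry */
}
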
221 struct ck_brlock_reader *reader,
226 if (reader->n_readers >= 1) {
227 ck_pr_store_uint(&reader->n_readers, reader->n_readers + 1);
240 ck_pr_fas_uint(&reader->n_readers, 1);
243 * Serialize reader counter update with respect to load of
248 ck_pr_store_uint(&reader->n_readers, 1);
251 * Serialize reader counter update with respect to load of
260 ck_pr_store_uint(&reader->n_readers, 0);
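
Lines 221-260 repeat the same pattern but belong to a function taking an extra parameter; given the context this appears to be the trylock variant, whose third argument (assumed here to be an unsigned spin bound) caps how long the caller polls an active writer before giving up. A hypothetical call site, assuming the header installs as <ck_brlock.h> and the function returns bool:

#include <ck_brlock.h>
#include <stdbool.h>

/* Hypothetical helper: enter a read-side critical section without blocking
 * indefinitely. Assumption: ck_brlock_read_trylock() returns false if a
 * writer stayed active for the whole spin budget (100 attempts here). */
static bool
try_read(struct ck_brlock *br, struct ck_brlock_reader *reader)
{
	if (ck_brlock_read_trylock(br, reader, 100) == false)
		return false;            /* writer active; caller can retry later */

	/* ... read shared state ... */

	ck_brlock_read_unlock(reader);
	return true;
}
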
271 ck_brlock_read_unlock(struct ck_brlock_reader *reader)
275 ck_pr_store_uint(&reader->n_readers, reader->n_readers - 1);
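
Taken together, the matched lines trace the reader-side lifecycle: register once per reader object, lock/unlock around each critical section (re-entrantly, tracked by n_readers), and unregister before the reader object disappears. A minimal end-to-end sketch, assuming the conventional ck_brlock API (CK_BRLOCK_INITIALIZER plus the register/lock/unlock/unregister calls named in the matched lines and a write_lock/write_unlock pair on the writer side); the shared data and thread functions are illustrative only:

#include <ck_brlock.h>

static struct ck_brlock lock = CK_BRLOCK_INITIALIZER;
static int shared_value;

static void
reader_thread(void)
{
	struct ck_brlock_reader self;

	/* Register once: lines 142-154 splice `self` into br->readers. */
	ck_brlock_read_register(&lock, &self);

	for (int i = 0; i < 1000; i++) {
		ck_brlock_read_lock(&lock, &self);  /* cache-local when no writer is active */
		int v = shared_value;               /* read-side critical section */
		(void)v;
		ck_brlock_read_unlock(&self);       /* lines 271-275: drop n_readers */
	}

	/* Unregister before `self` goes out of scope: lines 161-172. */
	ck_brlock_read_unregister(&lock, &self);
}

static void
writer_thread(void)
{
	/* The writer side (not in the match list) presumably waits for every
	 * registered reader's n_readers to drain, which is what makes write
	 * acquisition scale with the number of registered readers. */
	ck_brlock_write_lock(&lock);
	shared_value++;
	ck_brlock_write_unlock(&lock);
}
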