Lines Matching defs:br

67 buf_ring_enqueue(struct buf_ring *br, void *buf)
72 for (i = br->br_cons_head; i != br->br_prod_head;
73 i = ((i + 1) & br->br_cons_mask))
74 if(br->br_ring[i] == buf)
76 buf, i, br->br_prod_tail, br->br_cons_tail);
80 prod_head = br->br_prod_head;
81 prod_next = (prod_head + 1) & br->br_prod_mask;
82 cons_tail = br->br_cons_tail;
86 if (prod_head == br->br_prod_head &&
87 cons_tail == br->br_cons_tail) {
88 br->br_drops++;
94 } while (!atomic_cmpset_acq_int(&br->br_prod_head, prod_head, prod_next));
96 if (br->br_ring[prod_head] != NULL)
99 br->br_ring[prod_head] = buf;
106 while (br->br_prod_tail != prod_head)
108 atomic_store_rel_int(&br->br_prod_tail, prod_next);
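The enqueue lines above show the multi-producer half of the algorithm: a compare-and-set on br_prod_head claims a slot, the buffer is written into it, and the slot is published by waiting for earlier producers and then doing a release store to br_prod_tail. Below is a minimal user-space sketch of that pattern using C11 atomics in place of the kernel atomic_* primitives; the names, the fixed power-of-two size, and the simplified full check are illustrative and not part of buf_ring.h.

#include <stdatomic.h>
#include <stdbool.h>

#define RING_SIZE	256			/* must be a power of two */
#define RING_MASK	(RING_SIZE - 1)

struct ring {
	_Atomic unsigned	prod_head;	/* next slot a producer claims */
	_Atomic unsigned	prod_tail;	/* slots published to consumers */
	_Atomic unsigned	cons_head;	/* next slot a consumer claims */
	_Atomic unsigned	cons_tail;	/* slots fully consumed */
	void			*slots[RING_SIZE];
};

static bool
ring_enqueue(struct ring *r, void *buf)
{
	unsigned head, next, cons;

	do {
		head = atomic_load_explicit(&r->prod_head,
		    memory_order_relaxed);
		next = (head + 1) & RING_MASK;
		cons = atomic_load_explicit(&r->cons_tail,
		    memory_order_acquire);
		/* Ring looks full; the kernel version re-checks the indices
		 * before counting this as a drop, the sketch just fails. */
		if (next == cons)
			return (false);
	} while (!atomic_compare_exchange_weak_explicit(&r->prod_head,
	    &head, next, memory_order_acquire, memory_order_relaxed));

	/* We own slot `head`; fill it before publishing. */
	r->slots[head] = buf;

	/* Earlier producers must publish first (cpu_spinwait() in the kernel). */
	while (atomic_load_explicit(&r->prod_tail,
	    memory_order_relaxed) != head)
		;
	atomic_store_explicit(&r->prod_tail, next, memory_order_release);
	return (true);
}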
118 buf_ring_dequeue_mc(struct buf_ring *br)
125 cons_head = br->br_cons_head;
126 cons_next = (cons_head + 1) & br->br_cons_mask;
128 if (cons_head == br->br_prod_tail) {
132 } while (!atomic_cmpset_acq_int(&br->br_cons_head, cons_head, cons_next));
134 buf = br->br_ring[cons_head];
136 br->br_ring[cons_head] = NULL;
143 while (br->br_cons_tail != cons_head)
146 atomic_store_rel_int(&br->br_cons_tail, cons_next);
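buf_ring_dequeue_mc is the mirror image for multiple consumers: a compare-and-set claims br_cons_head, the buffer is read out of the slot, and br_cons_tail is advanced in claim order. Continuing the struct ring sketch above, with the same caveats:

static void *
ring_dequeue_mc(struct ring *r)
{
	unsigned head, next;
	void *buf;

	do {
		head = atomic_load_explicit(&r->cons_head,
		    memory_order_relaxed);
		/* Acquire pairs with the producer's release of prod_tail,
		 * so slots[head] is visible once we see it published. */
		if (head == atomic_load_explicit(&r->prod_tail,
		    memory_order_acquire))
			return (NULL);		/* ring empty */
		next = (head + 1) & RING_MASK;
	} while (!atomic_compare_exchange_weak_explicit(&r->cons_head,
	    &head, next, memory_order_acquire, memory_order_relaxed));

	buf = r->slots[head];
	r->slots[head] = NULL;		/* like the DEBUG_BUFRING clearing above */

	/* Consumers retire slots in the order they claimed them. */
	while (atomic_load_explicit(&r->cons_tail,
	    memory_order_relaxed) != head)
		;
	atomic_store_explicit(&r->cons_tail, next, memory_order_release);
	return (buf);
}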
158 buf_ring_dequeue_sc(struct buf_ring *br)
180 * cons_head = br->br_cons_head;                     <core 1, dequeue_sc>
181 * atomic_cmpset_acq_32(&br->br_prod_head, ...));    <core 0, enqueue>
182 * buf = br->br_ring[cons_head]; <see <1>>           <core 1, dequeue_sc>
183 * br->br_ring[prod_head] = buf;                     <core 0, enqueue>
184 * atomic_store_rel_32(&br->br_prod_tail, ...);      <core 0, enqueue>
185 * prod_tail = br->br_prod_tail;                     <core 1, dequeue_sc>
190 * <1> The load (on core 1) from br->br_ring[cons_head] can be reordered (speculatively read) by the CPU.
193 cons_head = atomic_load_acq_32(&br->br_cons_head);
195 cons_head = br->br_cons_head;
197 prod_tail = atomic_load_acq_32(&br->br_prod_tail);
199 cons_next = (cons_head + 1) & br->br_cons_mask;
201 cons_next_next = (cons_head + 2) & br->br_cons_mask;
209 prefetch(br->br_ring[cons_next]);
211 prefetch(br->br_ring[cons_next_next]);
214 br->br_cons_head = cons_next;
215 buf = br->br_ring[cons_head];
218 br->br_ring[cons_head] = NULL;
219 if (!mtx_owned(br->br_lock))
221 if (br->br_cons_tail != cons_head)
223 br->br_cons_tail, cons_head);
225 br->br_cons_tail = cons_next;
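buf_ring_dequeue_sc avoids the compare-and-set because only one consumer may run, but the quoted comment (lines 180-190, originally a two-column interleaving of the producer on core 0 and the consumer on core 1) records why line 197 loads br_prod_tail with acquire semantics: without that barrier the read of br_ring[cons_head] can be satisfied before the producer's release store to br_prod_tail is observed, returning a stale slot. A single-consumer version of the sketch above that keeps that ordering:

static void *
ring_dequeue_sc(struct ring *r)
{
	unsigned head, next;
	void *buf;

	/* Single consumer: cons_head is effectively private here. */
	head = atomic_load_explicit(&r->cons_head, memory_order_relaxed);
	/*
	 * Acquire on prod_tail is the point of remark <1>: it prevents the
	 * read of slots[head] below from being satisfied speculatively
	 * before the producer's release store to prod_tail is seen.
	 */
	if (head == atomic_load_explicit(&r->prod_tail, memory_order_acquire))
		return (NULL);			/* nothing published yet */
	next = (head + 1) & RING_MASK;

	atomic_store_explicit(&r->cons_head, next, memory_order_relaxed);
	buf = r->slots[head];
	r->slots[head] = NULL;			/* DEBUG_BUFRING-style clearing */
	/* Release so a producer's full check sees the slot retired only
	 * after we are done reading it. */
	atomic_store_explicit(&r->cons_tail, next, memory_order_release);
	return (buf);
}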
235 buf_ring_advance_sc(struct buf_ring *br)
240 cons_head = br->br_cons_head;
241 prod_tail = br->br_prod_tail;
243 cons_next = (cons_head + 1) & br->br_cons_mask;
246 br->br_cons_head = cons_next;
248 br->br_ring[cons_head] = NULL;
250 br->br_cons_tail = cons_next;
270 buf_ring_putback_sc(struct buf_ring *br, void *_new)
272 KASSERT_FREEBSD(br->br_cons_head != br->br_prod_tail,
274 br->br_ring[br->br_cons_head] = _new;
283 buf_ring_peek(struct buf_ring *br)
287 if ((br->br_lock != NULL) && !mtx_owned(br->br_lock))
296 if (br->br_cons_head == br->br_prod_tail)
299 return (br->br_ring[br->br_cons_head]);
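buf_ring_peek, buf_ring_putback_sc and buf_ring_advance_sc are designed to be combined by one lock-serialized consumer: inspect the head entry, try to use it, and only then either consume the slot or leave the entry in place. A hypothetical drain loop in that style follows; example_softc, tx_mtx, tx_br and hw_transmit_one() are invented names and not part of this header.

static void
example_txq_drain(struct example_softc *sc)
{
	struct mbuf *m;

	/* The lock makes this thread the single consumer of tx_br. */
	mtx_assert(&sc->tx_mtx, MA_OWNED);
	while ((m = buf_ring_peek(sc->tx_br)) != NULL) {
		if (hw_transmit_one(sc, m) != 0) {
			/* Could not send: leave it at the head, retry later. */
			buf_ring_putback_sc(sc->tx_br, m);
			break;
		}
		/* Sent: only now consume the slot. */
		buf_ring_advance_sc(sc->tx_br);
	}
}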
303 buf_ring_peek_clear_sc(struct buf_ring *br)
308 if (!mtx_owned(br->br_lock))
317 if (br->br_cons_head == br->br_prod_tail)
325 ret = br->br_ring[br->br_cons_head];
326 br->br_ring[br->br_cons_head] = NULL;
329 return (br->br_ring[br->br_cons_head]);
334 buf_ring_full(struct buf_ring *br)
337 return (((br->br_prod_head + 1) & br->br_prod_mask) == br->br_cons_tail);
341 buf_ring_empty(struct buf_ring *br)
344 return (br->br_cons_head == br->br_prod_tail);
348 buf_ring_count(struct buf_ring *br)
351 return ((br->br_prod_size + br->br_prod_tail - br->br_cons_tail)
352 & br->br_prod_mask);
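buf_ring_count adds br_prod_size before subtracting so the difference stays non-negative when the producer index has wrapped, and the mask then reduces it to the number of occupied slots. A small worked example with made-up values:

#include <stdio.h>

int
main(void)
{
	unsigned prod_size = 8, prod_mask = 7;	/* an 8-slot ring */
	unsigned prod_tail = 2;			/* producer has wrapped past the end */
	unsigned cons_tail = 6;

	/* Same expression as buf_ring_count(): 8 + 2 - 6 = 4, & 7 -> 4. */
	printf("%u entries\n",
	    (prod_size + prod_tail - cons_tail) & prod_mask);
	return (0);
}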
357 void buf_ring_free(struct buf_ring *br, struct malloc_type *type);
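Only the destructor shows up in this listing. In trees that still carry br_lock, the matching constructor is usually the four-argument buf_ring_alloc(count, type, flags, lock); treat that prototype as an assumption and check the local header before copying this lifecycle sketch (the example_* names, the ring size and M_DEVBUF are illustrative):

static struct buf_ring *example_br;		/* illustrative only */

static int
example_attach(struct example_softc *sc)
{
	/* 4096 entries: a power of two, as the masks above require. */
	example_br = buf_ring_alloc(4096, M_DEVBUF, M_WAITOK, &sc->tx_mtx);
	return (example_br != NULL ? 0 : ENOMEM);
}

static void
example_detach(void)
{
	buf_ring_free(example_br, M_DEVBUF);
	example_br = NULL;
}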