Lines Matching refs:bl

56 			      struct io_buffer_list *bl, unsigned int bgid)
63 bl->bgid = bgid;
64 atomic_set(&bl->refs, 1);
65 return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL));
71 struct io_buffer_list *bl;
77 bl = io_buffer_get_list(ctx, buf->bgid);
78 list_add(&buf->list, &bl->buf_list);
114 struct io_buffer_list *bl)
116 if (!list_empty(&bl->buf_list)) {
119 kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list);
123 if (list_empty(&bl->buf_list))
134 struct io_buffer_list *bl,
137 struct io_uring_buf_ring *br = bl->buf_ring;
138 __u16 tail, head = bl->head;
148 head &= bl->mask;
150 if (bl->is_mmap || head < IO_BUFFER_LIST_BUF_PER_PAGE) {
155 buf = page_address(bl->buf_pages[index]);
161 req->buf_list = bl;
176 bl->head++;
185 struct io_buffer_list *bl;
190 bl = io_buffer_get_list(ctx, req->buf_index);
191 if (likely(bl)) {
192 if (bl->is_buf_ring)
193 ret = io_ring_buffer_select(req, len, bl, issue_flags);
195 ret = io_provided_buffer_select(req, len, bl);
204 static void io_kbuf_mark_free(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
209 if (bl->buf_ring == ibf->mem) {
220 struct io_buffer_list *bl, unsigned nbufs)
228 if (bl->is_buf_ring) {
229 i = bl->buf_ring->tail - bl->head;
230 if (bl->is_mmap) {
235 io_kbuf_mark_free(ctx, bl);
236 bl->buf_ring = NULL;
237 bl->is_mmap = 0;
238 } else if (bl->buf_nr_pages) {
241 for (j = 0; j < bl->buf_nr_pages; j++)
242 unpin_user_page(bl->buf_pages[j]);
243 kvfree(bl->buf_pages);
244 bl->buf_pages = NULL;
245 bl->buf_nr_pages = 0;
248 INIT_LIST_HEAD(&bl->buf_list);
249 bl->is_buf_ring = 0;
256 while (!list_empty(&bl->buf_list)) {
259 nxt = list_first_entry(&bl->buf_list, struct io_buffer, list);
269 void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl)
271 if (atomic_dec_and_test(&bl->refs)) {
272 __io_remove_buffers(ctx, bl, -1U);
273 kfree_rcu(bl, rcu);
279 struct io_buffer_list *bl;
284 xa_for_each(&ctx->io_bl_xa, index, bl) {
285 xa_erase(&ctx->io_bl_xa, bl->bgid);
286 io_put_bl(ctx, bl);
326 struct io_buffer_list *bl;
332 bl = io_buffer_get_list(ctx, p->bgid);
333 if (bl) {
336 if (!bl->is_buf_ring)
337 ret = __io_remove_buffers(ctx, bl, p->nbufs);
430 struct io_buffer_list *bl)
442 list_move_tail(&buf->list, &bl->buf_list);
459 struct io_buffer_list *bl;
464 bl = io_buffer_get_list(ctx, p->bgid);
465 if (unlikely(!bl)) {
466 bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT);
467 if (!bl) {
471 INIT_LIST_HEAD(&bl->buf_list);
472 ret = io_buffer_add_list(ctx, bl, p->bgid);
478 kfree_rcu(bl, rcu);
483 if (bl->is_buf_ring) {
488 ret = io_add_buffers(ctx, p, bl);
499 struct io_buffer_list *bl)
536 bl->buf_pages = pages;
537 bl->buf_nr_pages = nr_pages;
538 bl->buf_ring = br;
539 bl->is_buf_ring = 1;
540 bl->is_mmap = 0;
579 struct io_buffer_list *bl)
605 bl->buf_ring = ibf->mem;
606 bl->is_buf_ring = 1;
607 bl->is_mmap = 1;
614 struct io_buffer_list *bl, *free_bl = NULL;
643 bl = io_buffer_get_list(ctx, reg.bgid);
644 if (bl) {
646 if (bl->is_buf_ring || !list_empty(&bl->buf_list))
649 free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL);
650 if (!bl)
655 ret = io_pin_pbuf_ring(&reg, bl);
657 ret = io_alloc_pbuf_ring(ctx, &reg, bl);
660 bl->nr_entries = reg.ring_entries;
661 bl->mask = reg.ring_entries - 1;
663 io_buffer_add_list(ctx, bl, reg.bgid);
674 struct io_buffer_list *bl;
685 bl = io_buffer_get_list(ctx, reg.bgid);
686 if (!bl)
688 if (!bl->is_buf_ring)
691 xa_erase(&ctx->io_bl_xa, bl->bgid);
692 io_put_bl(ctx, bl);
699 struct io_buffer_list *bl;
709 bl = io_buffer_get_list(ctx, buf_status.buf_group);
710 if (!bl)
712 if (!bl->is_buf_ring)
715 buf_status.head = bl->head;
725 struct io_buffer_list *bl;
740 bl = xa_load(&ctx->io_bl_xa, bgid);
743 if (bl && bl->is_mmap)
744 ret = atomic_inc_not_zero(&bl->refs);
748 return bl;
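
Every match above dereferences the same object, the io_uring provided-buffer list. As an orientation aid, the sketch below collects the fields these lines touch (bgid, refs, buf_list, buf_ring, buf_pages, buf_nr_pages, nr_entries, head, mask, is_buf_ring, is_mmap, rcu) into one struct. This is a reconstruction from the references in this listing, not the authoritative definition: the real struct io_buffer_list lives in io_uring/kbuf.h and may order these fields differently or group some of them in a union, so treat widths and layout here as assumptions.

	/*
	 * Sketch of the buffer-list object the matches above operate on.
	 * Reconstructed from the field accesses in this listing; the real
	 * definition (io_uring/kbuf.h) may differ in layout, e.g. some of
	 * these fields are likely to share a union.
	 */
	#include <linux/list.h>
	#include <linux/types.h>		/* atomic_t, __u16, __u8, struct rcu_head */
	#include <uapi/linux/io_uring.h>	/* struct io_uring_buf_ring */

	struct io_buffer_list {
		struct list_head	buf_list;	/* legacy provided buffers (list_add/list_first_entry) */
		struct page		**buf_pages;	/* pinned pages backing a user-supplied buffer ring */
		__u16			buf_nr_pages;	/* pages to unpin and kvfree on teardown */
		struct io_uring_buf_ring *buf_ring;	/* ring of provided buffers (pinned or kernel-allocated) */
		struct rcu_head		rcu;		/* for kfree_rcu() in io_put_bl() */

		__u16			bgid;		/* buffer group ID, key into ctx->io_bl_xa */
		__u16			nr_entries;	/* reg.ring_entries at registration time */
		__u16			head;		/* consumer head; the producer tail is buf_ring->tail */
		__u16			mask;		/* reg.ring_entries - 1; head is masked with it when indexing */
		atomic_t		refs;		/* set to 1 on add; dropped via io_put_bl() */
		__u8			is_buf_ring;	/* ring-provided group vs. legacy provided-buffer list */
		__u8			is_mmap;	/* ring memory is kernel-allocated and mmap'ed by userspace */
	};

The lifecycle visible in the matches follows the usual pattern: the list is created with refs set to 1 and stored in ctx->io_bl_xa keyed by bgid, the mmap lookup path takes an extra reference with atomic_inc_not_zero(), and io_put_bl() removes the buffers and kfree_rcu()s the list once the count drops to zero.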