Lines Matching full:bl
42 struct io_buffer_list *bl, unsigned int bgid) in io_buffer_add_list() argument
49 bl->bgid = bgid; in io_buffer_add_list()
50 atomic_set(&bl->refs, 1); in io_buffer_add_list()
51 return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL)); in io_buffer_add_list()
57 struct io_buffer_list *bl; in io_kbuf_recycle_legacy() local
72 bl = io_buffer_get_list(ctx, buf->bgid); in io_kbuf_recycle_legacy()
73 list_add(&buf->list, &bl->buf_list); in io_kbuf_recycle_legacy()
115 struct io_buffer_list *bl) in io_provided_buffer_select() argument
117 if (!list_empty(&bl->buf_list)) { in io_provided_buffer_select()
120 kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list); in io_provided_buffer_select()
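The legacy provided-buffer path above keeps free buffers on a per-group linked list: selection takes the first entry off the list, recycling adds the buffer back. Below is a minimal user-space model of that pattern only; the struct names and fields are illustrative, not the kernel's io_buffer / io_buffer_list.

/*
 * Minimal model of the legacy provided-buffer pattern: recycle pushes a
 * buffer onto a per-group list, select pops the first entry.  All names
 * here are hypothetical stand-ins, not kernel structures.
 */
#include <stdio.h>

struct buf {
	struct buf *next;
	int id;
};

struct buf_list {
	struct buf *head;	/* models bl->buf_list */
};

static void buf_recycle(struct buf_list *bl, struct buf *b)
{
	/* models list_add(&buf->list, &bl->buf_list) */
	b->next = bl->head;
	bl->head = b;
}

static struct buf *buf_select(struct buf_list *bl)
{
	/* models list_first_entry() on a non-empty list, plus removal */
	struct buf *b = bl->head;

	if (b)
		bl->head = b->next;
	return b;
}

int main(void)
{
	struct buf_list bl = { .head = NULL };
	struct buf a = { .id = 1 }, b = { .id = 2 };

	buf_recycle(&bl, &a);
	buf_recycle(&bl, &b);
	printf("selected %d\n", buf_select(&bl)->id);	/* 2: last recycled, first reused */
	printf("selected %d\n", buf_select(&bl)->id);	/* 1 */
	return 0;
}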
133 struct io_buffer_list *bl, in io_ring_buffer_select() argument
136 struct io_uring_buf_ring *br = bl->buf_ring; in io_ring_buffer_select()
138 __u16 head = bl->head; in io_ring_buffer_select()
143 head &= bl->mask; in io_ring_buffer_select()
148 req->buf_list = bl; in io_ring_buffer_select()
164 bl->head++; in io_ring_buffer_select()
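The ring-mapped path indexes a shared ring with a consumer head and a power-of-two mask (head &= bl->mask, then bl->head++ once the pick is committed). The sketch below models only that indexing arithmetic; the sizes and field names are assumptions, not the io_uring_buf_ring layout.

/*
 * Minimal model of the ring-mapped selection arithmetic: ring_entries is
 * a power of two, the mask is ring_entries - 1, and the consumer index
 * wraps via "head & mask".  Illustrative only.
 */
#include <stdint.h>
#include <stdio.h>

#define RING_ENTRIES 8			/* must be a power of two */
#define RING_MASK (RING_ENTRIES - 1)	/* models bl->mask */

struct ring {
	uint16_t head;			/* models bl->head */
	int bufs[RING_ENTRIES];		/* models the ring's buffer slots */
};

static int ring_select(struct ring *r)
{
	/* models "head &= bl->mask" followed by "bl->head++" */
	int buf = r->bufs[r->head & RING_MASK];

	r->head++;
	return buf;
}

int main(void)
{
	struct ring r = { .head = 0 };

	for (int i = 0; i < RING_ENTRIES; i++)
		r.bufs[i] = 100 + i;
	for (int i = 0; i < 10; i++)
		printf("picked %d\n", ring_select(&r));	/* wraps after 8 picks */
	return 0;
}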
173 struct io_buffer_list *bl; in io_buffer_select() local
178 bl = io_buffer_get_list(ctx, req->buf_index); in io_buffer_select()
179 if (likely(bl)) { in io_buffer_select()
180 if (bl->is_mapped) in io_buffer_select()
181 ret = io_ring_buffer_select(req, len, bl, issue_flags); in io_buffer_select()
183 ret = io_provided_buffer_select(req, len, bl); in io_buffer_select()
190 struct io_buffer_list *bl, unsigned nbufs) in __io_remove_buffers() argument
198 if (bl->is_mapped) { in __io_remove_buffers()
199 i = bl->buf_ring->tail - bl->head; in __io_remove_buffers()
200 if (bl->buf_nr_pages) { in __io_remove_buffers()
203 if (!bl->is_mmap) { in __io_remove_buffers()
204 for (j = 0; j < bl->buf_nr_pages; j++) in __io_remove_buffers()
205 unpin_user_page(bl->buf_pages[j]); in __io_remove_buffers()
207 io_pages_unmap(bl->buf_ring, &bl->buf_pages, in __io_remove_buffers()
208 &bl->buf_nr_pages, bl->is_mmap); in __io_remove_buffers()
209 bl->is_mmap = 0; in __io_remove_buffers()
212 INIT_LIST_HEAD(&bl->buf_list); in __io_remove_buffers()
213 bl->is_mapped = 0; in __io_remove_buffers()
220 while (!list_empty(&bl->buf_list)) { in __io_remove_buffers()
223 nxt = list_first_entry(&bl->buf_list, struct io_buffer, list); in __io_remove_buffers()
233 void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl) in io_put_bl() argument
235 if (atomic_dec_and_test(&bl->refs)) { in io_put_bl()
236 __io_remove_buffers(ctx, bl, -1U); in io_put_bl()
237 kfree_rcu(bl, rcu); in io_put_bl()
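io_put_bl() drops a reference and, on the last put, removes all buffers and frees the list via RCU; io_pbuf_get_bl() further down only takes a reference while the count is still non-zero (atomic_inc_not_zero). The C11 sketch below models that put / get-if-not-zero pairing; the RCU-deferred free is deliberately left out and all names are illustrative.

/*
 * Minimal C11 model of the refcounting pattern: put frees on the 1 -> 0
 * transition, and a getter may only take a reference while the count is
 * still non-zero, so a dying object is never revived.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refs;
};

static struct obj *obj_new(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	atomic_store(&o->refs, 1);	/* models atomic_set(&bl->refs, 1) */
	return o;
}

static void obj_put(struct obj *o)
{
	/* models atomic_dec_and_test(): free when the last reference drops */
	if (atomic_fetch_sub(&o->refs, 1) == 1) {
		puts("last ref dropped, freeing");
		free(o);
	}
}

static bool obj_try_get(struct obj *o)
{
	/* models atomic_inc_not_zero(): only succeed while refs != 0 */
	int v = atomic_load(&o->refs);

	while (v != 0) {
		if (atomic_compare_exchange_weak(&o->refs, &v, v + 1))
			return true;
	}
	return false;
}

int main(void)
{
	struct obj *o = obj_new();

	if (obj_try_get(o))		/* e.g. an mmap taking an extra ref */
		puts("got extra ref");
	obj_put(o);			/* unregister path */
	obj_put(o);			/* mmap path; frees here */
	return 0;
}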
243 struct io_buffer_list *bl; in io_destroy_buffers() local
246 xa_for_each(&ctx->io_bl_xa, index, bl) { in io_destroy_buffers()
247 xa_erase(&ctx->io_bl_xa, bl->bgid); in io_destroy_buffers()
248 io_put_bl(ctx, bl); in io_destroy_buffers()
260 static void io_destroy_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl) in io_destroy_bl() argument
262 xa_erase(&ctx->io_bl_xa, bl->bgid); in io_destroy_bl()
263 io_put_bl(ctx, bl); in io_destroy_bl()
289 struct io_buffer_list *bl; in io_remove_buffers() local
295 bl = io_buffer_get_list(ctx, p->bgid); in io_remove_buffers()
296 if (bl) { in io_remove_buffers()
299 if (!bl->is_mapped) in io_remove_buffers()
300 ret = __io_remove_buffers(ctx, bl, p->nbufs); in io_remove_buffers()
389 struct io_buffer_list *bl) in io_add_buffers() argument
401 list_move_tail(&buf->list, &bl->buf_list); in io_add_buffers()
418 struct io_buffer_list *bl; in io_provide_buffers() local
423 bl = io_buffer_get_list(ctx, p->bgid); in io_provide_buffers()
424 if (unlikely(!bl)) { in io_provide_buffers()
425 bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT); in io_provide_buffers()
426 if (!bl) { in io_provide_buffers()
430 INIT_LIST_HEAD(&bl->buf_list); in io_provide_buffers()
431 ret = io_buffer_add_list(ctx, bl, p->bgid); in io_provide_buffers()
437 kfree_rcu(bl, rcu); in io_provide_buffers()
442 if (bl->is_mapped) { in io_provide_buffers()
447 ret = io_add_buffers(ctx, p, bl); in io_provide_buffers()
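io_provide_buffers() looks the group up by bgid and, if it does not exist yet, allocates a zeroed list and registers it under that id (freeing it via kfree_rcu() if registration fails). Below is a minimal user-space model of the lookup-or-create step, with a flat array standing in for the xarray (ctx->io_bl_xa); everything named here is hypothetical.

/*
 * Minimal model of lookup-or-create keyed by buffer group id: return the
 * existing group, or allocate a fresh zeroed one and register it.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_GROUPS 16

struct group {
	unsigned int bgid;
	int nbufs;
};

static struct group *groups[MAX_GROUPS];	/* stands in for the xarray */

static struct group *group_get_or_create(unsigned int bgid)
{
	struct group *g;

	if (bgid >= MAX_GROUPS)
		return NULL;
	g = groups[bgid];			/* models io_buffer_get_list() */
	if (g)
		return g;

	g = calloc(1, sizeof(*g));		/* models kzalloc() */
	if (!g)
		return NULL;
	g->bgid = bgid;
	groups[bgid] = g;			/* models io_buffer_add_list() */
	return g;
}

int main(void)
{
	struct group *g = group_get_or_create(3);

	if (g)
		printf("group %u ready, %d buffers\n", g->bgid, g->nbufs);
	return 0;
}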
458 struct io_buffer_list *bl) in io_pin_pbuf_ring() argument
491 bl->buf_pages = pages; in io_pin_pbuf_ring()
492 bl->buf_nr_pages = nr_pages; in io_pin_pbuf_ring()
493 bl->buf_ring = br; in io_pin_pbuf_ring()
494 bl->is_mapped = 1; in io_pin_pbuf_ring()
495 bl->is_mmap = 0; in io_pin_pbuf_ring()
506 struct io_buffer_list *bl) in io_alloc_pbuf_ring() argument
512 bl->buf_ring = io_pages_map(&bl->buf_pages, &bl->buf_nr_pages, ring_size); in io_alloc_pbuf_ring()
513 if (IS_ERR(bl->buf_ring)) { in io_alloc_pbuf_ring()
514 bl->buf_ring = NULL; in io_alloc_pbuf_ring()
517 bl->is_mapped = 1; in io_alloc_pbuf_ring()
518 bl->is_mmap = 1; in io_alloc_pbuf_ring()
525 struct io_buffer_list *bl, *free_bl = NULL; in io_register_pbuf_ring() local
554 bl = io_buffer_get_list(ctx, reg.bgid); in io_register_pbuf_ring()
555 if (bl) { in io_register_pbuf_ring()
557 if (bl->is_mapped || !list_empty(&bl->buf_list)) in io_register_pbuf_ring()
559 io_destroy_bl(ctx, bl); in io_register_pbuf_ring()
562 free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL); in io_register_pbuf_ring()
563 if (!bl) in io_register_pbuf_ring()
567 ret = io_pin_pbuf_ring(&reg, bl); in io_register_pbuf_ring()
569 ret = io_alloc_pbuf_ring(ctx, &reg, bl); in io_register_pbuf_ring()
572 bl->nr_entries = reg.ring_entries; in io_register_pbuf_ring()
573 bl->mask = reg.ring_entries - 1; in io_register_pbuf_ring()
575 io_buffer_add_list(ctx, bl, reg.bgid); in io_register_pbuf_ring()
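Registration records ring_entries and derives the wrap mask as ring_entries - 1, which only behaves as a mask when ring_entries is a power of two; the register path is expected to validate that before reaching these lines (the check is not visible in this listing). A one-line sanity check for illustration:

/*
 * "entries - 1" is only a valid wrap mask for power-of-two sizes.
 */
#include <stdbool.h>
#include <stdio.h>

static bool is_pow2(unsigned int n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	unsigned int ring_entries = 8;

	if (!is_pow2(ring_entries)) {
		fprintf(stderr, "ring_entries must be a power of two\n");
		return 1;
	}
	printf("mask = %u\n", ring_entries - 1);	/* models bl->mask */
	return 0;
}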
586 struct io_buffer_list *bl; in io_unregister_pbuf_ring() local
597 bl = io_buffer_get_list(ctx, reg.bgid); in io_unregister_pbuf_ring()
598 if (!bl) in io_unregister_pbuf_ring()
600 if (!bl->is_mapped) in io_unregister_pbuf_ring()
603 xa_erase(&ctx->io_bl_xa, bl->bgid); in io_unregister_pbuf_ring()
604 io_put_bl(ctx, bl); in io_unregister_pbuf_ring()
611 struct io_buffer_list *bl; in io_pbuf_get_bl() local
626 bl = xa_load(&ctx->io_bl_xa, bgid); in io_pbuf_get_bl()
629 if (bl && bl->is_mmap) in io_pbuf_get_bl()
630 ret = atomic_inc_not_zero(&bl->refs); in io_pbuf_get_bl()
634 return bl; in io_pbuf_get_bl()
643 struct io_buffer_list *bl; in io_pbuf_mmap() local
647 bl = io_pbuf_get_bl(ctx, bgid); in io_pbuf_mmap()
648 if (IS_ERR(bl)) in io_pbuf_mmap()
649 return PTR_ERR(bl); in io_pbuf_mmap()
651 ret = io_uring_mmap_pages(ctx, vma, bl->buf_pages, bl->buf_nr_pages); in io_pbuf_mmap()
652 io_put_bl(ctx, bl); in io_pbuf_mmap()