
Lines matching full:bl in io_uring/kbuf.c

35 struct io_buffer_list *bl, in __io_buffer_get_list() argument
38 if (bl && bgid < BGID_ARRAY) in __io_buffer_get_list()
39 return &bl[bgid]; in __io_buffer_get_list()
60 struct io_buffer_list *bl, unsigned int bgid) in io_buffer_add_list() argument
67 bl->bgid = bgid; in io_buffer_add_list()
68 smp_store_release(&bl->is_ready, 1); in io_buffer_add_list()
73 return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL)); in io_buffer_add_list()
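The smp_store_release() of bl->is_ready at line 68 pairs with the smp_load_acquire() at line 753 near the end of this listing: the list is published only after its fields and the xarray entry are set up. A minimal C11 sketch of that publish/consume pairing, with made-up names and only the fields that matter here:

    #include <stdatomic.h>

    struct buf_list {
        unsigned bgid;
        _Atomic int is_ready;
    };

    void publish(struct buf_list *bl, unsigned bgid)
    {
        bl->bgid = bgid;                              /* plain init first */
        atomic_store_explicit(&bl->is_ready, 1,
                              memory_order_release);  /* then publish */
    }

    unsigned consume(struct buf_list *bl)
    {
        if (!atomic_load_explicit(&bl->is_ready,
                                  memory_order_acquire)) /* pairs with release */
            return 0;
        return bl->bgid;   /* initialized fields are guaranteed visible here */
    }
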
79 struct io_buffer_list *bl; in io_kbuf_recycle_legacy() local
94 bl = io_buffer_get_list(ctx, buf->bgid); in io_kbuf_recycle_legacy()
95 list_add(&buf->list, &bl->buf_list); in io_kbuf_recycle_legacy()
137 struct io_buffer_list *bl) in io_provided_buffer_select() argument
139 if (!list_empty(&bl->buf_list)) { in io_provided_buffer_select()
142 kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list); in io_provided_buffer_select()
155 struct io_buffer_list *bl, in io_ring_buffer_select() argument
158 struct io_uring_buf_ring *br = bl->buf_ring; in io_ring_buffer_select()
160 __u16 head = bl->head; in io_ring_buffer_select()
165 head &= bl->mask; in io_ring_buffer_select()
167 if (bl->is_mmap || head < IO_BUFFER_LIST_BUF_PER_PAGE) { in io_ring_buffer_select()
172 buf = page_address(bl->buf_pages[index]); in io_ring_buffer_select()
178 req->buf_list = bl; in io_ring_buffer_select()
193 bl->head++; in io_ring_buffer_select()
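In io_ring_buffer_select() the kernel is the consumer of a shared ring: it indexes entries with head & bl->mask (line 165) and bumps its private head only once a buffer is handed out (line 193), while userspace owns the tail. A hedged liburing sketch of the matching producer side, assuming br, bufs, and the sizes are already set up:

    #include <liburing.h>

    /* Append nr_bufs buffers and publish them with one tail advance;
     * the kernel consumes them at (head & mask) as shown above. */
    static void refill_ring(struct io_uring_buf_ring *br, void *bufs[],
                            unsigned buf_len, unsigned nr_bufs)
    {
        int mask = io_uring_buf_ring_mask(nr_bufs);
        unsigned i;

        for (i = 0; i < nr_bufs; i++)
            io_uring_buf_ring_add(br, bufs[i], buf_len, i /* bid */,
                                  mask, i /* offset within this batch */);
        /* Single release store of the tail makes all entries visible. */
        io_uring_buf_ring_advance(br, nr_bufs);
    }
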
202 struct io_buffer_list *bl; in io_buffer_select() local
207 bl = io_buffer_get_list(ctx, req->buf_index); in io_buffer_select()
208 if (likely(bl)) { in io_buffer_select()
209 if (bl->is_mapped) in io_buffer_select()
210 ret = io_ring_buffer_select(req, len, bl, issue_flags); in io_buffer_select()
212 ret = io_provided_buffer_select(req, len, bl); in io_buffer_select()
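io_buffer_select() runs when a request carries IOSQE_BUFFER_SELECT: req->buf_index names the buffer group, and the is_mapped branch picks the ring-mapped or the legacy provided-buffer path. A sketch of the submitting side with liburing (the fd, length, and bgid are assumptions):

    #include <liburing.h>

    static void recv_with_group(struct io_uring *ring, int fd,
                                unsigned short bgid)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct io_uring_cqe *cqe;

        io_uring_prep_recv(sqe, fd, NULL, 4096, 0); /* no addr: kernel selects */
        sqe->flags |= IOSQE_BUFFER_SELECT;
        sqe->buf_group = bgid;
        io_uring_submit(ring);

        if (io_uring_wait_cqe(ring, &cqe) == 0) {
            if (cqe->flags & IORING_CQE_F_BUFFER) {
                /* the chosen buffer id comes back in the CQE flags */
                unsigned bid = cqe->flags >> IORING_CQE_BUFFER_SHIFT;
                (void)bid;
            }
            io_uring_cqe_seen(ring, cqe);
        }
    }
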
220 struct io_buffer_list *bl; in io_init_bl_list() local
223 bl = kcalloc(BGID_ARRAY, sizeof(struct io_buffer_list), GFP_KERNEL); in io_init_bl_list()
224 if (!bl) in io_init_bl_list()
228 INIT_LIST_HEAD(&bl[i].buf_list); in io_init_bl_list()
229 bl[i].bgid = i; in io_init_bl_list()
232 smp_store_release(&ctx->io_bl, bl); in io_init_bl_list()
239 static void io_kbuf_mark_free(struct io_ring_ctx *ctx, struct io_buffer_list *bl) in io_kbuf_mark_free() argument
244 if (bl->buf_ring == ibf->mem) { in io_kbuf_mark_free()
255 struct io_buffer_list *bl, unsigned nbufs) in __io_remove_buffers() argument
263 if (bl->is_mapped) { in __io_remove_buffers()
264 i = bl->buf_ring->tail - bl->head; in __io_remove_buffers()
265 if (bl->is_mmap) { in __io_remove_buffers()
270 io_kbuf_mark_free(ctx, bl); in __io_remove_buffers()
271 bl->buf_ring = NULL; in __io_remove_buffers()
272 bl->is_mmap = 0; in __io_remove_buffers()
273 } else if (bl->buf_nr_pages) { in __io_remove_buffers()
276 for (j = 0; j < bl->buf_nr_pages; j++) in __io_remove_buffers()
277 unpin_user_page(bl->buf_pages[j]); in __io_remove_buffers()
278 kvfree(bl->buf_pages); in __io_remove_buffers()
279 bl->buf_pages = NULL; in __io_remove_buffers()
280 bl->buf_nr_pages = 0; in __io_remove_buffers()
283 INIT_LIST_HEAD(&bl->buf_list); in __io_remove_buffers()
284 bl->is_mapped = 0; in __io_remove_buffers()
291 while (!list_empty(&bl->buf_list)) { in __io_remove_buffers()
294 nxt = list_first_entry(&bl->buf_list, struct io_buffer, list); in __io_remove_buffers()
306 struct io_buffer_list *bl; in io_destroy_buffers() local
316 xa_for_each(&ctx->io_bl_xa, index, bl) { in io_destroy_buffers()
317 xa_erase(&ctx->io_bl_xa, bl->bgid); in io_destroy_buffers()
318 __io_remove_buffers(ctx, bl, -1U); in io_destroy_buffers()
319 kfree_rcu(bl, rcu); in io_destroy_buffers()
354 struct io_buffer_list *bl; in io_remove_buffers() local
360 bl = io_buffer_get_list(ctx, p->bgid); in io_remove_buffers()
361 if (bl) { in io_remove_buffers()
364 if (!bl->is_mapped) in io_remove_buffers()
365 ret = __io_remove_buffers(ctx, bl, p->nbufs); in io_remove_buffers()
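The userspace counterpart of io_remove_buffers() is IORING_OP_REMOVE_BUFFERS. Note the !bl->is_mapped guard above: only classic provided-buffer groups can be drained this way, never ring-mapped ones. A hedged liburing sketch, with ring, nr, and bgid assumed:

    #include <liburing.h>

    static int remove_bufs(struct io_uring *ring, int nr, int bgid)
    {
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct io_uring_cqe *cqe;
        int ret;

        io_uring_prep_remove_buffers(sqe, nr, bgid);
        io_uring_submit(ring);
        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret == 0) {
            ret = cqe->res;   /* buffers actually removed, or -errno */
            io_uring_cqe_seen(ring, cqe);
        }
        return ret;
    }
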
454 struct io_buffer_list *bl) in io_add_buffers() argument
466 list_move_tail(&buf->list, &bl->buf_list); in io_add_buffers()
483 struct io_buffer_list *bl; in io_provide_buffers() local
494 bl = io_buffer_get_list(ctx, p->bgid); in io_provide_buffers()
495 if (unlikely(!bl)) { in io_provide_buffers()
496 bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT); in io_provide_buffers()
497 if (!bl) { in io_provide_buffers()
501 INIT_LIST_HEAD(&bl->buf_list); in io_provide_buffers()
502 ret = io_buffer_add_list(ctx, bl, p->bgid); in io_provide_buffers()
511 kfree_rcu(bl, rcu); in io_provide_buffers()
518 if (bl->is_mapped) { in io_provide_buffers()
523 ret = io_add_buffers(ctx, p, bl); in io_provide_buffers()
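io_provide_buffers() creates the group list lazily on first use (the kzalloc() path above) and rejects groups already claimed by a mapped ring. A liburing sketch of the providing side, with an assumed buffer count and size:

    #include <liburing.h>
    #include <stdlib.h>

    /* Hand nr equally sized buffers to group bgid, ids starting at bid. */
    static int provide_bufs(struct io_uring *ring, int bgid)
    {
        const int nr = 8, len = 4096, bid = 0;   /* assumed sizes */
        void *base = malloc((size_t)nr * len);
        struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
        struct io_uring_cqe *cqe;
        int ret;

        io_uring_prep_provide_buffers(sqe, base, len, nr, bgid, bid);
        io_uring_submit(ring);
        ret = io_uring_wait_cqe(ring, &cqe);
        if (ret == 0) {
            ret = cqe->res;
            io_uring_cqe_seen(ring, cqe);
        }
        return ret;
    }
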
534 struct io_buffer_list *bl) in io_pin_pbuf_ring() argument
571 bl->buf_pages = pages; in io_pin_pbuf_ring()
572 bl->buf_nr_pages = nr_pages; in io_pin_pbuf_ring()
573 bl->buf_ring = br; in io_pin_pbuf_ring()
574 bl->is_mapped = 1; in io_pin_pbuf_ring()
575 bl->is_mmap = 0; in io_pin_pbuf_ring()
614 struct io_buffer_list *bl) in io_alloc_pbuf_ring() argument
640 bl->buf_ring = ibf->mem; in io_alloc_pbuf_ring()
641 bl->is_mapped = 1; in io_alloc_pbuf_ring()
642 bl->is_mmap = 1; in io_alloc_pbuf_ring()
649 struct io_buffer_list *bl, *free_bl = NULL; in io_register_pbuf_ring() local
684 bl = io_buffer_get_list(ctx, reg.bgid); in io_register_pbuf_ring()
685 if (bl) { in io_register_pbuf_ring()
687 if (bl->is_mapped || !list_empty(&bl->buf_list)) in io_register_pbuf_ring()
690 free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL); in io_register_pbuf_ring()
691 if (!bl) in io_register_pbuf_ring()
696 ret = io_pin_pbuf_ring(&reg, bl); in io_register_pbuf_ring()
698 ret = io_alloc_pbuf_ring(ctx, &reg, bl); in io_register_pbuf_ring()
701 bl->nr_entries = reg.ring_entries; in io_register_pbuf_ring()
702 bl->mask = reg.ring_entries - 1; in io_register_pbuf_ring()
704 io_buffer_add_list(ctx, bl, reg.bgid); in io_register_pbuf_ring()
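io_register_pbuf_ring() accepts either a user-allocated ring, which io_pin_pbuf_ring() pins, or a kernel-allocated one via io_alloc_pbuf_ring(). ring_entries must be a power of two, which is why bl->mask can simply be ring_entries - 1 at line 702. A hedged sketch of registering a user-allocated ring with liburing (names and sizes are assumptions):

    #include <liburing.h>
    #include <string.h>
    #include <sys/mman.h>

    static struct io_uring_buf_ring *setup_pbuf_ring(struct io_uring *ring,
                                                     unsigned entries, int bgid)
    {
        struct io_uring_buf_reg reg;
        struct io_uring_buf_ring *br;

        br = mmap(NULL, entries * sizeof(struct io_uring_buf),
                  PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
        if (br == MAP_FAILED)
            return NULL;

        memset(&reg, 0, sizeof(reg));
        reg.ring_addr = (unsigned long)br;
        reg.ring_entries = entries;      /* must be a power of two */
        reg.bgid = bgid;

        if (io_uring_register_buf_ring(ring, &reg, 0) < 0)
            return NULL;

        io_uring_buf_ring_init(br);      /* zero the shared tail */
        return br;
    }

Teardown goes through io_uring_unregister_buf_ring(ring, bgid), which lands in io_unregister_pbuf_ring() below.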
715 struct io_buffer_list *bl; in io_unregister_pbuf_ring() local
726 bl = io_buffer_get_list(ctx, reg.bgid); in io_unregister_pbuf_ring()
727 if (!bl) in io_unregister_pbuf_ring()
729 if (!bl->is_mapped) in io_unregister_pbuf_ring()
732 __io_remove_buffers(ctx, bl, -1U); in io_unregister_pbuf_ring()
733 if (bl->bgid >= BGID_ARRAY) { in io_unregister_pbuf_ring()
734 xa_erase(&ctx->io_bl_xa, bl->bgid); in io_unregister_pbuf_ring()
735 kfree_rcu(bl, rcu); in io_unregister_pbuf_ring()
742 struct io_buffer_list *bl; in io_pbuf_get_address() local
744 bl = __io_buffer_get_list(ctx, smp_load_acquire(&ctx->io_bl), bgid); in io_pbuf_get_address()
746 if (!bl || !bl->is_mmap) in io_pbuf_get_address()
753 if (!smp_load_acquire(&bl->is_ready)) in io_pbuf_get_address()
756 return bl->buf_ring; in io_pbuf_get_address()
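io_pbuf_get_address() is the kernel half of the mmap-based variant: when a ring was registered with IOU_PBUF_RING_MMAP, userspace maps it through the io_uring fd, and the smp_load_acquire() of bl->is_ready at line 753 pairs with the smp_store_release() in io_buffer_add_list() so the mapping never observes a half-initialized list. A hedged sketch of that userspace side (liburing names; sizes assumed):

    #include <liburing.h>
    #include <string.h>
    #include <sys/mman.h>

    static struct io_uring_buf_ring *map_kernel_ring(struct io_uring *ring,
                                                     unsigned entries, int bgid)
    {
        struct io_uring_buf_reg reg;
        off_t off;
        void *p;

        memset(&reg, 0, sizeof(reg));
        reg.ring_entries = entries;
        reg.bgid = bgid;
        reg.flags = IOU_PBUF_RING_MMAP;  /* kernel allocates the memory */

        if (io_uring_register_buf_ring(ring, &reg, 0) < 0)
            return NULL;

        /* mmap offset encodes the group id; io_pbuf_get_address()
         * resolves it on the kernel side. */
        off = IORING_OFF_PBUF_RING |
              ((off_t)bgid << IORING_OFF_PBUF_SHIFT);
        p = mmap(NULL, entries * sizeof(struct io_uring_buf),
                 PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
                 ring->ring_fd, off);
        return p == MAP_FAILED ? NULL : p;
    }
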