Lines Matching full:bl

42 			      struct io_buffer_list *bl, unsigned int bgid)  in io_buffer_add_list()  argument
49 bl->bgid = bgid; in io_buffer_add_list()
50 atomic_set(&bl->refs, 1); in io_buffer_add_list()
51 return xa_err(xa_store(&ctx->io_bl_xa, bgid, bl, GFP_KERNEL)); in io_buffer_add_list()
57 struct io_buffer_list *bl; in io_kbuf_recycle_legacy() local
72 bl = io_buffer_get_list(ctx, buf->bgid); in io_kbuf_recycle_legacy()
73 list_add(&buf->list, &bl->buf_list); in io_kbuf_recycle_legacy()
115 struct io_buffer_list *bl) in io_provided_buffer_select() argument
117 if (!list_empty(&bl->buf_list)) { in io_provided_buffer_select()
120 kbuf = list_first_entry(&bl->buf_list, struct io_buffer, list); in io_provided_buffer_select()
133 struct io_buffer_list *bl, in io_ring_buffer_select() argument
136 struct io_uring_buf_ring *br = bl->buf_ring; in io_ring_buffer_select()
138 __u16 head = bl->head; in io_ring_buffer_select()
143 head &= bl->mask; in io_ring_buffer_select()
148 req->buf_list = bl; in io_ring_buffer_select()
163 bl->head++; in io_ring_buffer_select()
172 struct io_buffer_list *bl; in io_buffer_select() local
177 bl = io_buffer_get_list(ctx, req->buf_index); in io_buffer_select()
178 if (likely(bl)) { in io_buffer_select()
179 if (bl->is_mapped) in io_buffer_select()
180 ret = io_ring_buffer_select(req, len, bl, issue_flags); in io_buffer_select()
182 ret = io_provided_buffer_select(req, len, bl); in io_buffer_select()
189 struct io_buffer_list *bl, unsigned nbufs) in __io_remove_buffers() argument
197 if (bl->is_mapped) { in __io_remove_buffers()
198 i = bl->buf_ring->tail - bl->head; in __io_remove_buffers()
199 if (bl->buf_nr_pages) { in __io_remove_buffers()
202 if (!bl->is_mmap) { in __io_remove_buffers()
203 for (j = 0; j < bl->buf_nr_pages; j++) in __io_remove_buffers()
204 unpin_user_page(bl->buf_pages[j]); in __io_remove_buffers()
206 io_pages_unmap(bl->buf_ring, &bl->buf_pages, in __io_remove_buffers()
207 &bl->buf_nr_pages, bl->is_mmap); in __io_remove_buffers()
208 bl->is_mmap = 0; in __io_remove_buffers()
211 INIT_LIST_HEAD(&bl->buf_list); in __io_remove_buffers()
212 bl->is_mapped = 0; in __io_remove_buffers()
219 while (!list_empty(&bl->buf_list)) { in __io_remove_buffers()
222 nxt = list_first_entry(&bl->buf_list, struct io_buffer, list); in __io_remove_buffers()
232 void io_put_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl) in io_put_bl() argument
234 if (atomic_dec_and_test(&bl->refs)) { in io_put_bl()
235 __io_remove_buffers(ctx, bl, -1U); in io_put_bl()
236 kfree_rcu(bl, rcu); in io_put_bl()
242 struct io_buffer_list *bl; in io_destroy_buffers() local
245 xa_for_each(&ctx->io_bl_xa, index, bl) { in io_destroy_buffers()
246 xa_erase(&ctx->io_bl_xa, bl->bgid); in io_destroy_buffers()
247 io_put_bl(ctx, bl); in io_destroy_buffers()
259 static void io_destroy_bl(struct io_ring_ctx *ctx, struct io_buffer_list *bl) in io_destroy_bl() argument
261 xa_erase(&ctx->io_bl_xa, bl->bgid); in io_destroy_bl()
262 io_put_bl(ctx, bl); in io_destroy_bl()
288 struct io_buffer_list *bl; in io_remove_buffers() local
294 bl = io_buffer_get_list(ctx, p->bgid); in io_remove_buffers()
295 if (bl) { in io_remove_buffers()
298 if (!bl->is_mapped) in io_remove_buffers()
299 ret = __io_remove_buffers(ctx, bl, p->nbufs); in io_remove_buffers()
390 struct io_buffer_list *bl) in io_add_buffers() argument
402 list_move_tail(&buf->list, &bl->buf_list); in io_add_buffers()
419 struct io_buffer_list *bl; in io_provide_buffers() local
424 bl = io_buffer_get_list(ctx, p->bgid); in io_provide_buffers()
425 if (unlikely(!bl)) { in io_provide_buffers()
426 bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT); in io_provide_buffers()
427 if (!bl) { in io_provide_buffers()
431 INIT_LIST_HEAD(&bl->buf_list); in io_provide_buffers()
432 ret = io_buffer_add_list(ctx, bl, p->bgid); in io_provide_buffers()
438 kfree_rcu(bl, rcu); in io_provide_buffers()
443 if (bl->is_mapped) { in io_provide_buffers()
448 ret = io_add_buffers(ctx, p, bl); in io_provide_buffers()
459 struct io_buffer_list *bl) in io_pin_pbuf_ring() argument
492 bl->buf_pages = pages; in io_pin_pbuf_ring()
493 bl->buf_nr_pages = nr_pages; in io_pin_pbuf_ring()
494 bl->buf_ring = br; in io_pin_pbuf_ring()
495 bl->is_mapped = 1; in io_pin_pbuf_ring()
496 bl->is_mmap = 0; in io_pin_pbuf_ring()
507 struct io_buffer_list *bl) in io_alloc_pbuf_ring() argument
513 bl->buf_ring = io_pages_map(&bl->buf_pages, &bl->buf_nr_pages, ring_size); in io_alloc_pbuf_ring()
514 if (IS_ERR(bl->buf_ring)) { in io_alloc_pbuf_ring()
515 bl->buf_ring = NULL; in io_alloc_pbuf_ring()
518 bl->is_mapped = 1; in io_alloc_pbuf_ring()
519 bl->is_mmap = 1; in io_alloc_pbuf_ring()
526 struct io_buffer_list *bl, *free_bl = NULL; in io_register_pbuf_ring() local
555 bl = io_buffer_get_list(ctx, reg.bgid); in io_register_pbuf_ring()
556 if (bl) { in io_register_pbuf_ring()
558 if (bl->is_mapped || !list_empty(&bl->buf_list)) in io_register_pbuf_ring()
560 io_destroy_bl(ctx, bl); in io_register_pbuf_ring()
563 free_bl = bl = kzalloc(sizeof(*bl), GFP_KERNEL_ACCOUNT); in io_register_pbuf_ring()
564 if (!bl) in io_register_pbuf_ring()
568 ret = io_pin_pbuf_ring(&reg, bl); in io_register_pbuf_ring()
570 ret = io_alloc_pbuf_ring(ctx, &reg, bl); in io_register_pbuf_ring()
573 bl->nr_entries = reg.ring_entries; in io_register_pbuf_ring()
574 bl->mask = reg.ring_entries - 1; in io_register_pbuf_ring()
576 io_buffer_add_list(ctx, bl, reg.bgid); in io_register_pbuf_ring()
587 struct io_buffer_list *bl; in io_unregister_pbuf_ring() local
598 bl = io_buffer_get_list(ctx, reg.bgid); in io_unregister_pbuf_ring()
599 if (!bl) in io_unregister_pbuf_ring()
601 if (!bl->is_mapped) in io_unregister_pbuf_ring()
604 xa_erase(&ctx->io_bl_xa, bl->bgid); in io_unregister_pbuf_ring()
605 io_put_bl(ctx, bl); in io_unregister_pbuf_ring()
612 struct io_buffer_list *bl; in io_pbuf_get_bl() local
627 bl = xa_load(&ctx->io_bl_xa, bgid); in io_pbuf_get_bl()
630 if (bl && bl->is_mmap) in io_pbuf_get_bl()
631 ret = atomic_inc_not_zero(&bl->refs); in io_pbuf_get_bl()
635 return bl; in io_pbuf_get_bl()
644 struct io_buffer_list *bl; in io_pbuf_mmap() local
648 bl = io_pbuf_get_bl(ctx, bgid); in io_pbuf_mmap()
649 if (IS_ERR(bl)) in io_pbuf_mmap()
650 return PTR_ERR(bl); in io_pbuf_mmap()
652 ret = io_uring_mmap_pages(ctx, vma, bl->buf_pages, bl->buf_nr_pages); in io_pbuf_mmap()
653 io_put_bl(ctx, bl); in io_pbuf_mmap()
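
The matches above are from the io_uring provided-buffer code (io_uring/kbuf.c). Below is a minimal userspace sketch, not part of the search output, of how the registration path that the io_register_pbuf_ring()/io_pin_pbuf_ring() matches handle is typically driven. It assumes liburing is installed; the group id, buffer count and buffer size are illustrative values, and the helpers used (io_uring_register_buf_ring(), io_uring_buf_ring_add(), io_uring_buf_ring_advance()) are the stock liburing ones rather than anything taken from the file above.

	/* Sketch: register a provided-buffer ring for buffer group BGID. */
	#include <liburing.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	#define BGID		0	/* buffer group id, matches bl->bgid kernel-side */
	#define NR_BUFS		8	/* ring entries, must be a power of two */
	#define BUF_SIZE	4096

	int main(void)
	{
		struct io_uring ring;
		struct io_uring_buf_ring *br;
		struct io_uring_buf_reg reg = { };
		void *bufs;
		int i, ret;

		if (io_uring_queue_init(8, &ring, 0) < 0)
			return 1;

		/* The ring of struct io_uring_buf entries must be page aligned. */
		if (posix_memalign((void **)&br, 4096,
				   NR_BUFS * sizeof(struct io_uring_buf)))
			return 1;
		if (posix_memalign(&bufs, 4096, NR_BUFS * BUF_SIZE))
			return 1;

		reg.ring_addr = (unsigned long)br;
		reg.ring_entries = NR_BUFS;
		reg.bgid = BGID;

		/* Ends up in io_register_pbuf_ring() -> io_pin_pbuf_ring(). */
		ret = io_uring_register_buf_ring(&ring, &reg, 0);
		if (ret) {
			fprintf(stderr, "register_buf_ring: %s\n", strerror(-ret));
			return 1;
		}

		/* Publish the buffers; the kernel consumes them via bl->head. */
		io_uring_buf_ring_init(br);
		for (i = 0; i < NR_BUFS; i++)
			io_uring_buf_ring_add(br, (char *)bufs + i * BUF_SIZE,
					      BUF_SIZE, i,
					      io_uring_buf_ring_mask(NR_BUFS), i);
		io_uring_buf_ring_advance(br, NR_BUFS);

		io_uring_unregister_buf_ring(&ring, BGID);
		io_uring_queue_exit(&ring);
		return 0;
	}

The io_alloc_pbuf_ring() and io_pbuf_mmap() matches above cover the alternative IOU_PBUF_RING_MMAP setup: there the kernel allocates the ring pages itself and userspace maps them by mmap()ing the io_uring fd at offset IORING_OFF_PBUF_RING | (bgid << IORING_OFF_PBUF_SHIFT) instead of passing ring_addr.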