Lines matching refs: buf

Each match below gives the source line number, the matching code, and the enclosing function (with a local/argument note for how buf is bound there). The functions belong to the send-context ring, MR pool, and receive-buffer management paths of the kernel's RPC-over-RDMA (xprtrdma) transport.
617 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_sendctxs_destroy() local
620 if (!buf->rb_sc_ctxs) in rpcrdma_sendctxs_destroy()
622 for (i = 0; i <= buf->rb_sc_last; i++) in rpcrdma_sendctxs_destroy()
623 kfree(buf->rb_sc_ctxs[i]); in rpcrdma_sendctxs_destroy()
624 kfree(buf->rb_sc_ctxs); in rpcrdma_sendctxs_destroy()
625 buf->rb_sc_ctxs = NULL; in rpcrdma_sendctxs_destroy()
646 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_sendctxs_create() local
656 buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL); in rpcrdma_sendctxs_create()
657 if (!buf->rb_sc_ctxs) in rpcrdma_sendctxs_create()
660 buf->rb_sc_last = i - 1; in rpcrdma_sendctxs_create()
661 for (i = 0; i <= buf->rb_sc_last; i++) { in rpcrdma_sendctxs_create()
666 buf->rb_sc_ctxs[i] = sc; in rpcrdma_sendctxs_create()
669 buf->rb_sc_head = 0; in rpcrdma_sendctxs_create()
670 buf->rb_sc_tail = 0; in rpcrdma_sendctxs_create()
678 static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf, in rpcrdma_sendctx_next() argument
681 return likely(item < buf->rb_sc_last) ? item + 1 : 0; in rpcrdma_sendctx_next()
699 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_sendctx_get_locked() local
703 next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head); in rpcrdma_sendctx_get_locked()
705 if (next_head == READ_ONCE(buf->rb_sc_tail)) in rpcrdma_sendctx_get_locked()
709 sc = buf->rb_sc_ctxs[next_head]; in rpcrdma_sendctx_get_locked()
714 buf->rb_sc_head = next_head; in rpcrdma_sendctx_get_locked()
741 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_sendctx_put_locked() local
747 next_tail = buf->rb_sc_tail; in rpcrdma_sendctx_put_locked()
749 next_tail = rpcrdma_sendctx_next(buf, next_tail); in rpcrdma_sendctx_put_locked()
752 rpcrdma_sendctx_unmap(buf->rb_sc_ctxs[next_tail]); in rpcrdma_sendctx_put_locked()
754 } while (buf->rb_sc_ctxs[next_tail] != sc); in rpcrdma_sendctx_put_locked()
757 smp_store_release(&buf->rb_sc_tail, next_tail); in rpcrdma_sendctx_put_locked()
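The matches from rpcrdma_sendctx_next() through rpcrdma_sendctx_put_locked() show a fixed-size circular ring of send contexts: the sending side advances rb_sc_head, the completion side advances rb_sc_tail, and the two sides pair READ_ONCE() with smp_store_release(). Below is a minimal userspace C11 sketch of the same one-getter/one-putter ring-index pattern; the names (sc_ring, sc_get, sc_put) are illustrative, not the kernel's, and C11 acquire/release atomics stand in for the kernel barriers.

/* Sketch only: assumed names, C11 atomics in place of kernel primitives. */
#include <stdatomic.h>
#include <stddef.h>

struct sc_ring {
	void **slots;			/* slots[0..last] hold send contexts */
	unsigned long last;		/* highest valid index (rb_sc_last) */
	unsigned long head;		/* advanced only by the getter */
	_Atomic unsigned long tail;	/* advanced only by the putter */
};

/* Wrap-around increment, mirroring rpcrdma_sendctx_next(). */
static unsigned long sc_next(const struct sc_ring *r, unsigned long item)
{
	return item < r->last ? item + 1 : 0;
}

/* Getter: fail when advancing head would run into tail, i.e. every
 * context is still in flight. */
static void *sc_get(struct sc_ring *r)
{
	unsigned long next_head = sc_next(r, r->head);

	if (next_head == atomic_load_explicit(&r->tail, memory_order_acquire))
		return NULL;
	r->head = next_head;
	return r->slots[next_head];
}

/* Putter: walk tail forward to the released context, then publish the
 * new tail with release semantics so the getter sees the freed slots. */
static void sc_put(struct sc_ring *r, void *sc)
{
	unsigned long next_tail =
		atomic_load_explicit(&r->tail, memory_order_relaxed);

	do {
		next_tail = sc_next(r, next_tail);
		/* the kernel unmaps each context it walks past here */
	} while (r->slots[next_tail] != sc);

	atomic_store_explicit(&r->tail, next_tail, memory_order_release);
}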
765 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_mrs_create() local
783 spin_lock(&buf->rb_lock); in rpcrdma_mrs_create()
784 rpcrdma_mr_push(mr, &buf->rb_mrs); in rpcrdma_mrs_create()
785 list_add(&mr->mr_all, &buf->rb_all_mrs); in rpcrdma_mrs_create()
786 spin_unlock(&buf->rb_lock); in rpcrdma_mrs_create()
796 struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer, in rpcrdma_mr_refresh_worker() local
798 struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt, in rpcrdma_mr_refresh_worker()
812 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_mrs_refresh() local
823 queue_work(xprtiod_workqueue, &buf->rb_refresh_worker); in rpcrdma_mrs_refresh()
909 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_reqs_setup() local
913 list_for_each_entry(req, &buf->rb_allreqs, rl_all) { in rpcrdma_reqs_setup()
942 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_reqs_reset() local
945 list_for_each_entry(req, &buf->rb_allreqs, rl_all) in rpcrdma_reqs_reset()
953 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_rep_create() local
978 spin_lock(&buf->rb_lock); in rpcrdma_rep_create()
979 list_add(&rep->rr_all, &buf->rb_all_reps); in rpcrdma_rep_create()
980 spin_unlock(&buf->rb_lock); in rpcrdma_rep_create()
997 struct rpcrdma_buffer *buf = &rep->rr_rxprt->rx_buf; in rpcrdma_rep_destroy() local
999 spin_lock(&buf->rb_lock); in rpcrdma_rep_destroy()
1001 spin_unlock(&buf->rb_lock); in rpcrdma_rep_destroy()
1006 static struct rpcrdma_rep *rpcrdma_rep_get_locked(struct rpcrdma_buffer *buf) in rpcrdma_rep_get_locked() argument
1011 node = llist_del_first(&buf->rb_free_reps); in rpcrdma_rep_get_locked()
1023 void rpcrdma_rep_put(struct rpcrdma_buffer *buf, struct rpcrdma_rep *rep) in rpcrdma_rep_put() argument
1025 llist_add(&rep->rr_node, &buf->rb_free_reps); in rpcrdma_rep_put()
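rpcrdma_rep_get_locked() and rpcrdma_rep_put() keep spare receive buffers on rb_free_reps, a lock-less llist: any context may push a rep back, while llist_del_first() requires a single deleter. A hedged userspace sketch of that push/pop discipline using C11 compare-and-swap follows; free_list, free_list_get(), and free_list_put() are made-up names for illustration.

/* Sketch only: single consumer assumed, matching llist_del_first(). */
#include <stdatomic.h>
#include <stddef.h>

struct free_node {
	struct free_node *next;
};

struct free_list {
	_Atomic(struct free_node *) first;
};

/* rpcrdma_rep_put() analogue: lock-free push, safe against other pushers. */
static void free_list_put(struct free_list *fl, struct free_node *node)
{
	struct free_node *old = atomic_load_explicit(&fl->first,
						     memory_order_relaxed);
	do {
		node->next = old;
	} while (!atomic_compare_exchange_weak_explicit(&fl->first, &old, node,
							memory_order_release,
							memory_order_relaxed));
}

/* rpcrdma_rep_get_locked() analogue: pop the head, or NULL when empty.
 * Only one thread may pop at a time (single consumer). */
static struct free_node *free_list_get(struct free_list *fl)
{
	struct free_node *old = atomic_load_explicit(&fl->first,
						     memory_order_acquire);
	while (old &&
	       !atomic_compare_exchange_weak_explicit(&fl->first, &old,
						      old->next,
						      memory_order_acquire,
						      memory_order_acquire))
		;
	return old;
}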
1034 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_reps_unmap() local
1037 list_for_each_entry(rep, &buf->rb_all_reps, rr_all) { in rpcrdma_reps_unmap()
1043 static void rpcrdma_reps_destroy(struct rpcrdma_buffer *buf) in rpcrdma_reps_destroy() argument
1047 spin_lock(&buf->rb_lock); in rpcrdma_reps_destroy()
1048 while ((rep = list_first_entry_or_null(&buf->rb_all_reps, in rpcrdma_reps_destroy()
1052 spin_unlock(&buf->rb_lock); in rpcrdma_reps_destroy()
1056 spin_lock(&buf->rb_lock); in rpcrdma_reps_destroy()
1058 spin_unlock(&buf->rb_lock); in rpcrdma_reps_destroy()
1069 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_buffer_create() local
1072 buf->rb_bc_srv_max_requests = 0; in rpcrdma_buffer_create()
1073 spin_lock_init(&buf->rb_lock); in rpcrdma_buffer_create()
1074 INIT_LIST_HEAD(&buf->rb_mrs); in rpcrdma_buffer_create()
1075 INIT_LIST_HEAD(&buf->rb_all_mrs); in rpcrdma_buffer_create()
1076 INIT_WORK(&buf->rb_refresh_worker, rpcrdma_mr_refresh_worker); in rpcrdma_buffer_create()
1078 INIT_LIST_HEAD(&buf->rb_send_bufs); in rpcrdma_buffer_create()
1079 INIT_LIST_HEAD(&buf->rb_allreqs); in rpcrdma_buffer_create()
1080 INIT_LIST_HEAD(&buf->rb_all_reps); in rpcrdma_buffer_create()
1090 list_add(&req->rl_list, &buf->rb_send_bufs); in rpcrdma_buffer_create()
1093 init_llist_head(&buf->rb_free_reps); in rpcrdma_buffer_create()
1097 rpcrdma_buffer_destroy(buf); in rpcrdma_buffer_create()
1115 struct rpcrdma_buffer *buf = &mr->mr_xprt->rx_buf; in rpcrdma_req_destroy() local
1117 spin_lock(&buf->rb_lock); in rpcrdma_req_destroy()
1119 spin_unlock(&buf->rb_lock); in rpcrdma_req_destroy()
1139 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_mrs_destroy() local
1142 cancel_work_sync(&buf->rb_refresh_worker); in rpcrdma_mrs_destroy()
1144 spin_lock(&buf->rb_lock); in rpcrdma_mrs_destroy()
1145 while ((mr = list_first_entry_or_null(&buf->rb_all_mrs, in rpcrdma_mrs_destroy()
1150 spin_unlock(&buf->rb_lock); in rpcrdma_mrs_destroy()
1154 spin_lock(&buf->rb_lock); in rpcrdma_mrs_destroy()
1156 spin_unlock(&buf->rb_lock); in rpcrdma_mrs_destroy()
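The lock/unlock pairs in rpcrdma_reps_destroy() and rpcrdma_mrs_destroy() above show the same teardown discipline: pop one entry from the "all" list while holding rb_lock, drop the lock to do the release work (which may sleep), then retake the lock and repeat. A small pthread-based sketch of that pattern, with hypothetical names, is below.

/* Sketch only: hypothetical pool type, mutex in place of the spinlock. */
#include <pthread.h>
#include <stdlib.h>

struct tracked {
	struct tracked *next;
	/* ... resource state ... */
};

struct pool {
	pthread_mutex_t lock;
	struct tracked *all;	/* every allocated entry, kept for teardown */
};

/* Stand-in for the real release step, which may block. */
static void release_entry(struct tracked *t)
{
	free(t);
}

static void pool_destroy(struct pool *p)
{
	struct tracked *t;

	pthread_mutex_lock(&p->lock);
	while ((t = p->all) != NULL) {
		p->all = t->next;		/* unlink under the lock */
		pthread_mutex_unlock(&p->lock);
		release_entry(t);		/* never release while holding it */
		pthread_mutex_lock(&p->lock);
	}
	pthread_mutex_unlock(&p->lock);
}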
1168 rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf) in rpcrdma_buffer_destroy() argument
1170 rpcrdma_reps_destroy(buf); in rpcrdma_buffer_destroy()
1172 while (!list_empty(&buf->rb_send_bufs)) { in rpcrdma_buffer_destroy()
1175 req = list_first_entry(&buf->rb_send_bufs, in rpcrdma_buffer_destroy()
1192 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_mr_get() local
1195 spin_lock(&buf->rb_lock); in rpcrdma_mr_get()
1196 mr = rpcrdma_mr_pop(&buf->rb_mrs); in rpcrdma_mr_get()
1197 spin_unlock(&buf->rb_lock); in rpcrdma_mr_get()
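rpcrdma_mr_get() pops an MR from the rb_mrs free list under rb_lock; when the list runs dry, callers can invoke rpcrdma_mrs_refresh(), which queues rb_refresh_worker so new MRs are allocated off the fast path (see the queue_work() match above). The sketch below folds the two steps together for brevity; the names and schedule_refill() stand-in are hypothetical.

/* Sketch only: hypothetical names; the refill stub stands in for
 * queue_work(xprtiod_workqueue, &buf->rb_refresh_worker). */
#include <pthread.h>
#include <stddef.h>

struct mr_node {
	struct mr_node *next;
};

struct mr_pool {
	pthread_mutex_t lock;		/* analogue of rb_lock */
	struct mr_node *free;		/* analogue of rb_mrs */
};

/* A real worker would allocate and push fresh MRs in the background. */
static void schedule_refill(struct mr_pool *pool)
{
	(void)pool;
}

/* rpcrdma_mr_get() analogue: NULL tells the caller to back off and let
 * the refill worker replenish the pool. */
static struct mr_node *mr_get(struct mr_pool *pool)
{
	struct mr_node *mr;

	pthread_mutex_lock(&pool->lock);
	mr = pool->free;
	if (mr)
		pool->free = mr->next;
	pthread_mutex_unlock(&pool->lock);

	if (!mr)
		schedule_refill(pool);	/* analogue of rpcrdma_mrs_refresh() */
	return mr;
}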
1288 void *buf; in rpcrdma_regbuf_realloc() local
1290 buf = kmalloc(size, flags); in rpcrdma_regbuf_realloc()
1291 if (!buf) in rpcrdma_regbuf_realloc()
1297 rb->rg_data = buf; in rpcrdma_regbuf_realloc()
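The rpcrdma_regbuf_realloc() matches show an allocate-before-discard pattern: the replacement buffer is obtained first, so an allocation failure leaves the existing registered buffer untouched, and only on success is the old rg_data replaced. A minimal userspace sketch, with a stubbed-out unmap step standing in for whatever device mapping the old buffer may hold:

/* Sketch only: hypothetical regbuf type and unmap stub. */
#include <stdbool.h>
#include <stdlib.h>

struct regbuf {
	void *data;
	size_t len;
	/* ... device mapping state ... */
};

/* Stand-in for tearing down the old buffer's mapping before freeing it. */
static void regbuf_unmap(struct regbuf *rb)
{
	(void)rb;
}

static bool regbuf_realloc(struct regbuf *rb, size_t size)
{
	void *new_data = malloc(size);

	if (!new_data)
		return false;	/* old buffer remains valid and mapped */

	regbuf_unmap(rb);
	free(rb->data);
	rb->data = new_data;
	rb->len = size;
	return true;
}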
1359 struct rpcrdma_buffer *buf = &r_xprt->rx_buf; in rpcrdma_post_recvs() local
1380 rep = rpcrdma_rep_get_locked(buf); in rpcrdma_post_recvs()
1390 rpcrdma_rep_put(buf, rep); in rpcrdma_post_recvs()
1413 rpcrdma_rep_put(buf, rep); in rpcrdma_post_recvs()