
Lines Matching full:send

151 struct rds_ib_send_work *send, in rds_ib_send_unmap_op() argument
157 switch (send->s_wr.opcode) { in rds_ib_send_unmap_op()
159 if (send->s_op) { in rds_ib_send_unmap_op()
160 rm = container_of(send->s_op, struct rds_message, data); in rds_ib_send_unmap_op()
161 rds_ib_send_unmap_data(ic, send->s_op, wc_status); in rds_ib_send_unmap_op()
166 if (send->s_op) { in rds_ib_send_unmap_op()
167 rm = container_of(send->s_op, struct rds_message, rdma); in rds_ib_send_unmap_op()
168 rds_ib_send_unmap_rdma(ic, send->s_op, wc_status); in rds_ib_send_unmap_op()
173 if (send->s_op) { in rds_ib_send_unmap_op()
174 rm = container_of(send->s_op, struct rds_message, atomic); in rds_ib_send_unmap_op()
175 rds_ib_send_unmap_atomic(ic, send->s_op, wc_status); in rds_ib_send_unmap_op()
181 __func__, send->s_wr.opcode); in rds_ib_send_unmap_op()
185 send->s_wr.opcode = 0xdead; in rds_ib_send_unmap_op()
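The unmap path above dispatches on the work-request opcode, recovers the owning rds_message from the embedded operation with container_of() (lines 160, 167 and 174), and finally poisons the opcode with 0xdead so a later sweep of the ring can tell already-unmapped slots apart. A self-contained illustration of the container_of() idiom, with stand-in struct names rather than the kernel definitions:

/*
 * Stand-alone illustration of the container_of() idiom used above:
 * given a pointer to a member embedded in a larger struct, step back
 * to the enclosing struct.  Struct names are stand-ins.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct data_op {                /* stands in for the op embedded in rds_message */
	int nents;
};

struct message {                /* stands in for struct rds_message */
	unsigned long flags;
	struct data_op data;
};

int main(void)
{
	struct message msg = { .flags = 1, .data = { .nents = 4 } };
	struct data_op *op = &msg.data;          /* what send->s_op would point at */
	struct message *rm = container_of(op, struct message, data);

	printf("recovered: flags=%lu nents=%d\n", rm->flags, rm->data.nents);
	return 0;
}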
192 struct rds_ib_send_work *send; in rds_ib_send_init_ring() local
195 for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) { in rds_ib_send_init_ring()
198 send->s_op = NULL; in rds_ib_send_init_ring()
200 send->s_wr.wr_id = i; in rds_ib_send_init_ring()
201 send->s_wr.sg_list = send->s_sge; in rds_ib_send_init_ring()
202 send->s_wr.ex.imm_data = 0; in rds_ib_send_init_ring()
204 sge = &send->s_sge[0]; in rds_ib_send_init_ring()
210 send->s_sge[1].lkey = ic->i_pd->local_dma_lkey; in rds_ib_send_init_ring()
216 struct rds_ib_send_work *send; in rds_ib_send_clear_ring() local
219 for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) { in rds_ib_send_clear_ring()
220 if (send->s_op && send->s_wr.opcode != 0xdead) in rds_ib_send_clear_ring()
221 rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR); in rds_ib_send_clear_ring()
239 * operations performed in the send path. As the sender allocs and potentially
247 struct rds_ib_send_work *send; in rds_ib_send_cqe_handler() local
272 send = &ic->i_sends[oldest]; in rds_ib_send_cqe_handler()
273 if (send->s_wr.send_flags & IB_SEND_SIGNALED) in rds_ib_send_cqe_handler()
276 rm = rds_ib_send_unmap_op(ic, send, wc->status); in rds_ib_send_cqe_handler()
278 if (time_after(jiffies, send->s_queued + HZ / 2)) in rds_ib_send_cqe_handler()
281 if (send->s_op) { in rds_ib_send_cqe_handler()
282 if (send->s_op == rm->m_final_op) { in rds_ib_send_cqe_handler()
289 send->s_op = NULL; in rds_ib_send_cqe_handler()
305 …rds_ib_conn_error(conn, "send completion on <%pI6c,%pI6c,%d> had status %u (%s), vendor err 0x%x, … in rds_ib_send_cqe_handler()
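The completion handler walks the ring starting at the oldest outstanding slot and retires every entry up to the one named by the completion, complaining if a send sat queued for more than half a second and reporting a connection error on bad status. A minimal, wrap-aware model of the "how many entries completed" arithmetic; the helper name and signature are illustrative, not the RDS ring API:

/*
 * Minimal model of the ring bookkeeping implied above: a completion
 * names a slot via wc->wr_id, and every slot from the oldest
 * outstanding entry up to and including it is now done.
 */
#include <stdio.h>

static unsigned int ring_completed(unsigned int w_nr, unsigned int oldest,
				   unsigned int wr_id)
{
	/* slots oldest..wr_id inclusive, allowing for wrap-around */
	if (wr_id >= oldest)
		return wr_id - oldest + 1;
	return w_nr - oldest + wr_id + 1;
}

int main(void)
{
	unsigned int w_nr = 8;                       /* ring size */

	printf("%u\n", ring_completed(w_nr, 2, 5));  /* 4: slots 2..5    */
	printf("%u\n", ring_completed(w_nr, 6, 1));  /* 4: slots 6,7,0,1 */
	return 0;
}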
317 * - send credits: this tells us how many WRs we're allowed
319 * each SEND WR we post, we decrement this by one.
328 * exhausted their send credits, and are unable to send new credits
329 * to the peer. We achieve this by requiring that we send at least
335 * The RDS send code is essentially single-threaded; rds_send_xmit
336 * sets RDS_IN_XMIT to ensure exclusive access to the send ring.
340 * In the send path, we need to update the counters for send credits
346 * Spinlocks shared between the send and the receive path are bad,
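The comment block above argues for per-connection send credits plus newly-posted-buffer credits, with one credit always held back so a credit update can still reach a starved peer, and with no spinlock shared between the send and receive paths. One lock-free shape that satisfies those constraints, shown purely as an assumed sketch (the 16/16 bit split and macro names are illustrative, not taken from the RDS headers):

/*
 * Assumed sketch of a packed credit counter: send credits in the low
 * 16 bits, newly posted receive buffers in the high 16 bits, both
 * adjusted with single atomic operations so the send and receive
 * paths never share a lock.
 */
#include <stdatomic.h>
#include <stdio.h>

#define GET_SEND_CREDITS(v)	((v) & 0xffff)
#define SET_SEND_CREDITS(v)	((v) & 0xffff)
#define SET_POST_CREDITS(v)	((unsigned int)(v) << 16)

static atomic_uint i_credits;

/*
 * Try to take 'wanted' send credits; return how many were granted.
 * One credit is always held back so a credit update can still be
 * sent, as the comments above require.  The send path is assumed
 * single-threaded, so credits can only grow between load and sub.
 */
static unsigned int grab_send_credits(unsigned int wanted)
{
	unsigned int avail = GET_SEND_CREDITS(atomic_load(&i_credits));
	unsigned int got = 0;

	if (avail > 1)
		got = wanted < avail - 1 ? wanted : avail - 1;
	if (got)
		atomic_fetch_sub(&i_credits, SET_SEND_CREDITS(got));
	return got;
}

int main(void)
{
	atomic_store(&i_credits, SET_SEND_CREDITS(4) | SET_POST_CREDITS(0));

	printf("granted %u credits\n", grab_send_credits(10));  /* 3: one reserved */
	printf("remaining %u\n", GET_SEND_CREDITS(atomic_load(&i_credits)));
	return 0;
}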
375 /* The last credit must be used to send a credit update. */ in rds_ib_send_grab_credits()
393 * the posted regardless of whether any send credits are in rds_ib_send_grab_credits()
439 /* Decide whether to send an update to the peer now. in rds_ib_advertise_credits()
440 * If we would send a credit update for every single buffer we in rds_ib_advertise_credits()
442 * consumes buffer, we refill the ring, send ACK to remote in rds_ib_advertise_credits()
445 * Performance pretty much depends on how often we send in rds_ib_advertise_credits()
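The advertise-credits comments make the rate of explicit credit updates a performance knob: updating the peer for every single refilled buffer would be as chatty as per-packet ACKs. A toy version of the "only advertise once enough credits have accumulated" decision, with the threshold and names assumed for illustration:

/*
 * Toy version of the "when to advertise" decision described above:
 * accumulate newly posted buffers and only emit an explicit credit
 * update once enough have built up, otherwise let them piggyback on
 * normal outgoing traffic.
 */
#include <stdio.h>

static unsigned int unadvertised;

static int should_send_credit_update(unsigned int newly_posted,
				     unsigned int threshold)
{
	unadvertised += newly_posted;
	if (unadvertised < threshold)
		return 0;
	unadvertised = 0;	/* they will ride in the update we send now */
	return 1;
}

int main(void)
{
	/* refill one buffer at a time; only every 16th refill triggers an update */
	for (int i = 1; i <= 40; i++)
		if (should_send_credit_update(1, 16))
			printf("credit update after refill %d\n", i);
	return 0;
}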
456 struct rds_ib_send_work *send, in rds_ib_set_wr_signal_state() argument
466 send->s_wr.send_flags |= IB_SEND_SIGNALED; in rds_ib_set_wr_signal_state()
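rds_ib_set_wr_signal_state() is where selective signaling happens: most WRs go out unsignaled, and only every Nth one, or one the caller explicitly wants to be notified about, sets IB_SEND_SIGNALED, which keeps completion processing cheap while still draining the ring. A stand-alone sketch of that batching logic; the threshold constant and return convention are assumptions:

/*
 * Sketch of selective signaling: post most WRs unsignaled and request
 * a signaled completion only every Nth WR, or whenever the caller
 * explicitly asks for notification.
 */
#include <stdio.h>

#define UNSIGNALED_WRS	16	/* assumed batching threshold */
#define SEND_SIGNALED	0x1	/* stands in for IB_SEND_SIGNALED */

static unsigned int unsignaled_count;

static int set_wr_signal_state(unsigned int *send_flags, int notify)
{
	if (++unsignaled_count >= UNSIGNALED_WRS || notify) {
		unsignaled_count = 0;
		*send_flags |= SEND_SIGNALED;
		return 1;	/* caller tallies signaled WRs per post */
	}
	return 0;
}

int main(void)
{
	unsigned int signaled = 0;

	for (int i = 0; i < 64; i++) {
		unsigned int flags = 0;

		signaled += set_wr_signal_state(&flags, 0);
	}
	printf("%u of 64 WRs signaled\n", signaled);	/* 4 */
	return 0;
}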
479 * once we send the final fragment.
490 struct rds_ib_send_work *send = NULL; in rds_ib_xmit() local
510 /* Do not send cong updates to IB loopback */ in rds_ib_xmit()
595 * sticking the header into the send ring. Which is why we in rds_ib_xmit()
611 * READ and the following SEND. in rds_ib_xmit()
620 send = &ic->i_sends[pos]; in rds_ib_xmit()
621 first = send; in rds_ib_xmit()
629 send->s_wr.send_flags = send_flags; in rds_ib_xmit()
630 send->s_wr.opcode = IB_WR_SEND; in rds_ib_xmit()
631 send->s_wr.num_sge = 1; in rds_ib_xmit()
632 send->s_wr.next = NULL; in rds_ib_xmit()
633 send->s_queued = jiffies; in rds_ib_xmit()
634 send->s_op = NULL; in rds_ib_xmit()
636 send->s_sge[0].addr = ic->i_send_hdrs_dma[pos]; in rds_ib_xmit()
638 send->s_sge[0].length = sizeof(struct rds_header); in rds_ib_xmit()
639 send->s_sge[0].lkey = ic->i_pd->local_dma_lkey; in rds_ib_xmit()
654 send->s_wr.num_sge = 2; in rds_ib_xmit()
656 send->s_sge[1].addr = sg_dma_address(scat); in rds_ib_xmit()
657 send->s_sge[1].addr += rm->data.op_dmaoff; in rds_ib_xmit()
658 send->s_sge[1].length = len; in rds_ib_xmit()
659 send->s_sge[1].lkey = ic->i_pd->local_dma_lkey; in rds_ib_xmit()
670 rds_ib_set_wr_signal_state(ic, send, false); in rds_ib_xmit()
676 rds_ib_set_wr_signal_state(ic, send, true); in rds_ib_xmit()
677 send->s_wr.send_flags |= IB_SEND_SOLICITED; in rds_ib_xmit()
680 if (send->s_wr.send_flags & IB_SEND_SIGNALED) in rds_ib_xmit()
683 rdsdebug("send %p wr %p num_sge %u next %p\n", send, in rds_ib_xmit()
684 &send->s_wr, send->s_wr.num_sge, send->s_wr.next); in rds_ib_xmit()
701 prev->s_wr.next = &send->s_wr; in rds_ib_xmit()
702 prev = send; in rds_ib_xmit()
705 send = &ic->i_sends[pos]; in rds_ib_xmit()
716 /* if we finished the message then send completion owns it */ in rds_ib_xmit()
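The main rds_ib_xmit() loop above claims consecutive ring slots, gives each one a header SGE plus an optional data SGE, links the slots together through wr.next so the whole fragment chain can be posted at once, and wraps back to the start of the ring when it runs off the end. A simplified stand-alone model of that chaining and wrap-around; the struct layout is a stand-in, not the kernel's:

/*
 * Simplified model of the fragment-chaining loop: each ring slot gets
 * a header SGE plus an optional data SGE, consecutive slots are linked
 * through wr.next so one post pushes the whole chain, and the slot
 * pointer wraps at the end of the ring.
 */
#include <stdio.h>

#define RING_SIZE 4

struct fake_wr {
	struct fake_wr *next;
	unsigned int num_sge;
};

struct send_work {
	struct fake_wr s_wr;	/* first member, so the casts below are safe */
};

static struct send_work i_sends[RING_SIZE];

int main(void)
{
	unsigned int pos = 2;		/* slot handed out by the ring allocator */
	unsigned int nr_frags = 3;
	struct send_work *send = &i_sends[pos];
	struct send_work *first = send, *prev = NULL;

	for (unsigned int i = 0; i < nr_frags; i++) {
		send->s_wr.next = NULL;
		send->s_wr.num_sge = 2;			/* header SGE + data SGE */

		if (prev)
			prev->s_wr.next = &send->s_wr;	/* chain onto previous WR */
		prev = send;

		if (++send == &i_sends[RING_SIZE])	/* wrap around the ring */
			send = i_sends;
	}

	/* the chain rooted at &first->s_wr is what would be handed to the post call */
	for (struct fake_wr *wr = &first->s_wr; wr; wr = wr->next)
		printf("slot %td in chain\n", (struct send_work *)wr - i_sends);
	return 0;
}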
770 struct rds_ib_send_work *send = NULL; in rds_ib_xmit_atomic() local
784 /* address of send request in ring */ in rds_ib_xmit_atomic()
785 send = &ic->i_sends[pos]; in rds_ib_xmit_atomic()
786 send->s_queued = jiffies; in rds_ib_xmit_atomic()
789 send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP; in rds_ib_xmit_atomic()
790 send->s_atomic_wr.compare_add = op->op_m_cswp.compare; in rds_ib_xmit_atomic()
791 send->s_atomic_wr.swap = op->op_m_cswp.swap; in rds_ib_xmit_atomic()
792 send->s_atomic_wr.compare_add_mask = op->op_m_cswp.compare_mask; in rds_ib_xmit_atomic()
793 send->s_atomic_wr.swap_mask = op->op_m_cswp.swap_mask; in rds_ib_xmit_atomic()
795 send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD; in rds_ib_xmit_atomic()
796 send->s_atomic_wr.compare_add = op->op_m_fadd.add; in rds_ib_xmit_atomic()
797 send->s_atomic_wr.swap = 0; in rds_ib_xmit_atomic()
798 send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask; in rds_ib_xmit_atomic()
799 send->s_atomic_wr.swap_mask = 0; in rds_ib_xmit_atomic()
801 send->s_wr.send_flags = 0; in rds_ib_xmit_atomic()
802 nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify); in rds_ib_xmit_atomic()
803 send->s_atomic_wr.wr.num_sge = 1; in rds_ib_xmit_atomic()
804 send->s_atomic_wr.wr.next = NULL; in rds_ib_xmit_atomic()
805 send->s_atomic_wr.remote_addr = op->op_remote_addr; in rds_ib_xmit_atomic()
806 send->s_atomic_wr.rkey = op->op_rkey; in rds_ib_xmit_atomic()
807 send->s_op = op; in rds_ib_xmit_atomic()
808 rds_message_addref(container_of(send->s_op, struct rds_message, atomic)); in rds_ib_xmit_atomic()
821 send->s_sge[0].addr = sg_dma_address(op->op_sg); in rds_ib_xmit_atomic()
822 send->s_sge[0].length = sg_dma_len(op->op_sg); in rds_ib_xmit_atomic()
823 send->s_sge[0].lkey = ic->i_pd->local_dma_lkey; in rds_ib_xmit_atomic()
826 send->s_sge[0].addr, send->s_sge[0].length); in rds_ib_xmit_atomic()
831 failed_wr = &send->s_atomic_wr.wr; in rds_ib_xmit_atomic()
832 ret = ib_post_send(ic->i_cm_id->qp, &send->s_atomic_wr.wr, &failed_wr); in rds_ib_xmit_atomic()
833 rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic, in rds_ib_xmit_atomic()
834 send, &send->s_atomic_wr, ret, failed_wr); in rds_ib_xmit_atomic()
835 BUG_ON(failed_wr != &send->s_atomic_wr.wr); in rds_ib_xmit_atomic()
844 if (unlikely(failed_wr != &send->s_atomic_wr.wr)) { in rds_ib_xmit_atomic()
846 BUG_ON(failed_wr != &send->s_atomic_wr.wr); in rds_ib_xmit_atomic()
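The atomic path fills in a masked compare-and-swap or masked fetch-and-add work request and posts it with ib_post_send(). As a plain-C model of what the masked compare-and-swap asks the remote side to do (semantics modelled from the verbs fields shown above, not a driver interaction):

/*
 * Plain-C model of a masked compare-and-swap: the remote 64-bit word
 * is compared under compare_mask, and on a match only the bits
 * selected by swap_mask are replaced by the swap value.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t masked_cmp_swap(uint64_t *target, uint64_t compare,
				uint64_t swap, uint64_t compare_mask,
				uint64_t swap_mask)
{
	uint64_t old = *target;

	if ((old & compare_mask) == (compare & compare_mask))
		*target = (old & ~swap_mask) | (swap & swap_mask);
	return old;		/* the pre-swap value is what comes back to the caller */
}

int main(void)
{
	uint64_t word = 0x00000000deadbeefULL;

	/* compare only the low byte; on success rewrite only the top byte */
	masked_cmp_swap(&word, 0xef, 0xaa00000000000000ULL,
			0xffULL, 0xff00000000000000ULL);
	printf("word = 0x%016llx\n", (unsigned long long)word);	/* 0xaa000000deadbeef */
	return 0;
}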
856 struct rds_ib_send_work *send = NULL; in rds_ib_xmit_rdma() local
899 * be enough work requests to send the entire message. in rds_ib_xmit_rdma()
911 send = &ic->i_sends[pos]; in rds_ib_xmit_rdma()
912 first = send; in rds_ib_xmit_rdma()
919 send->s_wr.send_flags = 0; in rds_ib_xmit_rdma()
920 send->s_queued = jiffies; in rds_ib_xmit_rdma()
921 send->s_op = NULL; in rds_ib_xmit_rdma()
924 nr_sig += rds_ib_set_wr_signal_state(ic, send, in rds_ib_xmit_rdma()
927 send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ; in rds_ib_xmit_rdma()
928 send->s_rdma_wr.remote_addr = remote_addr; in rds_ib_xmit_rdma()
929 send->s_rdma_wr.rkey = op->op_rkey; in rds_ib_xmit_rdma()
932 send->s_rdma_wr.wr.num_sge = max_sge; in rds_ib_xmit_rdma()
935 send->s_rdma_wr.wr.num_sge = num_sge; in rds_ib_xmit_rdma()
938 send->s_rdma_wr.wr.next = NULL; in rds_ib_xmit_rdma()
941 prev->s_rdma_wr.wr.next = &send->s_rdma_wr.wr; in rds_ib_xmit_rdma()
943 for (j = 0; j < send->s_rdma_wr.wr.num_sge && in rds_ib_xmit_rdma()
947 send->s_sge[j].addr = sg_dma_address(scat); in rds_ib_xmit_rdma()
948 send->s_sge[j].lkey = ic->i_pd->local_dma_lkey; in rds_ib_xmit_rdma()
950 send->s_sge[j].addr = odp_addr; in rds_ib_xmit_rdma()
951 send->s_sge[j].lkey = odp_lkey; in rds_ib_xmit_rdma()
953 send->s_sge[j].length = len; in rds_ib_xmit_rdma()
963 rdsdebug("send %p wr %p num_sge %u next %p\n", send, in rds_ib_xmit_rdma()
964 &send->s_rdma_wr.wr, in rds_ib_xmit_rdma()
965 send->s_rdma_wr.wr.num_sge, in rds_ib_xmit_rdma()
966 send->s_rdma_wr.wr.next); in rds_ib_xmit_rdma()
968 prev = send; in rds_ib_xmit_rdma()
969 if (++send == &ic->i_sends[ic->i_send_ring.w_nr]) in rds_ib_xmit_rdma()
970 send = ic->i_sends; in rds_ib_xmit_rdma()
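rds_ib_xmit_rdma() has to split an operation whose scatterlist exceeds the device's max_sge across a chain of work requests, each carrying at most max_sge entries; the num_sge bookkeeping above does that slicing. The arithmetic, reduced to a stand-alone example with illustrative values:

/*
 * Slicing arithmetic: an op with more DMA segments than max_sge is
 * split into a chain of work requests, each carrying at most max_sge
 * scatter/gather entries.
 */
#include <stdio.h>

int main(void)
{
	unsigned int num_sge = 37;	/* total mapped segments in the RDMA op */
	unsigned int max_sge = 16;	/* per-WR limit advertised by the device */
	unsigned int nr_wrs = (num_sge + max_sge - 1) / max_sge;	/* DIV_ROUND_UP */

	for (unsigned int i = 0, left = num_sge; i < nr_wrs; i++) {
		unsigned int this_wr = left > max_sge ? max_sge : left;

		printf("WR %u carries %u SGEs\n", i, this_wr);
		left -= this_wr;
	}
	return 0;
}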
1016 * to send previously (due to flow control). Try again. */ in rds_ib_xmit_path_complete()