Lines Matching +full:recv +full:- +full:not +full:- +full:empty

14  *      - Redistributions of source code must retain the above
18 * - Redistributions in binary form must reproduce the above
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
36 #include <linux/dma-mapping.h>
49 struct rds_ib_recv_work *recv; in rds_ib_recv_init_ring() local
52 for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) { in rds_ib_recv_init_ring()
55 recv->r_ibinc = NULL; in rds_ib_recv_init_ring()
56 recv->r_frag = NULL; in rds_ib_recv_init_ring()
58 recv->r_wr.next = NULL; in rds_ib_recv_init_ring()
59 recv->r_wr.wr_id = i; in rds_ib_recv_init_ring()
60 recv->r_wr.sg_list = recv->r_sge; in rds_ib_recv_init_ring()
61 recv->r_wr.num_sge = RDS_IB_RECV_SGE; in rds_ib_recv_init_ring()
63 sge = &recv->r_sge[0]; in rds_ib_recv_init_ring()
64 sge->addr = ic->i_recv_hdrs_dma[i]; in rds_ib_recv_init_ring()
65 sge->length = sizeof(struct rds_header); in rds_ib_recv_init_ring()
66 sge->lkey = ic->i_pd->local_dma_lkey; in rds_ib_recv_init_ring()
68 sge = &recv->r_sge[1]; in rds_ib_recv_init_ring()
69 sge->addr = 0; in rds_ib_recv_init_ring()
70 sge->length = RDS_FRAG_SIZE; in rds_ib_recv_init_ring()
71 sge->lkey = ic->i_pd->local_dma_lkey; in rds_ib_recv_init_ring()
82 struct list_head *from_last = from->prev; in list_splice_entire_tail()
92 tmp = xchg(&cache->xfer, NULL); in rds_ib_cache_xfer_to_ready()
94 if (cache->ready) in rds_ib_cache_xfer_to_ready()
95 list_splice_entire_tail(tmp, cache->ready); in rds_ib_cache_xfer_to_ready()
97 cache->ready = tmp; in rds_ib_cache_xfer_to_ready()
106 cache->percpu = alloc_percpu_gfp(struct rds_ib_cache_head, gfp); in rds_ib_recv_alloc_cache()
107 if (!cache->percpu) in rds_ib_recv_alloc_cache()
108 return -ENOMEM; in rds_ib_recv_alloc_cache()
111 head = per_cpu_ptr(cache->percpu, cpu); in rds_ib_recv_alloc_cache()
112 head->first = NULL; in rds_ib_recv_alloc_cache()
113 head->count = 0; in rds_ib_recv_alloc_cache()
115 cache->xfer = NULL; in rds_ib_recv_alloc_cache()
116 cache->ready = NULL; in rds_ib_recv_alloc_cache()
125 ret = rds_ib_recv_alloc_cache(&ic->i_cache_incs, gfp); in rds_ib_recv_alloc_caches()
127 ret = rds_ib_recv_alloc_cache(&ic->i_cache_frags, gfp); in rds_ib_recv_alloc_caches()
129 free_percpu(ic->i_cache_incs.percpu); in rds_ib_recv_alloc_caches()
142 head = per_cpu_ptr(cache->percpu, cpu); in rds_ib_cache_splice_all_lists()
143 if (head->first) { in rds_ib_cache_splice_all_lists()
144 list_splice_entire_tail(head->first, caller_list); in rds_ib_cache_splice_all_lists()
145 head->first = NULL; in rds_ib_cache_splice_all_lists()
149 if (cache->ready) { in rds_ib_cache_splice_all_lists()
150 list_splice_entire_tail(cache->ready, caller_list); in rds_ib_cache_splice_all_lists()
151 cache->ready = NULL; in rds_ib_cache_splice_all_lists()
163 rds_ib_cache_xfer_to_ready(&ic->i_cache_incs); in rds_ib_recv_free_caches()
164 rds_ib_cache_splice_all_lists(&ic->i_cache_incs, &list); in rds_ib_recv_free_caches()
165 free_percpu(ic->i_cache_incs.percpu); in rds_ib_recv_free_caches()
168 list_del(&inc->ii_cache_entry); in rds_ib_recv_free_caches()
169 WARN_ON(!list_empty(&inc->ii_frags)); in rds_ib_recv_free_caches()
174 rds_ib_cache_xfer_to_ready(&ic->i_cache_frags); in rds_ib_recv_free_caches()
175 rds_ib_cache_splice_all_lists(&ic->i_cache_frags, &list); in rds_ib_recv_free_caches()
176 free_percpu(ic->i_cache_frags.percpu); in rds_ib_recv_free_caches()
179 list_del(&frag->f_cache_entry); in rds_ib_recv_free_caches()
180 WARN_ON(!list_empty(&frag->f_item)); in rds_ib_recv_free_caches()
191 /* Recycle frag and attached recv buffer f_sg */
195 rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg)); in rds_ib_frag_free()
197 rds_ib_recv_cache_put(&frag->f_cache_entry, &ic->i_cache_frags); in rds_ib_frag_free()
198 atomic_add(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs); in rds_ib_frag_free()
208 struct rds_ib_connection *ic = inc->i_conn->c_transport_data; in rds_ib_inc_free()
213 list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) { in rds_ib_inc_free()
214 list_del_init(&frag->f_item); in rds_ib_inc_free()
217 BUG_ON(!list_empty(&ibinc->ii_frags)); in rds_ib_inc_free()
220 rds_ib_recv_cache_put(&ibinc->ii_cache_entry, &ic->i_cache_incs); in rds_ib_inc_free()
224 struct rds_ib_recv_work *recv) in rds_ib_recv_clear_one() argument
226 if (recv->r_ibinc) { in rds_ib_recv_clear_one()
227 rds_inc_put(&recv->r_ibinc->ii_inc); in rds_ib_recv_clear_one()
228 recv->r_ibinc = NULL; in rds_ib_recv_clear_one()
230 if (recv->r_frag) { in rds_ib_recv_clear_one()
231 ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE); in rds_ib_recv_clear_one()
232 rds_ib_frag_free(ic, recv->r_frag); in rds_ib_recv_clear_one()
233 recv->r_frag = NULL; in rds_ib_recv_clear_one()
241 for (i = 0; i < ic->i_recv_ring.w_nr; i++) in rds_ib_recv_clear_ring()
242 rds_ib_recv_clear_one(ic, &ic->i_recvs[i]); in rds_ib_recv_clear_ring()
252 cache_item = rds_ib_recv_cache_get(&ic->i_cache_incs); in rds_ib_refill_one_inc()
269 INIT_LIST_HEAD(&ibinc->ii_frags); in rds_ib_refill_one_inc()
270 rds_inc_init(&ibinc->ii_inc, ic->conn, &ic->conn->c_faddr); in rds_ib_refill_one_inc()
282 cache_item = rds_ib_recv_cache_get(&ic->i_cache_frags); in rds_ib_refill_one_frag()
285 atomic_sub(RDS_FRAG_SIZE / SZ_1K, &ic->i_cache_allocs); in rds_ib_refill_one_frag()
292 sg_init_table(&frag->f_sg, 1); in rds_ib_refill_one_frag()
293 ret = rds_page_remainder_alloc(&frag->f_sg, in rds_ib_refill_one_frag()
302 INIT_LIST_HEAD(&frag->f_item); in rds_ib_refill_one_frag()
308 struct rds_ib_recv_work *recv, gfp_t gfp) in rds_ib_recv_refill_one() argument
310 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_recv_refill_one()
312 int ret = -ENOMEM; in rds_ib_recv_refill_one()
321 if (!ic->i_cache_incs.ready) in rds_ib_recv_refill_one()
322 rds_ib_cache_xfer_to_ready(&ic->i_cache_incs); in rds_ib_recv_refill_one()
323 if (!ic->i_cache_frags.ready) in rds_ib_recv_refill_one()
324 rds_ib_cache_xfer_to_ready(&ic->i_cache_frags); in rds_ib_recv_refill_one()
327 * ibinc was taken from recv if recv contained the start of a message. in rds_ib_recv_refill_one()
330 if (!recv->r_ibinc) { in rds_ib_recv_refill_one()
331 recv->r_ibinc = rds_ib_refill_one_inc(ic, slab_mask); in rds_ib_recv_refill_one()
332 if (!recv->r_ibinc) in rds_ib_recv_refill_one()
336 WARN_ON(recv->r_frag); /* leak! */ in rds_ib_recv_refill_one()
337 recv->r_frag = rds_ib_refill_one_frag(ic, slab_mask, page_mask); in rds_ib_recv_refill_one()
338 if (!recv->r_frag) in rds_ib_recv_refill_one()
341 ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, in rds_ib_recv_refill_one()
345 sge = &recv->r_sge[0]; in rds_ib_recv_refill_one()
346 sge->addr = ic->i_recv_hdrs_dma[recv - ic->i_recvs]; in rds_ib_recv_refill_one()
347 sge->length = sizeof(struct rds_header); in rds_ib_recv_refill_one()
349 sge = &recv->r_sge[1]; in rds_ib_recv_refill_one()
350 sge->addr = sg_dma_address(&recv->r_frag->f_sg); in rds_ib_recv_refill_one()
351 sge->length = sg_dma_len(&recv->r_frag->f_sg); in rds_ib_recv_refill_one()
360 return test_and_set_bit(RDS_RECV_REFILL, &conn->c_flags) == 0; in acquire_refill()
365 clear_bit(RDS_RECV_REFILL, &conn->c_flags); in release_refill()
370 * the system-wide hashed waitqueue buckets in the fast path only to in release_refill()
373 if (waitqueue_active(&conn->c_waitq)) in release_refill()
374 wake_up_all(&conn->c_waitq); in release_refill()
384 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_recv_refill()
385 struct rds_ib_recv_work *recv; in rds_ib_recv_refill() local
400 rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) { in rds_ib_recv_refill()
401 if (pos >= ic->i_recv_ring.w_nr) { in rds_ib_recv_refill()
402 printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n", in rds_ib_recv_refill()
407 recv = &ic->i_recvs[pos]; in rds_ib_recv_refill()
408 ret = rds_ib_recv_refill_one(conn, recv, gfp); in rds_ib_recv_refill()
414 rdsdebug("recv %p ibinc %p page %p addr %lu\n", recv, in rds_ib_recv_refill()
415 recv->r_ibinc, sg_page(&recv->r_frag->f_sg), in rds_ib_recv_refill()
416 (long)sg_dma_address(&recv->r_frag->f_sg)); in rds_ib_recv_refill()
419 ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, NULL); in rds_ib_recv_refill()
421 rds_ib_conn_error(conn, "recv post on " in rds_ib_recv_refill()
423 "reconnecting\n", &conn->c_faddr, in rds_ib_recv_refill()
436 /* We're doing flow control - update the window. */ in rds_ib_recv_refill()
437 if (ic->i_flowctl && posted) in rds_ib_recv_refill()
441 rds_ib_ring_unalloc(&ic->i_recv_ring, 1); in rds_ib_recv_refill()
448 * ring is completely empty. in rds_ib_recv_refill()
457 (can_wait && rds_ib_ring_low(&ic->i_recv_ring)) || in rds_ib_recv_refill()
458 rds_ib_ring_empty(&ic->i_recv_ring))) { in rds_ib_recv_refill()
459 queue_delayed_work(rds_wq, &conn->c_recv_w, 1); in rds_ib_recv_refill()
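
The refill path above runs in only one context at a time: acquire_refill()/release_refill() guard it with the RDS_RECV_REFILL connection flag, and the loop keeps claiming ring slots and posting receives until the ring is full or a post fails, re-queueing delayed work when the ring runs low. Below is a small compilable userspace sketch of that shape only, not the kernel code: ring_alloc_one() and post_one() are hypothetical stand-ins for rds_ib_ring_alloc() and rds_ib_recv_refill_one()+ib_post_recv(), and a C11 atomic_flag replaces test_and_set_bit()/clear_bit().

/* refill_sketch.c - one-refiller-at-a-time posting loop (illustrative only) */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RING_SLOTS 8

static atomic_flag refill_busy = ATOMIC_FLAG_INIT;  /* stands in for RDS_RECV_REFILL */
static unsigned    posted;                          /* stands in for the ring fill level */

/* hypothetical stand-in for rds_ib_ring_alloc(&ring, 1, &pos) */
static bool ring_alloc_one(unsigned *pos)
{
    if (posted >= RING_SLOTS)
        return false;
    *pos = posted;
    return true;
}

/* hypothetical stand-in for refill_one() + ib_post_recv() */
static int post_one(unsigned pos)
{
    printf("posted recv in slot %u\n", pos);
    posted++;
    return 0;
}

static void recv_refill(void)
{
    if (atomic_flag_test_and_set(&refill_busy))
        return;                         /* someone else is already refilling */

    unsigned pos;
    while (ring_alloc_one(&pos)) {
        if (post_one(pos))
            break;                      /* the kernel un-allocates the slot and
                                           drops the connection on a post error */
    }

    atomic_flag_clear(&refill_busy);    /* release_refill(); the kernel then wakes
                                           c_waitq only if someone is waiting on it */
}

int main(void)
{
    recv_refill();
    return 0;
}
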
466 * We want to recycle several types of recv allocations, like incs and frags.
471 * We move it to an intermediate non-percpu list in a lockless manner, with some
475 * be NULL and xchg'd. The list is actually empty when the pointer is NULL, and
486 chpfirst = __this_cpu_read(cache->percpu->first); in rds_ib_recv_cache_put()
492 __this_cpu_write(cache->percpu->first, new_item); in rds_ib_recv_cache_put()
493 __this_cpu_inc(cache->percpu->count); in rds_ib_recv_cache_put()
495 if (__this_cpu_read(cache->percpu->count) < RDS_IB_RECYCLE_BATCH_COUNT) in rds_ib_recv_cache_put()
499 * Return our per-cpu first list to the cache's xfer by atomically in rds_ib_recv_cache_put()
500 * grabbing the current xfer list, appending it to our per-cpu list, in rds_ib_recv_cache_put()
502 * cache's xfer list as long as it's still empty. in rds_ib_recv_cache_put()
505 old = xchg(&cache->xfer, NULL); in rds_ib_recv_cache_put()
508 old = cmpxchg(&cache->xfer, NULL, chpfirst); in rds_ib_recv_cache_put()
512 __this_cpu_write(cache->percpu->first, NULL); in rds_ib_recv_cache_put()
513 __this_cpu_write(cache->percpu->count, 0); in rds_ib_recv_cache_put()
520 struct list_head *head = cache->ready; in rds_ib_recv_cache_get()
524 cache->ready = head->next; in rds_ib_recv_cache_get()
527 cache->ready = NULL; in rds_ib_recv_cache_get()
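
The comment above the rds_ib_recv_cache_put() listing describes the recycling scheme: freed incs and frags collect on a per-CPU list, and once RDS_IB_RECYCLE_BATCH_COUNT items pile up the whole chain is parked on the shared xfer pointer with an xchg()/cmpxchg() loop; the single consumer later moves xfer to ready and pops entries without taking a lock. The sketch below models that handoff in userspace C11 with a plain singly linked chain instead of the kernel's circular list_heads; it is a simplification for illustration, not the kernel implementation.

/* recycle_cache_sketch.c - batched lockless handoff, modelled on the i_cache_* scheme */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct item { struct item *next; };

#define BATCH 4                         /* stands in for RDS_IB_RECYCLE_BATCH_COUNT */

/* producer-side state: per CPU in the kernel, a single context here for brevity */
static struct item *local_first;
static int          local_count;

/* shared handoff point (cache->xfer) and consumer-private list (cache->ready) */
static _Atomic(struct item *) xfer;
static struct item *ready;

static void cache_put(struct item *it)
{
    it->next    = local_first;
    local_first = it;
    if (++local_count < BATCH)
        return;                         /* keep batching locally */

    /* Push the whole local chain onto xfer: grab whatever is parked there,
     * splice it behind our chain, then install the combined chain while
     * xfer is still empty - the same xchg()/cmpxchg() loop as above. */
    struct item *chain = local_first, *tail = chain;
    while (tail->next)
        tail = tail->next;

    for (;;) {
        struct item *old = atomic_exchange(&xfer, NULL);
        if (old) {
            tail->next = old;
            while (tail->next)          /* walk to the new tail before retrying */
                tail = tail->next;
        }
        struct item *expect = NULL;
        if (atomic_compare_exchange_strong(&xfer, &expect, chain))
            break;                      /* parked; otherwise absorb the newcomer and retry */
    }
    local_first = NULL;
    local_count = 0;
}

/* consumer side: move xfer to ready when ready runs dry, then pop (single consumer) */
static struct item *cache_get(void)
{
    if (!ready)
        ready = atomic_exchange(&xfer, NULL);   /* rds_ib_cache_xfer_to_ready() */
    struct item *it = ready;
    if (it)
        ready = it->next;
    return it;
}

int main(void)
{
    static struct item pool[8];
    for (int i = 0; i < 8; i++)
        cache_put(&pool[i]);
    int n = 0;
    while (cache_get())
        n++;
    printf("recycled %d items\n", n);
    return 0;
}
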
544 frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item); in rds_ib_inc_copy_to_user()
545 len = be32_to_cpu(inc->i_hdr.h_len); in rds_ib_inc_copy_to_user()
549 frag = list_entry(frag->f_item.next, in rds_ib_inc_copy_to_user()
554 RDS_FRAG_SIZE - frag_off); in rds_ib_inc_copy_to_user()
555 to_copy = min_t(unsigned long, to_copy, len - copied); in rds_ib_inc_copy_to_user()
559 ret = copy_page_to_iter(sg_page(&frag->f_sg), in rds_ib_inc_copy_to_user()
560 frag->f_sg.offset + frag_off, in rds_ib_inc_copy_to_user()
564 return -EFAULT; in rds_ib_inc_copy_to_user()
576 struct ib_send_wr *wr = &ic->i_ack_wr; in rds_ib_recv_init_ack()
577 struct ib_sge *sge = &ic->i_ack_sge; in rds_ib_recv_init_ack()
579 sge->addr = ic->i_ack_dma; in rds_ib_recv_init_ack()
580 sge->length = sizeof(struct rds_header); in rds_ib_recv_init_ack()
581 sge->lkey = ic->i_pd->local_dma_lkey; in rds_ib_recv_init_ack()
583 wr->sg_list = sge; in rds_ib_recv_init_ack()
584 wr->num_sge = 1; in rds_ib_recv_init_ack()
585 wr->opcode = IB_WR_SEND; in rds_ib_recv_init_ack()
586 wr->wr_id = RDS_IB_ACK_WR_ID; in rds_ib_recv_init_ack()
587 wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED; in rds_ib_recv_init_ack()
596 * potential issue if another HCA is available for fail-over.
604 * a single ack frame being in flight. This might not be good enough.
606 * This is implemented by having a long-lived send_wr and sge which point to a
607 * statically allocated ack frame. This ack wr does not fall under the ring
617 spin_lock_irqsave(&ic->i_ack_lock, flags); in rds_ib_set_ack()
618 ic->i_ack_next = seq; in rds_ib_set_ack()
620 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); in rds_ib_set_ack()
621 spin_unlock_irqrestore(&ic->i_ack_lock, flags); in rds_ib_set_ack()
629 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); in rds_ib_get_ack()
631 spin_lock_irqsave(&ic->i_ack_lock, flags); in rds_ib_get_ack()
632 seq = ic->i_ack_next; in rds_ib_get_ack()
633 spin_unlock_irqrestore(&ic->i_ack_lock, flags); in rds_ib_get_ack()
640 atomic64_set(&ic->i_ack_next, seq); in rds_ib_set_ack()
643 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); in rds_ib_set_ack()
649 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); in rds_ib_get_ack()
652 return atomic64_read(&ic->i_ack_next); in rds_ib_get_ack()
659 struct rds_header *hdr = ic->i_ack; in rds_ib_send_ack()
667 ib_dma_sync_single_for_cpu(ic->rds_ibdev->dev, ic->i_ack_dma, in rds_ib_send_ack()
670 hdr->h_ack = cpu_to_be64(seq); in rds_ib_send_ack()
671 hdr->h_credit = adv_credits; in rds_ib_send_ack()
673 ib_dma_sync_single_for_device(ic->rds_ibdev->dev, ic->i_ack_dma, in rds_ib_send_ack()
676 ic->i_ack_queued = jiffies; in rds_ib_send_ack()
678 ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, NULL); in rds_ib_send_ack()
683 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags); in rds_ib_send_ack()
684 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); in rds_ib_send_ack()
688 rds_ib_conn_error(ic->conn, "sending ack failed\n"); in rds_ib_send_ack()
695 * 1. We call rds_ib_attempt_ack from the recv completion handler
696 * to send an ACK-only frame.
708 * - i_ack_flags, which keeps track of whether the ACK WR
709 * is currently in the send queue or not (IB_ACK_IN_FLIGHT)
710 * - i_ack_next, which is the last sequence number we received
713 * It would be nice to not have to use a spinlock to synchronize things,
715 * not atomic on all platforms. Things would be a lot simpler if
728 * When we get here, we're called from the recv queue handler.
735 if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags)) in rds_ib_attempt_ack()
738 if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) { in rds_ib_attempt_ack()
746 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags); in rds_ib_attempt_ack()
750 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags); in rds_ib_attempt_ack()
760 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags); in rds_ib_ack_send_complete()
770 if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags)) in rds_ib_piggyb_ack()
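
The comment block above spells out the two pieces of ACK state: IB_ACK_REQUESTED (an ACK is owed) and IB_ACK_IN_FLIGHT (the single ACK work request is currently posted), plus i_ack_next for the sequence number to acknowledge. The sketch below shows just that two-bit handshake with C11 atomics; send_ack_frame() is a hypothetical stand-in for posting the ACK WR, and the send-credit check from the kernel path is omitted since those lines are not part of this listing.

/* ack_state_sketch.c - the REQUESTED/IN_FLIGHT handshake (illustrative only) */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ACK_REQUESTED  (1u << 0)
#define ACK_IN_FLIGHT  (1u << 1)

static atomic_uint      ack_flags;
static _Atomic uint64_t ack_next;       /* last sequence we were asked to ACK */

static void set_ack(uint64_t seq, bool required)
{
    atomic_store(&ack_next, seq);
    if (required)
        atomic_fetch_or(&ack_flags, ACK_REQUESTED);
}

/* hypothetical stand-in for building and posting the long-lived ACK WR */
static void send_ack_frame(uint64_t seq)
{
    printf("ACK %llu posted\n", (unsigned long long)seq);
}

static void attempt_ack(void)
{
    if (!(atomic_load(&ack_flags) & ACK_REQUESTED))
        return;                         /* nothing asked for an ACK */

    /* only one ACK WR may be outstanding; if it is already posted, back off
     * and let the send-completion path pick the request up later */
    if (atomic_fetch_or(&ack_flags, ACK_IN_FLIGHT) & ACK_IN_FLIGHT)
        return;

    atomic_fetch_and(&ack_flags, ~ACK_REQUESTED);
    send_ack_frame(atomic_load(&ack_next));
}

/* called from the send completion handler once the ACK frame is on the wire */
static void ack_send_complete(void)
{
    atomic_fetch_and(&ack_flags, ~ACK_IN_FLIGHT);
}

int main(void)
{
    set_ack(42, true);
    attempt_ack();
    ack_send_complete();
    return 0;
}
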
777 * long-lived bitmaps. We could have posted the bitmaps and rdma written into
797 if (be32_to_cpu(ibinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES) in rds_ib_cong_recv()
800 map = conn->c_fcong; in rds_ib_cong_recv()
804 frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item); in rds_ib_cong_recv()
813 to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off); in rds_ib_cong_recv()
816 addr = kmap_atomic(sg_page(&frag->f_sg)); in rds_ib_cong_recv()
818 src = addr + frag->f_sg.offset + frag_off; in rds_ib_cong_recv()
819 dst = (void *)map->m_page_addrs[map_page] + map_off; in rds_ib_cong_recv()
838 frag = list_entry(frag->f_item.next, in rds_ib_cong_recv()
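
The congestion-map copy above moves data that is chunked as RDS_FRAG_SIZE fragments on one side into PAGE_SIZE map pages on the other, so each step copies min(bytes left in the fragment, bytes left in the page) and advances both cursors. The kernel version copies 64-bit words under kmap_atomic() and tracks newly uncongested bits as it goes; the sketch below shows only the cursor arithmetic, with made-up sizes and flat buffers standing in for the frag list and map pages.

/* cong_copy_sketch.c - copying a bitmap that is chunked differently on each side */
#include <stddef.h>
#include <string.h>
#include <stdio.h>

#define FRAG_SIZE 4096u     /* stands in for RDS_FRAG_SIZE */
#define PAGE_SZ   8192u     /* stands in for PAGE_SIZE; values are illustrative */
#define MAP_BYTES 16384u    /* stands in for RDS_CONG_MAP_BYTES */

static size_t min_sz(size_t a, size_t b) { return a < b ? a : b; }

static void cong_copy(unsigned char src_frags[][FRAG_SIZE],
                      unsigned char dst_pages[][PAGE_SZ])
{
    size_t copied = 0, frag = 0, frag_off = 0, page = 0, page_off = 0;

    while (copied < MAP_BYTES) {
        size_t to_copy = min_sz(FRAG_SIZE - frag_off, PAGE_SZ - page_off);

        memcpy(&dst_pages[page][page_off], &src_frags[frag][frag_off], to_copy);

        copied   += to_copy;
        frag_off += to_copy;
        page_off += to_copy;
        if (frag_off == FRAG_SIZE) { frag++; frag_off = 0; }   /* next fragment */
        if (page_off == PAGE_SZ)   { page++; page_off = 0; }   /* next map page */
    }
}

int main(void)
{
    static unsigned char src[MAP_BYTES / FRAG_SIZE][FRAG_SIZE];
    static unsigned char dst[MAP_BYTES / PAGE_SZ][PAGE_SZ];
    cong_copy(src, dst);
    printf("copied %u bytes\n", MAP_BYTES);
    return 0;
}
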
849 struct rds_ib_recv_work *recv, u32 data_len, in rds_ib_process_recv() argument
852 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_process_recv()
853 struct rds_ib_incoming *ibinc = ic->i_ibinc; in rds_ib_process_recv()
855 dma_addr_t dma_addr = ic->i_recv_hdrs_dma[recv - ic->i_recvs]; in rds_ib_process_recv()
859 rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv, in rds_ib_process_recv()
867 &conn->c_faddr); in rds_ib_process_recv()
870 data_len -= sizeof(struct rds_header); in rds_ib_process_recv()
872 ihdr = ic->i_recv_hdrs[recv - ic->i_recvs]; in rds_ib_process_recv()
874 ib_dma_sync_single_for_cpu(ic->rds_ibdev->dev, dma_addr, in rds_ib_process_recv()
879 "from %pI6c has corrupted header - " in rds_ib_process_recv()
881 &conn->c_faddr); in rds_ib_process_recv()
887 state->ack_recv = be64_to_cpu(ihdr->h_ack); in rds_ib_process_recv()
888 state->ack_recv_valid = 1; in rds_ib_process_recv()
891 if (ihdr->h_credit) in rds_ib_process_recv()
892 rds_ib_send_add_credits(conn, ihdr->h_credit); in rds_ib_process_recv()
894 if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) { in rds_ib_process_recv()
895 /* This is an ACK-only packet. The fact that it gets in rds_ib_process_recv()
904 * page ref ourselves. We can't just leave the page on the recv in rds_ib_process_recv()
905 * because that confuses the dma mapping of pages and each recv's use in rds_ib_process_recv()
910 rds_ib_frag_free(ic, recv->r_frag); in rds_ib_process_recv()
911 recv->r_frag = NULL; in rds_ib_process_recv()
922 ibinc = recv->r_ibinc; in rds_ib_process_recv()
923 recv->r_ibinc = NULL; in rds_ib_process_recv()
924 ic->i_ibinc = ibinc; in rds_ib_process_recv()
926 hdr = &ibinc->ii_inc.i_hdr; in rds_ib_process_recv()
927 ibinc->ii_inc.i_rx_lat_trace[RDS_MSG_RX_HDR] = in rds_ib_process_recv()
930 ic->i_recv_data_rem = be32_to_cpu(hdr->h_len); in rds_ib_process_recv()
931 ibinc->ii_inc.i_rx_lat_trace[RDS_MSG_RX_START] = in rds_ib_process_recv()
935 ic->i_recv_data_rem, hdr->h_flags); in rds_ib_process_recv()
937 hdr = &ibinc->ii_inc.i_hdr; in rds_ib_process_recv()
940 if (hdr->h_sequence != ihdr->h_sequence || in rds_ib_process_recv()
941 hdr->h_len != ihdr->h_len || in rds_ib_process_recv()
942 hdr->h_sport != ihdr->h_sport || in rds_ib_process_recv()
943 hdr->h_dport != ihdr->h_dport) { in rds_ib_process_recv()
950 list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags); in rds_ib_process_recv()
951 recv->r_frag = NULL; in rds_ib_process_recv()
953 if (ic->i_recv_data_rem > RDS_FRAG_SIZE) in rds_ib_process_recv()
954 ic->i_recv_data_rem -= RDS_FRAG_SIZE; in rds_ib_process_recv()
956 ic->i_recv_data_rem = 0; in rds_ib_process_recv()
957 ic->i_ibinc = NULL; in rds_ib_process_recv()
959 if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP) { in rds_ib_process_recv()
962 rds_recv_incoming(conn, &conn->c_faddr, &conn->c_laddr, in rds_ib_process_recv()
963 &ibinc->ii_inc, GFP_ATOMIC); in rds_ib_process_recv()
964 state->ack_next = be64_to_cpu(hdr->h_sequence); in rds_ib_process_recv()
965 state->ack_next_valid = 1; in rds_ib_process_recv()
971 if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) { in rds_ib_process_recv()
973 state->ack_required = 1; in rds_ib_process_recv()
976 rds_inc_put(&ibinc->ii_inc); in rds_ib_process_recv()
979 ib_dma_sync_single_for_device(ic->rds_ibdev->dev, dma_addr, in rds_ib_process_recv()
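
rds_ib_process_recv() stitches fragments into one incoming message: the first data fragment donates its preallocated rds_ib_incoming to ic->i_ibinc, every fragment is appended to ii_frags, and i_recv_data_rem counts down by RDS_FRAG_SIZE until the message is complete and handed to rds_recv_incoming(). The schematic, compilable sketch below shows only that bookkeeping; header validation, the ACK-only fast path, and congestion-map delivery are left out, and the names here are simplified stand-ins rather than the kernel structures.

/* reassembly_sketch.c - how fragments accumulate into one incoming message */
#include <stdint.h>
#include <stdio.h>

#define FRAG_SIZE 4096u             /* stands in for RDS_FRAG_SIZE */

struct incoming {                   /* stands in for rds_ib_incoming */
    uint32_t len;
    uint32_t nr_frags;
};

static struct incoming *cur;        /* stands in for ic->i_ibinc */
static uint32_t data_rem;           /* stands in for ic->i_recv_data_rem */

static void deliver(struct incoming *inc)   /* stands in for rds_recv_incoming() */
{
    printf("delivered %u bytes in %u frags\n",
           (unsigned)inc->len, (unsigned)inc->nr_frags);
}

/* one call per completed recv that carried data */
static void process_frag(struct incoming *fresh, uint32_t hdr_len)
{
    if (!cur) {                     /* first fragment: adopt the inc that rode on this recv */
        cur = fresh;
        cur->len = hdr_len;
        data_rem = hdr_len;
    }
    /* the kernel also checks that sequence/len/ports match the message in progress */

    cur->nr_frags++;                /* kernel: list_add_tail(&frag->f_item, &ii_frags) */

    if (data_rem > FRAG_SIZE) {
        data_rem -= FRAG_SIZE;
    } else {                        /* last fragment: hand the message up and reset */
        data_rem = 0;
        deliver(cur);
        cur = NULL;
    }
}

int main(void)
{
    struct incoming inc = { 0, 0 };
    process_frag(&inc, 10000);      /* 10000 bytes arrive as three fragments */
    process_frag(&inc, 10000);
    process_frag(&inc, 10000);
    return 0;
}
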
987 struct rds_connection *conn = ic->conn; in rds_ib_recv_cqe_handler()
988 struct rds_ib_recv_work *recv; in rds_ib_recv_cqe_handler() local
991 (unsigned long long)wc->wr_id, wc->status, in rds_ib_recv_cqe_handler()
992 ib_wc_status_msg(wc->status), wc->byte_len, in rds_ib_recv_cqe_handler()
993 be32_to_cpu(wc->ex.imm_data)); in rds_ib_recv_cqe_handler()
996 recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)]; in rds_ib_recv_cqe_handler()
997 ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, in rds_ib_recv_cqe_handler()
1001 * to get a recv completion _before_ the rdmacm ESTABLISHED in rds_ib_recv_cqe_handler()
1004 if (wc->status == IB_WC_SUCCESS) { in rds_ib_recv_cqe_handler()
1005 rds_ib_process_recv(conn, recv, wc->byte_len, state); in rds_ib_recv_cqe_handler()
1009 …rds_ib_conn_error(conn, "recv completion on <%pI6c,%pI6c, %d> had status %u (%s), vendor err 0x%x,… in rds_ib_recv_cqe_handler()
1010 &conn->c_laddr, &conn->c_faddr, in rds_ib_recv_cqe_handler()
1011 conn->c_tos, wc->status, in rds_ib_recv_cqe_handler()
1012 ib_wc_status_msg(wc->status), in rds_ib_recv_cqe_handler()
1013 wc->vendor_err); in rds_ib_recv_cqe_handler()
1017 * we might not have called it at all if the wc didn't indicate in rds_ib_recv_cqe_handler()
1020 * that it will not find an allocated frag here. Make sure we in rds_ib_recv_cqe_handler()
1023 if (recv->r_frag) { in rds_ib_recv_cqe_handler()
1024 rds_ib_frag_free(ic, recv->r_frag); in rds_ib_recv_cqe_handler()
1025 recv->r_frag = NULL; in rds_ib_recv_cqe_handler()
1027 rds_ib_ring_free(&ic->i_recv_ring, 1); in rds_ib_recv_cqe_handler()
1029 /* If we ever end up with a really empty receive ring, we're in rds_ib_recv_cqe_handler()
1032 if (rds_ib_ring_empty(&ic->i_recv_ring)) in rds_ib_recv_cqe_handler()
1035 if (rds_ib_ring_low(&ic->i_recv_ring)) { in rds_ib_recv_cqe_handler()
1043 struct rds_connection *conn = cp->cp_conn; in rds_ib_recv_path()
1044 struct rds_ib_connection *ic = conn->c_transport_data; in rds_ib_recv_path()
1059 int ret = -ENOMEM; in rds_ib_recv_init()
1061 /* Default to 30% of all available RAM for recv memory */ in rds_ib_recv_init()
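
The last matched comment sets the default ceiling on receive-buffer memory. The arithmetic amounts to taking roughly a third of total RAM and expressing it as a count of RDS_FRAG_SIZE fragments; in the kernel this presumably comes from si_meminfo() and lands in the receive-allocation sysctl, but those lines are not part of this listing, so the sketch below only illustrates the calculation with made-up numbers.

/* recv_budget_sketch.c - turning "~30% of RAM" into a fragment budget */
#include <stdio.h>

int main(void)
{
    unsigned long total_pages = 4UL * 1024 * 1024;  /* pretend 16 GiB of 4 KiB pages */
    unsigned long page_size   = 4096;
    unsigned long frag_size   = 4096;               /* RDS_FRAG_SIZE */

    /* roughly a third of system memory, expressed as a number of fragments */
    unsigned long max_recv_frags = total_pages / 3 * page_size / frag_size;

    printf("max recv allocation: %lu fragments\n", max_recv_frags);
    return 0;
}
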