Lines Matching +full:link +full:-
1 // SPDX-License-Identifier: GPL-2.0
3 * Shared Memory Communications over RDMA (SMC-R) and RoCE
17 * SMC uses this to exchange LLC (link layer control)
44 struct smc_link *link; member
52 /*------------------------------- completion --------------------------------*/
54 /* returns true if at least one tx work request is pending on the given link */
55 static inline bool smc_wr_is_tx_pend(struct smc_link *link) in smc_wr_is_tx_pend() argument
57 if (find_first_bit(link->wr_tx_mask, link->wr_tx_cnt) != in smc_wr_is_tx_pend()
58 link->wr_tx_cnt) { in smc_wr_is_tx_pend()
64 /* wait till all pending tx work requests on the given link are completed */
65 void smc_wr_tx_wait_no_pending_sends(struct smc_link *link) in smc_wr_tx_wait_no_pending_sends() argument
67 wait_event(link->wr_tx_wait, !smc_wr_is_tx_pend(link)); in smc_wr_tx_wait_no_pending_sends()
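
The two helpers above treat the send ring as a bitmap of in-flight work requests: a send is pending while any bit below wr_tx_cnt is set in wr_tx_mask, and smc_wr_tx_wait_no_pending_sends() simply sleeps on wr_tx_wait until that bitmap drains. A minimal userspace sketch of the pending check, assuming a fixed ring size; TX_CNT, tx_mask and any_tx_pending() are illustrative names, not from the source:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))
#define TX_CNT 16	/* size of the send ring; SMC derives this from SMC_WR_BUF_CNT */

static unsigned long tx_mask[(TX_CNT + BITS_PER_LONG - 1) / BITS_PER_LONG];

/* analogue of smc_wr_is_tx_pend(): true if any slot bit below TX_CNT is set */
static bool any_tx_pending(void)
{
	for (unsigned int i = 0; i < TX_CNT; i++)
		if (tx_mask[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
			return true;
	return false;	/* same outcome as find_first_bit(mask, cnt) == cnt */
}

int main(void)
{
	memset(tx_mask, 0, sizeof(tx_mask));
	printf("pending: %d\n", any_tx_pending());	/* 0: ring is idle */
	tx_mask[0] |= 1UL << 5;				/* mark slot 5 as in flight */
	printf("pending: %d\n", any_tx_pending());	/* 1 */
	return 0;
}
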
70 static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id) in smc_wr_tx_find_pending_index() argument
74 for (i = 0; i < link->wr_tx_cnt; i++) { in smc_wr_tx_find_pending_index()
75 if (link->wr_tx_pends[i].wr_id == wr_id) in smc_wr_tx_find_pending_index()
78 return link->wr_tx_cnt; in smc_wr_tx_find_pending_index()
84 struct smc_link *link; in smc_wr_tx_process_cqe() local
87 link = wc->qp->qp_context; in smc_wr_tx_process_cqe()
89 if (wc->opcode == IB_WC_REG_MR) { in smc_wr_tx_process_cqe()
90 if (wc->status) in smc_wr_tx_process_cqe()
91 link->wr_reg_state = FAILED; in smc_wr_tx_process_cqe()
93 link->wr_reg_state = CONFIRMED; in smc_wr_tx_process_cqe()
94 smc_wr_wakeup_reg_wait(link); in smc_wr_tx_process_cqe()
98 pnd_snd_idx = smc_wr_tx_find_pending_index(link, wc->wr_id); in smc_wr_tx_process_cqe()
99 if (pnd_snd_idx == link->wr_tx_cnt) in smc_wr_tx_process_cqe()
101 link->wr_tx_pends[pnd_snd_idx].wc_status = wc->status; in smc_wr_tx_process_cqe()
102 if (link->wr_tx_pends[pnd_snd_idx].compl_requested) in smc_wr_tx_process_cqe()
103 complete(&link->wr_tx_compl[pnd_snd_idx]); in smc_wr_tx_process_cqe()
104 memcpy(&pnd_snd, &link->wr_tx_pends[pnd_snd_idx], sizeof(pnd_snd)); in smc_wr_tx_process_cqe()
106 memset(&link->wr_tx_pends[pnd_snd_idx], 0, in smc_wr_tx_process_cqe()
107 sizeof(link->wr_tx_pends[pnd_snd_idx])); in smc_wr_tx_process_cqe()
108 memset(&link->wr_tx_bufs[pnd_snd_idx], 0, in smc_wr_tx_process_cqe()
109 sizeof(link->wr_tx_bufs[pnd_snd_idx])); in smc_wr_tx_process_cqe()
110 if (!test_and_clear_bit(pnd_snd_idx, link->wr_tx_mask)) in smc_wr_tx_process_cqe()
112 if (wc->status) { in smc_wr_tx_process_cqe()
113 /* terminate link */ in smc_wr_tx_process_cqe()
114 smcr_link_down_cond_sched(link); in smc_wr_tx_process_cqe()
117 pnd_snd.handler(&pnd_snd.priv, link, wc->status); in smc_wr_tx_process_cqe()
118 wake_up(&link->wr_tx_wait); in smc_wr_tx_process_cqe()
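
Everything the send-completion path needs must be reachable from the 64-bit wr_id in the CQE: the handler scans wr_tx_pends[] for the matching entry, copies it to a local variable before wiping the slot (so the slot can be reused immediately), clears the slot bit, and only then runs the per-message handler with the completion status and wakes slot waiters. A condensed userspace sketch of that order of operations, with invented stand-in types and names (pend_slot, process_tx_completion, on_send_done):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TX_CNT 4

struct pend_slot {
	uint64_t wr_id;
	int in_use;
	void (*handler)(uint64_t wr_id, int status);	/* like the per-message tx handler */
};

static struct pend_slot tx_pends[TX_CNT];

static void process_tx_completion(uint64_t wr_id, int status)
{
	struct pend_slot done;
	unsigned int i;

	for (i = 0; i < TX_CNT; i++)			/* smc_wr_tx_find_pending_index() analogue */
		if (tx_pends[i].in_use && tx_pends[i].wr_id == wr_id)
			break;
	if (i == TX_CNT)
		return;					/* unknown wr_id: slot already reaped */

	done = tx_pends[i];				/* copy out before the slot can be reused */
	memset(&tx_pends[i], 0, sizeof(tx_pends[i]));	/* free the slot */
	if (done.handler)
		done.handler(done.wr_id, status);	/* run the completion callback last */
}

static void on_send_done(uint64_t wr_id, int status)
{
	printf("wr_id %llu completed, status %d\n", (unsigned long long)wr_id, status);
}

int main(void)
{
	tx_pends[2] = (struct pend_slot){ .wr_id = 42, .in_use = 1, .handler = on_send_done };
	process_tx_completion(42, 0);
	return 0;
}
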
132 rc = ib_poll_cq(dev->roce_cq_send, SMC_WR_MAX_POLL_CQE, wc); in smc_wr_tx_tasklet_fn()
134 ib_req_notify_cq(dev->roce_cq_send, in smc_wr_tx_tasklet_fn()
151 tasklet_schedule(&dev->send_tasklet); in smc_wr_tx_cq_handler()
154 /*---------------------------- request submission ---------------------------*/
156 static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx) in smc_wr_tx_get_free_slot_index() argument
158 *idx = link->wr_tx_cnt; in smc_wr_tx_get_free_slot_index()
159 if (!smc_link_sendable(link)) in smc_wr_tx_get_free_slot_index()
160 return -ENOLINK; in smc_wr_tx_get_free_slot_index()
161 for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) { in smc_wr_tx_get_free_slot_index()
162 if (!test_and_set_bit(*idx, link->wr_tx_mask)) in smc_wr_tx_get_free_slot_index()
165 *idx = link->wr_tx_cnt; in smc_wr_tx_get_free_slot_index()
166 return -EBUSY; in smc_wr_tx_get_free_slot_index()
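
Because the completion path clears bits concurrently, finding a clear bit is not enough: smc_wr_tx_get_free_slot_index() walks the clear bits and claims one with test_and_set_bit(), moving on to the next candidate if another CPU wins the race, and reports -EBUSY only when every slot is taken. A minimal sketch of that claim loop with C11 atomics (claim_free_slot, release_slot and slot_busy[] are illustrative, not the kernel's names):

#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

#define TX_CNT 16

static atomic_bool slot_busy[TX_CNT];	/* one flag per send slot, false == free */

/* returns a claimed slot index, or -EBUSY when every slot is currently in use */
static int claim_free_slot(void)
{
	for (int i = 0; i < TX_CNT; i++) {
		/* atomic_exchange() plays the role of test_and_set_bit(): it returns
		 * the previous value, so seeing "false" means this thread won the slot.
		 */
		if (!atomic_exchange(&slot_busy[i], true))
			return i;
	}
	return -EBUSY;	/* caller may sleep on a waitqueue and retry, or give up */
}

static void release_slot(int idx)
{
	atomic_store(&slot_busy[idx], false);	/* test_and_clear_bit() analogue */
}

int main(void)
{
	int a = claim_free_slot();
	int b = claim_free_slot();

	printf("claimed slots %d and %d\n", a, b);
	release_slot(b);
	release_slot(a);
	return 0;
}
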
170 * smc_wr_tx_get_free_slot() - returns buffer for message assembly,
172 * @link: Pointer to smc_link used to later send the message.
178 * Return: 0 on success, or -errno on error.
180 int smc_wr_tx_get_free_slot(struct smc_link *link, in smc_wr_tx_get_free_slot() argument
186 struct smc_link_group *lgr = smc_get_lgr(link); in smc_wr_tx_get_free_slot()
188 u32 idx = link->wr_tx_cnt; in smc_wr_tx_get_free_slot()
195 if (in_softirq() || lgr->terminating) { in smc_wr_tx_get_free_slot()
196 rc = smc_wr_tx_get_free_slot_index(link, &idx); in smc_wr_tx_get_free_slot()
201 link->wr_tx_wait, in smc_wr_tx_get_free_slot()
202 !smc_link_sendable(link) || in smc_wr_tx_get_free_slot()
203 lgr->terminating || in smc_wr_tx_get_free_slot()
204 (smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY), in smc_wr_tx_get_free_slot()
207 /* timeout - terminate link */ in smc_wr_tx_get_free_slot()
208 smcr_link_down_cond_sched(link); in smc_wr_tx_get_free_slot()
209 return -EPIPE; in smc_wr_tx_get_free_slot()
211 if (idx == link->wr_tx_cnt) in smc_wr_tx_get_free_slot()
212 return -EPIPE; in smc_wr_tx_get_free_slot()
214 wr_id = smc_wr_tx_get_next_wr_id(link); in smc_wr_tx_get_free_slot()
215 wr_pend = &link->wr_tx_pends[idx]; in smc_wr_tx_get_free_slot()
216 wr_pend->wr_id = wr_id; in smc_wr_tx_get_free_slot()
217 wr_pend->handler = handler; in smc_wr_tx_get_free_slot()
218 wr_pend->link = link; in smc_wr_tx_get_free_slot()
219 wr_pend->idx = idx; in smc_wr_tx_get_free_slot()
220 wr_ib = &link->wr_tx_ibs[idx]; in smc_wr_tx_get_free_slot()
221 wr_ib->wr_id = wr_id; in smc_wr_tx_get_free_slot()
222 *wr_buf = &link->wr_tx_bufs[idx]; in smc_wr_tx_get_free_slot()
224 *wr_rdma_buf = &link->wr_tx_rdmas[idx]; in smc_wr_tx_get_free_slot()
225 *wr_pend_priv = &wr_pend->priv; in smc_wr_tx_get_free_slot()
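
The caller-facing wrapper hides -EBUSY differently depending on context: in softirq or on a terminating link group it must not sleep, so a single failed scan is final, while in process context it sleeps on wr_tx_wait until a completion frees a slot, the link stops being sendable, or the free-slot timeout fires, and a timeout escalates to scheduling link teardown (-EPIPE). A userspace analogue of the blocking half, built on a pthread condition variable with an absolute deadline; wait_for_free_slot(), slot_lock, slot_cond and the 10-second timeout are all illustrative:

#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

#define TX_CNT 16

static pthread_mutex_t slot_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t slot_cond = PTHREAD_COND_INITIALIZER;	/* wr_tx_wait analogue */
static bool slot_busy[TX_CNT];
static bool link_sendable = true;

static int claim_slot_locked(void)	/* caller holds slot_lock */
{
	for (int i = 0; i < TX_CNT; i++)
		if (!slot_busy[i]) {
			slot_busy[i] = true;
			return i;
		}
	return -EBUSY;
}

/* blocking variant: wait up to timeout_sec for a free slot, like wait_event_timeout() */
static int wait_for_free_slot(int timeout_sec)
{
	struct timespec deadline;
	int idx;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_sec;

	pthread_mutex_lock(&slot_lock);
	while ((idx = claim_slot_locked()) == -EBUSY && link_sendable) {
		if (pthread_cond_timedwait(&slot_cond, &slot_lock, &deadline) == ETIMEDOUT) {
			idx = -EPIPE;	/* timed out: the kernel now tears the link down */
			break;
		}
	}
	if (idx == -EBUSY)
		idx = -ENOLINK;		/* loop ended because the link became unusable */
	pthread_mutex_unlock(&slot_lock);
	return idx;
}

static void release_slot(int idx)
{
	pthread_mutex_lock(&slot_lock);
	slot_busy[idx] = false;
	pthread_cond_broadcast(&slot_cond);	/* wake_up(&link->wr_tx_wait) analogue */
	pthread_mutex_unlock(&slot_lock);
}

int main(void)
{
	int idx = wait_for_free_slot(10);

	if (idx >= 0)
		release_slot(idx);
	return idx < 0;
}
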
229 int smc_wr_tx_put_slot(struct smc_link *link, in smc_wr_tx_put_slot() argument
235 if (pend->idx < link->wr_tx_cnt) { in smc_wr_tx_put_slot()
236 u32 idx = pend->idx; in smc_wr_tx_put_slot()
239 memset(&link->wr_tx_pends[idx], 0, in smc_wr_tx_put_slot()
240 sizeof(link->wr_tx_pends[idx])); in smc_wr_tx_put_slot()
241 memset(&link->wr_tx_bufs[idx], 0, in smc_wr_tx_put_slot()
242 sizeof(link->wr_tx_bufs[idx])); in smc_wr_tx_put_slot()
243 test_and_clear_bit(idx, link->wr_tx_mask); in smc_wr_tx_put_slot()
244 wake_up(&link->wr_tx_wait); in smc_wr_tx_put_slot()
254 int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv) in smc_wr_tx_send() argument
259 ib_req_notify_cq(link->smcibdev->roce_cq_send, in smc_wr_tx_send()
262 rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx], NULL); in smc_wr_tx_send()
264 smc_wr_tx_put_slot(link, priv); in smc_wr_tx_send()
265 smcr_link_down_cond_sched(link); in smc_wr_tx_send()
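
smc_wr_tx_send() itself stays small: re-arm the send CQ, post the work request with ib_post_send(), and on failure return the slot with smc_wr_tx_put_slot() and schedule link teardown rather than leaking the reservation. A sketch of that claim/post/rollback shape with a stubbed post function (fake_post_send(), send_one() and the tiny boolean ring are purely illustrative):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool slot_busy[16];

static int claim_slot(void)
{
	for (int i = 0; i < 16; i++)
		if (!slot_busy[i]) {
			slot_busy[i] = true;
			return i;
		}
	return -EBUSY;
}

static void put_slot(int idx)		/* smc_wr_tx_put_slot() analogue */
{
	slot_busy[idx] = false;
}

static int fake_post_send(int idx)	/* stands in for ib_post_send() */
{
	(void)idx;
	return 0;			/* pretend the HCA accepted the work request */
}

static int send_one(void)
{
	int idx = claim_slot();
	int rc;

	if (idx < 0)
		return idx;
	rc = fake_post_send(idx);
	if (rc) {
		put_slot(idx);		/* undo the reservation on a failed post ... */
		/* ... and this is where the kernel schedules link teardown */
	}
	return rc;
}

int main(void)
{
	printf("send rc=%d\n", send_one());
	return 0;
}
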
274 int smc_wr_tx_send_wait(struct smc_link *link, struct smc_wr_tx_pend_priv *priv, in smc_wr_tx_send_wait() argument
282 pend->compl_requested = 1; in smc_wr_tx_send_wait()
283 pnd_idx = pend->idx; in smc_wr_tx_send_wait()
284 init_completion(&link->wr_tx_compl[pnd_idx]); in smc_wr_tx_send_wait()
286 rc = smc_wr_tx_send(link, priv); in smc_wr_tx_send_wait()
291 &link->wr_tx_compl[pnd_idx], timeout); in smc_wr_tx_send_wait()
293 rc = -ENODATA; in smc_wr_tx_send_wait()
300 int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr) in smc_wr_reg_send() argument
304 ib_req_notify_cq(link->smcibdev->roce_cq_send, in smc_wr_reg_send()
306 link->wr_reg_state = POSTED; in smc_wr_reg_send()
307 link->wr_reg.wr.wr_id = (u64)(uintptr_t)mr; in smc_wr_reg_send()
308 link->wr_reg.mr = mr; in smc_wr_reg_send()
309 link->wr_reg.key = mr->rkey; in smc_wr_reg_send()
310 rc = ib_post_send(link->roce_qp, &link->wr_reg.wr, NULL); in smc_wr_reg_send()
314 atomic_inc(&link->wr_reg_refcnt); in smc_wr_reg_send()
315 rc = wait_event_interruptible_timeout(link->wr_reg_wait, in smc_wr_reg_send()
316 (link->wr_reg_state != POSTED), in smc_wr_reg_send()
318 if (atomic_dec_and_test(&link->wr_reg_refcnt)) in smc_wr_reg_send()
319 wake_up_all(&link->wr_reg_wait); in smc_wr_reg_send()
321 /* timeout - terminate link */ in smc_wr_reg_send()
322 smcr_link_down_cond_sched(link); in smc_wr_reg_send()
323 return -EPIPE; in smc_wr_reg_send()
325 if (rc == -ERESTARTSYS) in smc_wr_reg_send()
326 return -EINTR; in smc_wr_reg_send()
327 switch (link->wr_reg_state) { in smc_wr_reg_send()
332 rc = -EIO; in smc_wr_reg_send()
335 rc = -EPIPE; in smc_wr_reg_send()
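
smc_wr_reg_send() posts an IB_WR_REG_MR work request and then turns the asynchronous completion into a synchronous result: wr_reg_state goes POSTED to CONFIRMED or FAILED in the IB_WC_REG_MR branch of the send-CQE handler shown earlier, while the caller sleeps interruptibly with a timeout (an interrupted wait is reported as -EINTR) and holds wr_reg_refcnt so link teardown can wait for it. A reduced userspace model of just the state machine and its result mapping; reg_complete(), reg_wait_result() and the lock/condvar pair are invented names, and the refcount and the actual verbs call are left out:

#include <errno.h>
#include <pthread.h>
#include <time.h>

enum reg_state { POSTED, CONFIRMED, FAILED };	/* mirrors link->wr_reg_state */

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t reg_wait = PTHREAD_COND_INITIALIZER;	/* wr_reg_wait analogue */
static enum reg_state state = POSTED;

/* completion-handler side: record success or failure and wake the waiter */
static void reg_complete(int wc_status)
{
	pthread_mutex_lock(&reg_lock);
	state = wc_status ? FAILED : CONFIRMED;
	pthread_cond_broadcast(&reg_wait);
	pthread_mutex_unlock(&reg_lock);
}

/* caller side: wait for the state to leave POSTED and map the outcome to an errno */
static int reg_wait_result(int timeout_sec)
{
	struct timespec dl;
	int rc = 0, err;

	clock_gettime(CLOCK_REALTIME, &dl);
	dl.tv_sec += timeout_sec;

	pthread_mutex_lock(&reg_lock);
	while (state == POSTED && rc == 0)
		rc = pthread_cond_timedwait(&reg_wait, &reg_lock, &dl);
	switch (state) {
	case CONFIRMED:
		err = 0;
		break;
	case FAILED:
		err = -EIO;	/* the registration WR completed with an error */
		break;
	default:
		err = -EPIPE;	/* still POSTED: timed out, the link would be torn down */
		break;
	}
	pthread_mutex_unlock(&reg_lock);
	return err;
}

int main(void)
{
	reg_complete(0);		/* pretend the CQE arrived with status 0 */
	return reg_wait_result(1);	/* 0: state is CONFIRMED */
}
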
349 hash_for_each_possible(smc_wr_rx_hash, h_iter, list, handler->type) { in smc_wr_rx_register_handler()
350 if (h_iter->type == handler->type) { in smc_wr_rx_register_handler()
351 rc = -EEXIST; in smc_wr_rx_register_handler()
355 hash_add(smc_wr_rx_hash, &handler->list, handler->type); in smc_wr_rx_register_handler()
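
Receive demultiplexing is table-driven: each message type registers a handler once, keyed by the one-byte type field at the start of the receive buffer, and duplicate registrations are rejected with -EEXIST. The kernel keeps these handlers in a small hashtable; since the key is a single byte, the same behaviour can be sketched with a flat table (rx_handler_tbl, register_rx_handler() and dispatch_rx() are illustrative names):

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* one handler per wire message type, like struct smc_wr_rx_handler */
typedef void (*rx_handler_fn)(const void *msg, size_t len);

static rx_handler_fn rx_handler_tbl[256];	/* keyed by the u8 "type" field */

static int register_rx_handler(uint8_t type, rx_handler_fn fn)
{
	if (rx_handler_tbl[type])
		return -EEXIST;		/* same duplicate check as smc_wr_rx_register_handler() */
	rx_handler_tbl[type] = fn;
	return 0;
}

/* called for every received buffer: look at the type byte and dispatch */
static void dispatch_rx(const uint8_t *msg, size_t len)
{
	rx_handler_fn fn;

	if (len < 1)
		return;			/* too short to even carry a type field */
	fn = rx_handler_tbl[msg[0]];
	if (fn)
		fn(msg, len);		/* unknown types are silently dropped */
}

static void handle_type_1(const void *msg, size_t len)
{
	(void)msg;
	printf("got a type-1 message of %zu bytes\n", len);
}

int main(void)
{
	uint8_t buf[8] = { 0x01 };

	register_rx_handler(0x01, handle_type_1);
	dispatch_rx(buf, sizeof(buf));
	return 0;
}
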
367 struct smc_link *link = (struct smc_link *)wc->qp->qp_context; in smc_wr_rx_demultiplex() local
373 if (wc->byte_len < sizeof(*wr_rx)) in smc_wr_rx_demultiplex()
375 temp_wr_id = wc->wr_id; in smc_wr_rx_demultiplex()
376 index = do_div(temp_wr_id, link->wr_rx_cnt); in smc_wr_rx_demultiplex()
377 wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[index]; in smc_wr_rx_demultiplex()
378 hash_for_each_possible(smc_wr_rx_hash, handler, list, wr_rx->type) { in smc_wr_rx_demultiplex()
379 if (handler->type == wr_rx->type) in smc_wr_rx_demultiplex()
380 handler->handler(wc, wr_rx); in smc_wr_rx_demultiplex()
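
On the receive side the wr_id is not looked up in a table at all: receive buffers live in one ring of wr_rx_cnt slots, and the buffer index is simply the 64-bit wr_id modulo the ring size; do_div() is used so the code does not depend on native 64-bit division on 32-bit architectures. A few lines showing the same mapping in plain C (rx_index_for() is an invented helper):

#include <stdint.h>
#include <stdio.h>

/* do_div(wr_id, cnt) divides in place and returns the remainder;
 * in plain C the same ring index is just wr_id % cnt.
 */
static uint32_t rx_index_for(uint64_t wr_id, uint32_t rx_cnt)
{
	return (uint32_t)(wr_id % rx_cnt);
}

int main(void)
{
	uint32_t rx_cnt = 48;			/* e.g. three receive slots per send slot */
	uint64_t wr_id = 1000000007ULL;

	printf("wr_id %llu -> rx slot %u\n",
	       (unsigned long long)wr_id, rx_index_for(wr_id, rx_cnt));
	return 0;
}
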
386 struct smc_link *link; in smc_wr_rx_process_cqes() local
390 link = wc[i].qp->qp_context; in smc_wr_rx_process_cqes()
392 link->wr_rx_tstamp = jiffies; in smc_wr_rx_process_cqes()
394 smc_wr_rx_post(link); /* refill WR RX */ in smc_wr_rx_process_cqes()
401 smcr_link_down_cond_sched(link); in smc_wr_rx_process_cqes()
404 smc_wr_rx_post(link); /* refill WR RX */ in smc_wr_rx_process_cqes()
422 rc = ib_poll_cq(dev->roce_cq_recv, SMC_WR_MAX_POLL_CQE, wc); in smc_wr_rx_tasklet_fn()
424 ib_req_notify_cq(dev->roce_cq_recv, in smc_wr_rx_tasklet_fn()
440 tasklet_schedule(&dev->recv_tasklet); in smc_wr_rx_cq_handler()
443 int smc_wr_rx_post_init(struct smc_link *link) in smc_wr_rx_post_init() argument
448 for (i = 0; i < link->wr_rx_cnt; i++) in smc_wr_rx_post_init()
449 rc = smc_wr_rx_post(link); in smc_wr_rx_post_init()
457 struct ib_qp_attr *attr = &lnk->qp_attr; in smc_wr_remember_qp_attr()
462 ib_query_qp(lnk->roce_qp, attr, in smc_wr_remember_qp_attr()
482 lnk->wr_tx_cnt = min_t(size_t, SMC_WR_BUF_CNT, in smc_wr_remember_qp_attr()
483 lnk->qp_attr.cap.max_send_wr); in smc_wr_remember_qp_attr()
484 lnk->wr_rx_cnt = min_t(size_t, SMC_WR_BUF_CNT * 3, in smc_wr_remember_qp_attr()
485 lnk->qp_attr.cap.max_recv_wr); in smc_wr_remember_qp_attr()
492 for (i = 0; i < lnk->wr_tx_cnt; i++) { in smc_wr_init_sge()
493 lnk->wr_tx_sges[i].addr = in smc_wr_init_sge()
494 lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE; in smc_wr_init_sge()
495 lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE; in smc_wr_init_sge()
496 lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey; in smc_wr_init_sge()
497 lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[0].lkey = in smc_wr_init_sge()
498 lnk->roce_pd->local_dma_lkey; in smc_wr_init_sge()
499 lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[1].lkey = in smc_wr_init_sge()
500 lnk->roce_pd->local_dma_lkey; in smc_wr_init_sge()
501 lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[0].lkey = in smc_wr_init_sge()
502 lnk->roce_pd->local_dma_lkey; in smc_wr_init_sge()
503 lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[1].lkey = in smc_wr_init_sge()
504 lnk->roce_pd->local_dma_lkey; in smc_wr_init_sge()
505 lnk->wr_tx_ibs[i].next = NULL; in smc_wr_init_sge()
506 lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i]; in smc_wr_init_sge()
507 lnk->wr_tx_ibs[i].num_sge = 1; in smc_wr_init_sge()
508 lnk->wr_tx_ibs[i].opcode = IB_WR_SEND; in smc_wr_init_sge()
509 lnk->wr_tx_ibs[i].send_flags = in smc_wr_init_sge()
511 lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.opcode = IB_WR_RDMA_WRITE; in smc_wr_init_sge()
512 lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.opcode = IB_WR_RDMA_WRITE; in smc_wr_init_sge()
513 lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.sg_list = in smc_wr_init_sge()
514 lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge; in smc_wr_init_sge()
515 lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.sg_list = in smc_wr_init_sge()
516 lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge; in smc_wr_init_sge()
518 for (i = 0; i < lnk->wr_rx_cnt; i++) { in smc_wr_init_sge()
519 lnk->wr_rx_sges[i].addr = in smc_wr_init_sge()
520 lnk->wr_rx_dma_addr + i * SMC_WR_BUF_SIZE; in smc_wr_init_sge()
521 lnk->wr_rx_sges[i].length = SMC_WR_BUF_SIZE; in smc_wr_init_sge()
522 lnk->wr_rx_sges[i].lkey = lnk->roce_pd->local_dma_lkey; in smc_wr_init_sge()
523 lnk->wr_rx_ibs[i].next = NULL; in smc_wr_init_sge()
524 lnk->wr_rx_ibs[i].sg_list = &lnk->wr_rx_sges[i]; in smc_wr_init_sge()
525 lnk->wr_rx_ibs[i].num_sge = 1; in smc_wr_init_sge()
527 lnk->wr_reg.wr.next = NULL; in smc_wr_init_sge()
528 lnk->wr_reg.wr.num_sge = 0; in smc_wr_init_sge()
529 lnk->wr_reg.wr.send_flags = IB_SEND_SIGNALED; in smc_wr_init_sge()
530 lnk->wr_reg.wr.opcode = IB_WR_REG_MR; in smc_wr_init_sge()
531 lnk->wr_reg.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE; in smc_wr_init_sge()
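
smc_wr_init_sge() shows how the rings are laid out: all send buffers (and, separately, all receive buffers) live in one contiguous allocation that was DMA-mapped once, so the i-th scatter-gather element simply points at base + i * SMC_WR_BUF_SIZE with the protection domain's local_dma_lkey, and each send or receive work request owns exactly one SGE. A small sketch of that address arithmetic; the struct, the constants and the fake base address are placeholders, not the kernel's definitions:

#include <stdint.h>
#include <stdio.h>

#define BUF_CNT 16
#define BUF_SIZE 48	/* stand-in for SMC_WR_BUF_SIZE */

struct sge {		/* reduced stand-in for struct ib_sge */
	uint64_t addr;
	uint32_t length;
	uint32_t lkey;
};

int main(void)
{
	uint64_t dma_base = 0x100000;	/* pretend result of ib_dma_map_single() */
	uint32_t lkey = 0x1234;		/* pretend pd->local_dma_lkey */
	struct sge tx_sges[BUF_CNT];

	for (int i = 0; i < BUF_CNT; i++) {
		tx_sges[i].addr = dma_base + (uint64_t)i * BUF_SIZE;	/* slot i's DMA address */
		tx_sges[i].length = BUF_SIZE;
		tx_sges[i].lkey = lkey;
	}
	printf("slot 3 starts at 0x%llx\n", (unsigned long long)tx_sges[3].addr);
	return 0;
}
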
538 if (!lnk->smcibdev) in smc_wr_free_link()
540 ibdev = lnk->smcibdev->ibdev; in smc_wr_free_link()
546 wait_event(lnk->wr_reg_wait, (!atomic_read(&lnk->wr_reg_refcnt))); in smc_wr_free_link()
547 wait_event(lnk->wr_tx_wait, (!atomic_read(&lnk->wr_tx_refcnt))); in smc_wr_free_link()
549 if (lnk->wr_rx_dma_addr) { in smc_wr_free_link()
550 ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr, in smc_wr_free_link()
551 SMC_WR_BUF_SIZE * lnk->wr_rx_cnt, in smc_wr_free_link()
553 lnk->wr_rx_dma_addr = 0; in smc_wr_free_link()
555 if (lnk->wr_tx_dma_addr) { in smc_wr_free_link()
556 ib_dma_unmap_single(ibdev, lnk->wr_tx_dma_addr, in smc_wr_free_link()
557 SMC_WR_BUF_SIZE * lnk->wr_tx_cnt, in smc_wr_free_link()
559 lnk->wr_tx_dma_addr = 0; in smc_wr_free_link()
565 kfree(lnk->wr_tx_compl); in smc_wr_free_link_mem()
566 lnk->wr_tx_compl = NULL; in smc_wr_free_link_mem()
567 kfree(lnk->wr_tx_pends); in smc_wr_free_link_mem()
568 lnk->wr_tx_pends = NULL; in smc_wr_free_link_mem()
569 kfree(lnk->wr_tx_mask); in smc_wr_free_link_mem()
570 lnk->wr_tx_mask = NULL; in smc_wr_free_link_mem()
571 kfree(lnk->wr_tx_sges); in smc_wr_free_link_mem()
572 lnk->wr_tx_sges = NULL; in smc_wr_free_link_mem()
573 kfree(lnk->wr_tx_rdma_sges); in smc_wr_free_link_mem()
574 lnk->wr_tx_rdma_sges = NULL; in smc_wr_free_link_mem()
575 kfree(lnk->wr_rx_sges); in smc_wr_free_link_mem()
576 lnk->wr_rx_sges = NULL; in smc_wr_free_link_mem()
577 kfree(lnk->wr_tx_rdmas); in smc_wr_free_link_mem()
578 lnk->wr_tx_rdmas = NULL; in smc_wr_free_link_mem()
579 kfree(lnk->wr_rx_ibs); in smc_wr_free_link_mem()
580 lnk->wr_rx_ibs = NULL; in smc_wr_free_link_mem()
581 kfree(lnk->wr_tx_ibs); in smc_wr_free_link_mem()
582 lnk->wr_tx_ibs = NULL; in smc_wr_free_link_mem()
583 kfree(lnk->wr_tx_bufs); in smc_wr_free_link_mem()
584 lnk->wr_tx_bufs = NULL; in smc_wr_free_link_mem()
585 kfree(lnk->wr_rx_bufs); in smc_wr_free_link_mem()
586 lnk->wr_rx_bufs = NULL; in smc_wr_free_link_mem()
589 int smc_wr_alloc_link_mem(struct smc_link *link) in smc_wr_alloc_link_mem() argument
591 /* allocate link related memory */ in smc_wr_alloc_link_mem()
592 link->wr_tx_bufs = kcalloc(SMC_WR_BUF_CNT, SMC_WR_BUF_SIZE, GFP_KERNEL); in smc_wr_alloc_link_mem()
593 if (!link->wr_tx_bufs) in smc_wr_alloc_link_mem()
595 link->wr_rx_bufs = kcalloc(SMC_WR_BUF_CNT * 3, SMC_WR_BUF_SIZE, in smc_wr_alloc_link_mem()
597 if (!link->wr_rx_bufs) in smc_wr_alloc_link_mem()
599 link->wr_tx_ibs = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_ibs[0]), in smc_wr_alloc_link_mem()
601 if (!link->wr_tx_ibs) in smc_wr_alloc_link_mem()
603 link->wr_rx_ibs = kcalloc(SMC_WR_BUF_CNT * 3, in smc_wr_alloc_link_mem()
604 sizeof(link->wr_rx_ibs[0]), in smc_wr_alloc_link_mem()
606 if (!link->wr_rx_ibs) in smc_wr_alloc_link_mem()
608 link->wr_tx_rdmas = kcalloc(SMC_WR_BUF_CNT, in smc_wr_alloc_link_mem()
609 sizeof(link->wr_tx_rdmas[0]), in smc_wr_alloc_link_mem()
611 if (!link->wr_tx_rdmas) in smc_wr_alloc_link_mem()
613 link->wr_tx_rdma_sges = kcalloc(SMC_WR_BUF_CNT, in smc_wr_alloc_link_mem()
614 sizeof(link->wr_tx_rdma_sges[0]), in smc_wr_alloc_link_mem()
616 if (!link->wr_tx_rdma_sges) in smc_wr_alloc_link_mem()
618 link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]), in smc_wr_alloc_link_mem()
620 if (!link->wr_tx_sges) in smc_wr_alloc_link_mem()
622 link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3, in smc_wr_alloc_link_mem()
623 sizeof(link->wr_rx_sges[0]), in smc_wr_alloc_link_mem()
625 if (!link->wr_rx_sges) in smc_wr_alloc_link_mem()
627 link->wr_tx_mask = kcalloc(BITS_TO_LONGS(SMC_WR_BUF_CNT), in smc_wr_alloc_link_mem()
628 sizeof(*link->wr_tx_mask), in smc_wr_alloc_link_mem()
630 if (!link->wr_tx_mask) in smc_wr_alloc_link_mem()
632 link->wr_tx_pends = kcalloc(SMC_WR_BUF_CNT, in smc_wr_alloc_link_mem()
633 sizeof(link->wr_tx_pends[0]), in smc_wr_alloc_link_mem()
635 if (!link->wr_tx_pends) in smc_wr_alloc_link_mem()
637 link->wr_tx_compl = kcalloc(SMC_WR_BUF_CNT, in smc_wr_alloc_link_mem()
638 sizeof(link->wr_tx_compl[0]), in smc_wr_alloc_link_mem()
640 if (!link->wr_tx_compl) in smc_wr_alloc_link_mem()
645 kfree(link->wr_tx_pends); in smc_wr_alloc_link_mem()
647 kfree(link->wr_tx_mask); in smc_wr_alloc_link_mem()
649 kfree(link->wr_rx_sges); in smc_wr_alloc_link_mem()
651 kfree(link->wr_tx_sges); in smc_wr_alloc_link_mem()
653 kfree(link->wr_tx_rdma_sges); in smc_wr_alloc_link_mem()
655 kfree(link->wr_tx_rdmas); in smc_wr_alloc_link_mem()
657 kfree(link->wr_rx_ibs); in smc_wr_alloc_link_mem()
659 kfree(link->wr_tx_ibs); in smc_wr_alloc_link_mem()
661 kfree(link->wr_rx_bufs); in smc_wr_alloc_link_mem()
663 kfree(link->wr_tx_bufs); in smc_wr_alloc_link_mem()
665 return -ENOMEM; in smc_wr_alloc_link_mem()
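
smc_wr_alloc_link_mem() is a textbook goto-based unwind: each allocation that fails jumps to a label that frees everything allocated before it, in reverse order, so the function returns either with every ring array in place or with nothing leaked. The same shape in userspace C with just three buffers; alloc_rings(), struct rings and the label names are illustrative:

#include <errno.h>
#include <stdlib.h>

struct rings {
	void *tx_bufs;
	void *rx_bufs;
	void *tx_pends;
};

static int alloc_rings(struct rings *r, size_t cnt, size_t buf_size)
{
	r->tx_bufs = calloc(cnt, buf_size);
	if (!r->tx_bufs)
		goto no_tx_bufs;
	r->rx_bufs = calloc(cnt * 3, buf_size);	/* SMC keeps 3x as many rx slots */
	if (!r->rx_bufs)
		goto no_rx_bufs;
	r->tx_pends = calloc(cnt, 64);
	if (!r->tx_pends)
		goto no_tx_pends;
	return 0;

no_tx_pends:			/* unwind in reverse allocation order */
	free(r->rx_bufs);
no_rx_bufs:
	free(r->tx_bufs);
no_tx_bufs:
	return -ENOMEM;
}

int main(void)
{
	struct rings r;

	if (alloc_rings(&r, 16, 48))
		return 1;
	free(r.tx_pends);	/* normal teardown mirrors smc_wr_free_link_mem() */
	free(r.rx_bufs);
	free(r.tx_bufs);
	return 0;
}
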
670 tasklet_kill(&smcibdev->recv_tasklet); in smc_wr_remove_dev()
671 tasklet_kill(&smcibdev->send_tasklet); in smc_wr_remove_dev()
676 tasklet_init(&smcibdev->recv_tasklet, smc_wr_rx_tasklet_fn, in smc_wr_add_dev()
678 tasklet_init(&smcibdev->send_tasklet, smc_wr_tx_tasklet_fn, in smc_wr_add_dev()
684 struct ib_device *ibdev = lnk->smcibdev->ibdev; in smc_wr_create_link()
687 smc_wr_tx_set_wr_id(&lnk->wr_tx_id, 0); in smc_wr_create_link()
688 lnk->wr_rx_id = 0; in smc_wr_create_link()
689 lnk->wr_rx_dma_addr = ib_dma_map_single( in smc_wr_create_link()
690 ibdev, lnk->wr_rx_bufs, SMC_WR_BUF_SIZE * lnk->wr_rx_cnt, in smc_wr_create_link()
692 if (ib_dma_mapping_error(ibdev, lnk->wr_rx_dma_addr)) { in smc_wr_create_link()
693 lnk->wr_rx_dma_addr = 0; in smc_wr_create_link()
694 rc = -EIO; in smc_wr_create_link()
697 lnk->wr_tx_dma_addr = ib_dma_map_single( in smc_wr_create_link()
698 ibdev, lnk->wr_tx_bufs, SMC_WR_BUF_SIZE * lnk->wr_tx_cnt, in smc_wr_create_link()
700 if (ib_dma_mapping_error(ibdev, lnk->wr_tx_dma_addr)) { in smc_wr_create_link()
701 rc = -EIO; in smc_wr_create_link()
705 memset(lnk->wr_tx_mask, 0, in smc_wr_create_link()
706 BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask)); in smc_wr_create_link()
707 init_waitqueue_head(&lnk->wr_tx_wait); in smc_wr_create_link()
708 atomic_set(&lnk->wr_tx_refcnt, 0); in smc_wr_create_link()
709 init_waitqueue_head(&lnk->wr_reg_wait); in smc_wr_create_link()
710 atomic_set(&lnk->wr_reg_refcnt, 0); in smc_wr_create_link()
714 ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr, in smc_wr_create_link()
715 SMC_WR_BUF_SIZE * lnk->wr_rx_cnt, in smc_wr_create_link()
717 lnk->wr_rx_dma_addr = 0; in smc_wr_create_link()