
Lines Matching +full:no +full:- +full:memory +full:- +full:wc

1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
25 #include <linux/nvme-rdma.h>
164 static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc);
165 static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
166 static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc);
167 static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc);
183 return -EINVAL; in srq_size_set()
190 return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT); in num_pages()
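The expression in num_pages() above rounds a byte length up to whole pages. A minimal user-space sketch, with PAGE_SHIFT/PAGE_MASK defined locally for a 4 KiB page (stand-ins for the kernel macros, not taken from kernel headers), shows it matches DIV_ROUND_UP(len, PAGE_SIZE) for len > 0:

#include <assert.h>
#include <stdio.h>

/* Local stand-ins for the kernel macros, assuming 4 KiB pages. */
#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Same arithmetic as num_pages() in the listing above. */
static unsigned long num_pages(unsigned long len)
{
	return 1 + (((len - 1) & PAGE_MASK) >> PAGE_SHIFT);
}

int main(void)
{
	assert(num_pages(4096) == 1);   /* exactly one page          */
	assert(num_pages(4097) == 2);   /* one byte spills to page 2 */
	assert(num_pages(16384) == 4);
	printf("num_pages(4097) = %lu\n", num_pages(4097));
	return 0;
}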
195 return nvme_is_write(rsp->req.cmd) && in nvmet_rdma_need_data_in()
196 rsp->req.transfer_len && in nvmet_rdma_need_data_in()
197 !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA); in nvmet_rdma_need_data_in()
202 return !nvme_is_write(rsp->req.cmd) && in nvmet_rdma_need_data_out()
203 rsp->req.transfer_len && in nvmet_rdma_need_data_out()
204 !rsp->req.cqe->status && in nvmet_rdma_need_data_out()
205 !(rsp->flags & NVMET_RDMA_REQ_INLINE_DATA); in nvmet_rdma_need_data_out()
214 spin_lock_irqsave(&queue->rsps_lock, flags); in nvmet_rdma_get_rsp()
215 rsp = list_first_entry_or_null(&queue->free_rsps, in nvmet_rdma_get_rsp()
218 list_del(&rsp->free_list); in nvmet_rdma_get_rsp()
219 spin_unlock_irqrestore(&queue->rsps_lock, flags); in nvmet_rdma_get_rsp()
227 ret = nvmet_rdma_alloc_rsp(queue->dev, rsp); in nvmet_rdma_get_rsp()
233 rsp->allocated = true; in nvmet_rdma_get_rsp()
244 if (unlikely(rsp->allocated)) { in nvmet_rdma_put_rsp()
245 nvmet_rdma_free_rsp(rsp->queue->dev, rsp); in nvmet_rdma_put_rsp()
250 spin_lock_irqsave(&rsp->queue->rsps_lock, flags); in nvmet_rdma_put_rsp()
251 list_add_tail(&rsp->free_list, &rsp->queue->free_rsps); in nvmet_rdma_put_rsp()
252 spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags); in nvmet_rdma_put_rsp()
262 if (!ndev->inline_data_size) in nvmet_rdma_free_inline_pages()
265 sg = c->inline_sg; in nvmet_rdma_free_inline_pages()
266 sge = &c->sge[1]; in nvmet_rdma_free_inline_pages()
268 for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) { in nvmet_rdma_free_inline_pages()
269 if (sge->length) in nvmet_rdma_free_inline_pages()
270 ib_dma_unmap_page(ndev->device, sge->addr, in nvmet_rdma_free_inline_pages()
271 sge->length, DMA_FROM_DEVICE); in nvmet_rdma_free_inline_pages()
286 if (!ndev->inline_data_size) in nvmet_rdma_alloc_inline_pages()
289 sg = c->inline_sg; in nvmet_rdma_alloc_inline_pages()
290 sg_init_table(sg, ndev->inline_page_count); in nvmet_rdma_alloc_inline_pages()
291 sge = &c->sge[1]; in nvmet_rdma_alloc_inline_pages()
292 len = ndev->inline_data_size; in nvmet_rdma_alloc_inline_pages()
294 for (i = 0; i < ndev->inline_page_count; i++, sg++, sge++) { in nvmet_rdma_alloc_inline_pages()
299 sge->addr = ib_dma_map_page(ndev->device, in nvmet_rdma_alloc_inline_pages()
301 if (ib_dma_mapping_error(ndev->device, sge->addr)) in nvmet_rdma_alloc_inline_pages()
303 sge->length = min_t(int, len, PAGE_SIZE); in nvmet_rdma_alloc_inline_pages()
304 sge->lkey = ndev->pd->local_dma_lkey; in nvmet_rdma_alloc_inline_pages()
305 len -= sge->length; in nvmet_rdma_alloc_inline_pages()
310 for (; i >= 0; i--, sg--, sge--) { in nvmet_rdma_alloc_inline_pages()
311 if (sge->length) in nvmet_rdma_alloc_inline_pages()
312 ib_dma_unmap_page(ndev->device, sge->addr, in nvmet_rdma_alloc_inline_pages()
313 sge->length, DMA_FROM_DEVICE); in nvmet_rdma_alloc_inline_pages()
317 return -ENOMEM; in nvmet_rdma_alloc_inline_pages()
324 c->nvme_cmd = kmalloc(sizeof(*c->nvme_cmd), GFP_KERNEL); in nvmet_rdma_alloc_cmd()
325 if (!c->nvme_cmd) in nvmet_rdma_alloc_cmd()
328 c->sge[0].addr = ib_dma_map_single(ndev->device, c->nvme_cmd, in nvmet_rdma_alloc_cmd()
329 sizeof(*c->nvme_cmd), DMA_FROM_DEVICE); in nvmet_rdma_alloc_cmd()
330 if (ib_dma_mapping_error(ndev->device, c->sge[0].addr)) in nvmet_rdma_alloc_cmd()
333 c->sge[0].length = sizeof(*c->nvme_cmd); in nvmet_rdma_alloc_cmd()
334 c->sge[0].lkey = ndev->pd->local_dma_lkey; in nvmet_rdma_alloc_cmd()
339 c->cqe.done = nvmet_rdma_recv_done; in nvmet_rdma_alloc_cmd()
341 c->wr.wr_cqe = &c->cqe; in nvmet_rdma_alloc_cmd()
342 c->wr.sg_list = c->sge; in nvmet_rdma_alloc_cmd()
343 c->wr.num_sge = admin ? 1 : ndev->inline_page_count + 1; in nvmet_rdma_alloc_cmd()
348 ib_dma_unmap_single(ndev->device, c->sge[0].addr, in nvmet_rdma_alloc_cmd()
349 sizeof(*c->nvme_cmd), DMA_FROM_DEVICE); in nvmet_rdma_alloc_cmd()
351 kfree(c->nvme_cmd); in nvmet_rdma_alloc_cmd()
354 return -ENOMEM; in nvmet_rdma_alloc_cmd()
362 ib_dma_unmap_single(ndev->device, c->sge[0].addr, in nvmet_rdma_free_cmd()
363 sizeof(*c->nvme_cmd), DMA_FROM_DEVICE); in nvmet_rdma_free_cmd()
364 kfree(c->nvme_cmd); in nvmet_rdma_free_cmd()
372 int ret = -EINVAL, i; in nvmet_rdma_alloc_cmds()
387 while (--i >= 0) in nvmet_rdma_alloc_cmds()
408 r->req.cqe = kmalloc(sizeof(*r->req.cqe), GFP_KERNEL); in nvmet_rdma_alloc_rsp()
409 if (!r->req.cqe) in nvmet_rdma_alloc_rsp()
412 r->send_sge.addr = ib_dma_map_single(ndev->device, r->req.cqe, in nvmet_rdma_alloc_rsp()
413 sizeof(*r->req.cqe), DMA_TO_DEVICE); in nvmet_rdma_alloc_rsp()
414 if (ib_dma_mapping_error(ndev->device, r->send_sge.addr)) in nvmet_rdma_alloc_rsp()
417 if (!ib_uses_virt_dma(ndev->device)) in nvmet_rdma_alloc_rsp()
418 r->req.p2p_client = &ndev->device->dev; in nvmet_rdma_alloc_rsp()
419 r->send_sge.length = sizeof(*r->req.cqe); in nvmet_rdma_alloc_rsp()
420 r->send_sge.lkey = ndev->pd->local_dma_lkey; in nvmet_rdma_alloc_rsp()
422 r->send_cqe.done = nvmet_rdma_send_done; in nvmet_rdma_alloc_rsp()
424 r->send_wr.wr_cqe = &r->send_cqe; in nvmet_rdma_alloc_rsp()
425 r->send_wr.sg_list = &r->send_sge; in nvmet_rdma_alloc_rsp()
426 r->send_wr.num_sge = 1; in nvmet_rdma_alloc_rsp()
427 r->send_wr.send_flags = IB_SEND_SIGNALED; in nvmet_rdma_alloc_rsp()
430 r->read_cqe.done = nvmet_rdma_read_data_done; in nvmet_rdma_alloc_rsp()
432 r->write_cqe.done = nvmet_rdma_write_data_done; in nvmet_rdma_alloc_rsp()
437 kfree(r->req.cqe); in nvmet_rdma_alloc_rsp()
439 return -ENOMEM; in nvmet_rdma_alloc_rsp()
445 ib_dma_unmap_single(ndev->device, r->send_sge.addr, in nvmet_rdma_free_rsp()
446 sizeof(*r->req.cqe), DMA_TO_DEVICE); in nvmet_rdma_free_rsp()
447 kfree(r->req.cqe); in nvmet_rdma_free_rsp()
453 struct nvmet_rdma_device *ndev = queue->dev; in nvmet_rdma_alloc_rsps()
454 int nr_rsps = queue->recv_queue_size * 2; in nvmet_rdma_alloc_rsps()
455 int ret = -EINVAL, i; in nvmet_rdma_alloc_rsps()
457 queue->rsps = kcalloc(nr_rsps, sizeof(struct nvmet_rdma_rsp), in nvmet_rdma_alloc_rsps()
459 if (!queue->rsps) in nvmet_rdma_alloc_rsps()
463 struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; in nvmet_rdma_alloc_rsps()
469 list_add_tail(&rsp->free_list, &queue->free_rsps); in nvmet_rdma_alloc_rsps()
475 while (--i >= 0) { in nvmet_rdma_alloc_rsps()
476 struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; in nvmet_rdma_alloc_rsps()
478 list_del(&rsp->free_list); in nvmet_rdma_alloc_rsps()
481 kfree(queue->rsps); in nvmet_rdma_alloc_rsps()
488 struct nvmet_rdma_device *ndev = queue->dev; in nvmet_rdma_free_rsps()
489 int i, nr_rsps = queue->recv_queue_size * 2; in nvmet_rdma_free_rsps()
492 struct nvmet_rdma_rsp *rsp = &queue->rsps[i]; in nvmet_rdma_free_rsps()
494 list_del(&rsp->free_list); in nvmet_rdma_free_rsps()
497 kfree(queue->rsps); in nvmet_rdma_free_rsps()
505 ib_dma_sync_single_for_device(ndev->device, in nvmet_rdma_post_recv()
506 cmd->sge[0].addr, cmd->sge[0].length, in nvmet_rdma_post_recv()
509 if (cmd->nsrq) in nvmet_rdma_post_recv()
510 ret = ib_post_srq_recv(cmd->nsrq->srq, &cmd->wr, NULL); in nvmet_rdma_post_recv()
512 ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL); in nvmet_rdma_post_recv()
522 spin_lock(&queue->rsp_wr_wait_lock); in nvmet_rdma_process_wr_wait_list()
523 while (!list_empty(&queue->rsp_wr_wait_list)) { in nvmet_rdma_process_wr_wait_list()
527 rsp = list_entry(queue->rsp_wr_wait_list.next, in nvmet_rdma_process_wr_wait_list()
529 list_del(&rsp->wait_list); in nvmet_rdma_process_wr_wait_list()
531 spin_unlock(&queue->rsp_wr_wait_lock); in nvmet_rdma_process_wr_wait_list()
533 spin_lock(&queue->rsp_wr_wait_lock); in nvmet_rdma_process_wr_wait_list()
536 list_add(&rsp->wait_list, &queue->rsp_wr_wait_list); in nvmet_rdma_process_wr_wait_list()
540 spin_unlock(&queue->rsp_wr_wait_lock); in nvmet_rdma_process_wr_wait_list()
580 domain->sig_type = IB_SIG_TYPE_T10_DIF; in nvmet_rdma_set_sig_domain()
581 domain->sig.dif.bg_type = IB_T10DIF_CRC; in nvmet_rdma_set_sig_domain()
582 domain->sig.dif.pi_interval = 1 << bi->interval_exp; in nvmet_rdma_set_sig_domain()
583 domain->sig.dif.ref_tag = le32_to_cpu(cmd->rw.reftag); in nvmet_rdma_set_sig_domain()
585 domain->sig.dif.ref_remap = true; in nvmet_rdma_set_sig_domain()
587 domain->sig.dif.app_tag = le16_to_cpu(cmd->rw.apptag); in nvmet_rdma_set_sig_domain()
588 domain->sig.dif.apptag_check_mask = le16_to_cpu(cmd->rw.appmask); in nvmet_rdma_set_sig_domain()
589 domain->sig.dif.app_escape = true; in nvmet_rdma_set_sig_domain()
591 domain->sig.dif.ref_escape = true; in nvmet_rdma_set_sig_domain()
597 struct nvme_command *cmd = req->cmd; in nvmet_rdma_set_sig_attrs()
598 u16 control = le16_to_cpu(cmd->rw.control); in nvmet_rdma_set_sig_attrs()
599 u8 pi_type = req->ns->pi_type; in nvmet_rdma_set_sig_attrs()
602 bi = bdev_get_integrity(req->ns->bdev); in nvmet_rdma_set_sig_attrs()
607 /* for WRITE_INSERT/READ_STRIP no wire domain */ in nvmet_rdma_set_sig_attrs()
608 sig_attrs->wire.sig_type = IB_SIG_TYPE_NONE; in nvmet_rdma_set_sig_attrs()
609 nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control, in nvmet_rdma_set_sig_attrs()
613 cmd->rw.control = cpu_to_le16(control); in nvmet_rdma_set_sig_attrs()
615 req->transfer_len += req->metadata_len; in nvmet_rdma_set_sig_attrs()
617 /* for WRITE_PASS/READ_PASS both wire/memory domains exist */ in nvmet_rdma_set_sig_attrs()
618 nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->wire, control, in nvmet_rdma_set_sig_attrs()
620 nvmet_rdma_set_sig_domain(bi, cmd, &sig_attrs->mem, control, in nvmet_rdma_set_sig_attrs()
625 sig_attrs->check_mask |= IB_SIG_CHECK_REFTAG; in nvmet_rdma_set_sig_attrs()
627 sig_attrs->check_mask |= IB_SIG_CHECK_GUARD; in nvmet_rdma_set_sig_attrs()
629 sig_attrs->check_mask |= IB_SIG_CHECK_APPTAG; in nvmet_rdma_set_sig_attrs()
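The two comments above mark the branch in nvmet_rdma_set_sig_attrs(): when the host sets PRACT (WRITE_INSERT/READ_STRIP) the target inserts or strips the protection information itself, so only the memory domain is configured and the wire domain is left as "none"; otherwise (WRITE_PASS/READ_PASS) both domains carry T10-DIF. A compressed user-space sketch of just that branch; SIG_NONE/SIG_T10_DIF and the pract flag are local stand-ins, not the kernel's ib_sig_attrs API:

#include <stdbool.h>
#include <stdio.h>

/* Local stand-ins for IB_SIG_TYPE_NONE / IB_SIG_TYPE_T10_DIF. */
enum sig_type { SIG_NONE, SIG_T10_DIF };

struct sig_attrs {
	enum sig_type wire; /* domain as seen on the RDMA fabric */
	enum sig_type mem;  /* domain as seen in target memory   */
};

static void set_sig_attrs(struct sig_attrs *sig, bool pract)
{
	if (pract) {
		/* WRITE_INSERT / READ_STRIP: the target generates or
		 * strips the PI, so there is no wire domain. */
		sig->wire = SIG_NONE;
		sig->mem  = SIG_T10_DIF;
	} else {
		/* WRITE_PASS / READ_PASS: PI travels end to end, so
		 * both wire and memory domains are configured. */
		sig->wire = SIG_T10_DIF;
		sig->mem  = SIG_T10_DIF;
	}
}

int main(void)
{
	struct sig_attrs a;

	set_sig_attrs(&a, true);
	printf("PRACT set:   wire=%d mem=%d\n", a.wire, a.mem);
	set_sig_attrs(&a, false);
	printf("PRACT clear: wire=%d mem=%d\n", a.wire, a.mem);
	return 0;
}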
635 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_rw_ctx_init()
636 struct nvmet_req *req = &rsp->req; in nvmet_rdma_rw_ctx_init()
639 if (req->metadata_len) in nvmet_rdma_rw_ctx_init()
640 ret = rdma_rw_ctx_signature_init(&rsp->rw, cm_id->qp, in nvmet_rdma_rw_ctx_init()
641 cm_id->port_num, req->sg, req->sg_cnt, in nvmet_rdma_rw_ctx_init()
642 req->metadata_sg, req->metadata_sg_cnt, sig_attrs, in nvmet_rdma_rw_ctx_init()
645 ret = rdma_rw_ctx_init(&rsp->rw, cm_id->qp, cm_id->port_num, in nvmet_rdma_rw_ctx_init()
646 req->sg, req->sg_cnt, 0, addr, key, in nvmet_rdma_rw_ctx_init()
654 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_rw_ctx_destroy()
655 struct nvmet_req *req = &rsp->req; in nvmet_rdma_rw_ctx_destroy()
657 if (req->metadata_len) in nvmet_rdma_rw_ctx_destroy()
658 rdma_rw_ctx_destroy_signature(&rsp->rw, cm_id->qp, in nvmet_rdma_rw_ctx_destroy()
659 cm_id->port_num, req->sg, req->sg_cnt, in nvmet_rdma_rw_ctx_destroy()
660 req->metadata_sg, req->metadata_sg_cnt, in nvmet_rdma_rw_ctx_destroy()
663 rdma_rw_ctx_destroy(&rsp->rw, cm_id->qp, cm_id->port_num, in nvmet_rdma_rw_ctx_destroy()
664 req->sg, req->sg_cnt, nvmet_data_dir(req)); in nvmet_rdma_rw_ctx_destroy()
669 struct nvmet_rdma_queue *queue = rsp->queue; in nvmet_rdma_release_rsp()
671 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_release_rsp()
673 if (rsp->n_rdma) in nvmet_rdma_release_rsp()
676 if (rsp->req.sg != rsp->cmd->inline_sg) in nvmet_rdma_release_rsp()
677 nvmet_req_free_sgls(&rsp->req); in nvmet_rdma_release_rsp()
679 if (unlikely(!list_empty_careful(&queue->rsp_wr_wait_list))) in nvmet_rdma_release_rsp()
687 if (queue->nvme_sq.ctrl) { in nvmet_rdma_error_comp()
688 nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl); in nvmet_rdma_error_comp()
699 static void nvmet_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) in nvmet_rdma_send_done() argument
702 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe); in nvmet_rdma_send_done()
703 struct nvmet_rdma_queue *queue = wc->qp->qp_context; in nvmet_rdma_send_done()
707 if (unlikely(wc->status != IB_WC_SUCCESS && in nvmet_rdma_send_done()
708 wc->status != IB_WC_WR_FLUSH_ERR)) { in nvmet_rdma_send_done()
710 wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status); in nvmet_rdma_send_done()
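The send, recv, read-data and write-data completion handlers above all share one dispatch idiom: each request embeds a completion entry whose ->done callback is invoked by the CQ poller, and the handler recovers the enclosing structure with container_of() (as in container_of(wc->wr_cqe, struct nvmet_rdma_rsp, send_cqe)). A minimal user-space sketch of that idiom, with a fake cqe/rsp pair standing in for the kernel's ib_cqe and nvmet_rdma_rsp:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-in for struct ib_cqe: only a completion callback. */
struct cqe {
	void (*done)(struct cqe *cqe);
};

/* Stand-in for nvmet_rdma_rsp: the cqe is embedded in the response. */
struct rsp {
	int tag;
	struct cqe send_cqe;
};

static void send_done(struct cqe *cqe)
{
	/* Recover the enclosing response from the embedded entry. */
	struct rsp *rsp = container_of(cqe, struct rsp, send_cqe);

	printf("completion for rsp %d\n", rsp->tag);
}

int main(void)
{
	struct rsp r = { .tag = 7, .send_cqe = { .done = send_done } };

	/* The poller only sees the embedded entry, like wc->wr_cqe. */
	r.send_cqe.done(&r.send_cqe);
	return 0;
}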
719 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_queue_response()
722 if (rsp->flags & NVMET_RDMA_REQ_INVALIDATE_RKEY) { in nvmet_rdma_queue_response()
723 rsp->send_wr.opcode = IB_WR_SEND_WITH_INV; in nvmet_rdma_queue_response()
724 rsp->send_wr.ex.invalidate_rkey = rsp->invalidate_rkey; in nvmet_rdma_queue_response()
726 rsp->send_wr.opcode = IB_WR_SEND; in nvmet_rdma_queue_response()
730 if (rsp->req.metadata_len) in nvmet_rdma_queue_response()
731 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp, in nvmet_rdma_queue_response()
732 cm_id->port_num, &rsp->write_cqe, NULL); in nvmet_rdma_queue_response()
734 first_wr = rdma_rw_ctx_wrs(&rsp->rw, cm_id->qp, in nvmet_rdma_queue_response()
735 cm_id->port_num, NULL, &rsp->send_wr); in nvmet_rdma_queue_response()
737 first_wr = &rsp->send_wr; in nvmet_rdma_queue_response()
740 nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd); in nvmet_rdma_queue_response()
742 ib_dma_sync_single_for_device(rsp->queue->dev->device, in nvmet_rdma_queue_response()
743 rsp->send_sge.addr, rsp->send_sge.length, in nvmet_rdma_queue_response()
746 if (unlikely(ib_post_send(cm_id->qp, first_wr, NULL))) { in nvmet_rdma_queue_response()
752 static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc) in nvmet_rdma_read_data_done() argument
755 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, read_cqe); in nvmet_rdma_read_data_done()
756 struct nvmet_rdma_queue *queue = wc->qp->qp_context; in nvmet_rdma_read_data_done()
759 WARN_ON(rsp->n_rdma <= 0); in nvmet_rdma_read_data_done()
760 atomic_add(rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_read_data_done()
761 rsp->n_rdma = 0; in nvmet_rdma_read_data_done()
763 if (unlikely(wc->status != IB_WC_SUCCESS)) { in nvmet_rdma_read_data_done()
765 nvmet_req_uninit(&rsp->req); in nvmet_rdma_read_data_done()
767 if (wc->status != IB_WC_WR_FLUSH_ERR) { in nvmet_rdma_read_data_done()
769 wc->wr_cqe, ib_wc_status_msg(wc->status), wc->status); in nvmet_rdma_read_data_done()
775 if (rsp->req.metadata_len) in nvmet_rdma_read_data_done()
776 status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr); in nvmet_rdma_read_data_done()
780 nvmet_req_complete(&rsp->req, status); in nvmet_rdma_read_data_done()
782 rsp->req.execute(&rsp->req); in nvmet_rdma_read_data_done()
785 static void nvmet_rdma_write_data_done(struct ib_cq *cq, struct ib_wc *wc) in nvmet_rdma_write_data_done() argument
788 container_of(wc->wr_cqe, struct nvmet_rdma_rsp, write_cqe); in nvmet_rdma_write_data_done()
789 struct nvmet_rdma_queue *queue = wc->qp->qp_context; in nvmet_rdma_write_data_done()
790 struct rdma_cm_id *cm_id = rsp->queue->cm_id; in nvmet_rdma_write_data_done()
796 WARN_ON(rsp->n_rdma <= 0); in nvmet_rdma_write_data_done()
797 atomic_add(rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_write_data_done()
798 rsp->n_rdma = 0; in nvmet_rdma_write_data_done()
800 if (unlikely(wc->status != IB_WC_SUCCESS)) { in nvmet_rdma_write_data_done()
802 nvmet_req_uninit(&rsp->req); in nvmet_rdma_write_data_done()
804 if (wc->status != IB_WC_WR_FLUSH_ERR) { in nvmet_rdma_write_data_done()
806 ib_wc_status_msg(wc->status), wc->status); in nvmet_rdma_write_data_done()
814 * - if succeeded send good NVMe response in nvmet_rdma_write_data_done()
815 * - if failed send bad NVMe response with appropriate error in nvmet_rdma_write_data_done()
817 status = nvmet_rdma_check_pi_status(rsp->rw.reg->mr); in nvmet_rdma_write_data_done()
819 rsp->req.cqe->status = cpu_to_le16(status << 1); in nvmet_rdma_write_data_done()
822 if (unlikely(ib_post_send(cm_id->qp, &rsp->send_wr, NULL))) { in nvmet_rdma_write_data_done()
835 sg = rsp->cmd->inline_sg; in nvmet_rdma_use_inline_sg()
837 if (i < sg_count - 1) in nvmet_rdma_use_inline_sg()
841 sg->offset = off; in nvmet_rdma_use_inline_sg()
842 sg->length = min_t(int, len, PAGE_SIZE - off); in nvmet_rdma_use_inline_sg()
843 len -= sg->length; in nvmet_rdma_use_inline_sg()
848 rsp->req.sg = rsp->cmd->inline_sg; in nvmet_rdma_use_inline_sg()
849 rsp->req.sg_cnt = sg_count; in nvmet_rdma_use_inline_sg()
854 struct nvme_sgl_desc *sgl = &rsp->req.cmd->common.dptr.sgl; in nvmet_rdma_map_sgl_inline()
855 u64 off = le64_to_cpu(sgl->addr); in nvmet_rdma_map_sgl_inline()
856 u32 len = le32_to_cpu(sgl->length); in nvmet_rdma_map_sgl_inline()
858 if (!nvme_is_write(rsp->req.cmd)) { in nvmet_rdma_map_sgl_inline()
859 rsp->req.error_loc = in nvmet_rdma_map_sgl_inline()
864 if (off + len > rsp->queue->dev->inline_data_size) { in nvmet_rdma_map_sgl_inline()
869 /* no data command? */ in nvmet_rdma_map_sgl_inline()
874 rsp->flags |= NVMET_RDMA_REQ_INLINE_DATA; in nvmet_rdma_map_sgl_inline()
875 rsp->req.transfer_len += len; in nvmet_rdma_map_sgl_inline()
882 u64 addr = le64_to_cpu(sgl->addr); in nvmet_rdma_map_sgl_keyed()
883 u32 key = get_unaligned_le32(sgl->key); in nvmet_rdma_map_sgl_keyed()
887 rsp->req.transfer_len = get_unaligned_le24(sgl->length); in nvmet_rdma_map_sgl_keyed()
889 /* no data command? */ in nvmet_rdma_map_sgl_keyed()
890 if (!rsp->req.transfer_len) in nvmet_rdma_map_sgl_keyed()
893 if (rsp->req.metadata_len) in nvmet_rdma_map_sgl_keyed()
894 nvmet_rdma_set_sig_attrs(&rsp->req, &sig_attrs); in nvmet_rdma_map_sgl_keyed()
896 ret = nvmet_req_alloc_sgls(&rsp->req); in nvmet_rdma_map_sgl_keyed()
903 rsp->n_rdma += ret; in nvmet_rdma_map_sgl_keyed()
906 rsp->invalidate_rkey = key; in nvmet_rdma_map_sgl_keyed()
907 rsp->flags |= NVMET_RDMA_REQ_INVALIDATE_RKEY; in nvmet_rdma_map_sgl_keyed()
913 rsp->req.transfer_len = 0; in nvmet_rdma_map_sgl_keyed()
919 struct nvme_keyed_sgl_desc *sgl = &rsp->req.cmd->common.dptr.ksgl; in nvmet_rdma_map_sgl()
921 switch (sgl->type >> 4) { in nvmet_rdma_map_sgl()
923 switch (sgl->type & 0xf) { in nvmet_rdma_map_sgl()
927 pr_err("invalid SGL subtype: %#x\n", sgl->type); in nvmet_rdma_map_sgl()
928 rsp->req.error_loc = in nvmet_rdma_map_sgl()
933 switch (sgl->type & 0xf) { in nvmet_rdma_map_sgl()
939 pr_err("invalid SGL subtype: %#x\n", sgl->type); in nvmet_rdma_map_sgl()
940 rsp->req.error_loc = in nvmet_rdma_map_sgl()
945 pr_err("invalid SGL type: %#x\n", sgl->type); in nvmet_rdma_map_sgl()
946 rsp->req.error_loc = offsetof(struct nvme_common_command, dptr); in nvmet_rdma_map_sgl()
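nvmet_rdma_map_sgl() above dispatches on the SGL identifier byte of the command's data pointer: the upper nibble (sgl->type >> 4) selects the descriptor type and the lower nibble (sgl->type & 0xf) the subtype. A small sketch of that decode; the DESC_*/SUBTYPE_* names and values are illustrative placeholders, not the kernel's NVME_* constants:

#include <stdint.h>
#include <stdio.h>

/* Illustrative placeholder values for the two nibbles. */
enum { DESC_DATA_BLOCK = 0x0, DESC_KEYED_DATA_BLOCK = 0x4 };
enum { SUBTYPE_ADDRESS = 0x0, SUBTYPE_OFFSET = 0x1 };

static void decode_sgl_id(uint8_t type)
{
	uint8_t desc = type >> 4;  /* same shift as sgl->type >> 4 */
	uint8_t sub  = type & 0xf; /* same mask as sgl->type & 0xf */

	printf("descriptor type %#x, subtype %#x\n", desc, sub);
	if (desc == DESC_KEYED_DATA_BLOCK && sub == SUBTYPE_ADDRESS)
		printf("-> keyed data block, host address: RDMA read/write path\n");
	else if (desc == DESC_DATA_BLOCK && sub == SUBTYPE_OFFSET)
		printf("-> data block, offset: inline (in-capsule) data path\n");
}

int main(void)
{
	decode_sgl_id(0x40); /* keyed data block / address subtype */
	decode_sgl_id(0x01); /* data block / offset subtype        */
	return 0;
}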
953 struct nvmet_rdma_queue *queue = rsp->queue; in nvmet_rdma_execute_command()
955 if (unlikely(atomic_sub_return(1 + rsp->n_rdma, in nvmet_rdma_execute_command()
956 &queue->sq_wr_avail) < 0)) { in nvmet_rdma_execute_command()
958 1 + rsp->n_rdma, queue->idx, in nvmet_rdma_execute_command()
959 queue->nvme_sq.ctrl->cntlid); in nvmet_rdma_execute_command()
960 atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail); in nvmet_rdma_execute_command()
965 if (rdma_rw_ctx_post(&rsp->rw, queue->qp, in nvmet_rdma_execute_command()
966 queue->cm_id->port_num, &rsp->read_cqe, NULL)) in nvmet_rdma_execute_command()
967 nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR); in nvmet_rdma_execute_command()
969 rsp->req.execute(&rsp->req); in nvmet_rdma_execute_command()
980 ib_dma_sync_single_for_cpu(queue->dev->device, in nvmet_rdma_handle_command()
981 cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length, in nvmet_rdma_handle_command()
983 ib_dma_sync_single_for_cpu(queue->dev->device, in nvmet_rdma_handle_command()
984 cmd->send_sge.addr, cmd->send_sge.length, in nvmet_rdma_handle_command()
987 if (!nvmet_req_init(&cmd->req, &queue->nvme_cq, in nvmet_rdma_handle_command()
988 &queue->nvme_sq, &nvmet_rdma_ops)) in nvmet_rdma_handle_command()
996 spin_lock(&queue->rsp_wr_wait_lock); in nvmet_rdma_handle_command()
997 list_add_tail(&cmd->wait_list, &queue->rsp_wr_wait_list); in nvmet_rdma_handle_command()
998 spin_unlock(&queue->rsp_wr_wait_lock); in nvmet_rdma_handle_command()
1004 nvmet_req_complete(&cmd->req, status); in nvmet_rdma_handle_command()
1007 static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc) in nvmet_rdma_recv_done() argument
1010 container_of(wc->wr_cqe, struct nvmet_rdma_cmd, cqe); in nvmet_rdma_recv_done()
1011 struct nvmet_rdma_queue *queue = wc->qp->qp_context; in nvmet_rdma_recv_done()
1014 if (unlikely(wc->status != IB_WC_SUCCESS)) { in nvmet_rdma_recv_done()
1015 if (wc->status != IB_WC_WR_FLUSH_ERR) { in nvmet_rdma_recv_done()
1017 wc->wr_cqe, ib_wc_status_msg(wc->status), in nvmet_rdma_recv_done()
1018 wc->status); in nvmet_rdma_recv_done()
1024 if (unlikely(wc->byte_len < sizeof(struct nvme_command))) { in nvmet_rdma_recv_done()
1030 cmd->queue = queue; in nvmet_rdma_recv_done()
1034 * we get here only under memory pressure, in nvmet_rdma_recv_done()
1038 nvmet_rdma_post_recv(queue->dev, cmd); in nvmet_rdma_recv_done()
1041 rsp->queue = queue; in nvmet_rdma_recv_done()
1042 rsp->cmd = cmd; in nvmet_rdma_recv_done()
1043 rsp->flags = 0; in nvmet_rdma_recv_done()
1044 rsp->req.cmd = cmd->nvme_cmd; in nvmet_rdma_recv_done()
1045 rsp->req.port = queue->port; in nvmet_rdma_recv_done()
1046 rsp->n_rdma = 0; in nvmet_rdma_recv_done()
1048 if (unlikely(queue->state != NVMET_RDMA_Q_LIVE)) { in nvmet_rdma_recv_done()
1051 spin_lock_irqsave(&queue->state_lock, flags); in nvmet_rdma_recv_done()
1052 if (queue->state == NVMET_RDMA_Q_CONNECTING) in nvmet_rdma_recv_done()
1053 list_add_tail(&rsp->wait_list, &queue->rsp_wait_list); in nvmet_rdma_recv_done()
1056 spin_unlock_irqrestore(&queue->state_lock, flags); in nvmet_rdma_recv_done()
1065 nvmet_rdma_free_cmds(nsrq->ndev, nsrq->cmds, nsrq->ndev->srq_size, in nvmet_rdma_destroy_srq()
1067 ib_destroy_srq(nsrq->srq); in nvmet_rdma_destroy_srq()
1076 if (!ndev->srqs) in nvmet_rdma_destroy_srqs()
1079 for (i = 0; i < ndev->srq_count; i++) in nvmet_rdma_destroy_srqs()
1080 nvmet_rdma_destroy_srq(ndev->srqs[i]); in nvmet_rdma_destroy_srqs()
1082 kfree(ndev->srqs); in nvmet_rdma_destroy_srqs()
1089 size_t srq_size = ndev->srq_size; in nvmet_rdma_init_srq()
1096 return ERR_PTR(-ENOMEM); in nvmet_rdma_init_srq()
1099 srq_attr.attr.max_sge = 1 + ndev->inline_page_count; in nvmet_rdma_init_srq()
1102 srq = ib_create_srq(ndev->pd, &srq_attr); in nvmet_rdma_init_srq()
1108 nsrq->cmds = nvmet_rdma_alloc_cmds(ndev, srq_size, false); in nvmet_rdma_init_srq()
1109 if (IS_ERR(nsrq->cmds)) { in nvmet_rdma_init_srq()
1110 ret = PTR_ERR(nsrq->cmds); in nvmet_rdma_init_srq()
1114 nsrq->srq = srq; in nvmet_rdma_init_srq()
1115 nsrq->ndev = ndev; in nvmet_rdma_init_srq()
1118 nsrq->cmds[i].nsrq = nsrq; in nvmet_rdma_init_srq()
1119 ret = nvmet_rdma_post_recv(ndev, &nsrq->cmds[i]); in nvmet_rdma_init_srq()
1127 nvmet_rdma_free_cmds(ndev, nsrq->cmds, srq_size, false); in nvmet_rdma_init_srq()
1139 if (!ndev->device->attrs.max_srq_wr || !ndev->device->attrs.max_srq) { in nvmet_rdma_init_srqs()
1142 * non-shared receive queues. in nvmet_rdma_init_srqs()
1148 ndev->srq_size = min(ndev->device->attrs.max_srq_wr, in nvmet_rdma_init_srqs()
1150 ndev->srq_count = min(ndev->device->num_comp_vectors, in nvmet_rdma_init_srqs()
1151 ndev->device->attrs.max_srq); in nvmet_rdma_init_srqs()
1153 ndev->srqs = kcalloc(ndev->srq_count, sizeof(*ndev->srqs), GFP_KERNEL); in nvmet_rdma_init_srqs()
1154 if (!ndev->srqs) in nvmet_rdma_init_srqs()
1155 return -ENOMEM; in nvmet_rdma_init_srqs()
1157 for (i = 0; i < ndev->srq_count; i++) { in nvmet_rdma_init_srqs()
1158 ndev->srqs[i] = nvmet_rdma_init_srq(ndev); in nvmet_rdma_init_srqs()
1159 if (IS_ERR(ndev->srqs[i])) { in nvmet_rdma_init_srqs()
1160 ret = PTR_ERR(ndev->srqs[i]); in nvmet_rdma_init_srqs()
1168 while (--i >= 0) in nvmet_rdma_init_srqs()
1169 nvmet_rdma_destroy_srq(ndev->srqs[i]); in nvmet_rdma_init_srqs()
1170 kfree(ndev->srqs); in nvmet_rdma_init_srqs()
1180 list_del(&ndev->entry); in nvmet_rdma_free_dev()
1184 ib_dealloc_pd(ndev->pd); in nvmet_rdma_free_dev()
1192 struct nvmet_rdma_port *port = cm_id->context; in nvmet_rdma_find_get_device()
1193 struct nvmet_port *nport = port->nport; in nvmet_rdma_find_get_device()
1201 if (ndev->device->node_guid == cm_id->device->node_guid && in nvmet_rdma_find_get_device()
1202 kref_get_unless_zero(&ndev->ref)) in nvmet_rdma_find_get_device()
1210 inline_page_count = num_pages(nport->inline_data_size); in nvmet_rdma_find_get_device()
1211 inline_sge_count = max(cm_id->device->attrs.max_sge_rd, in nvmet_rdma_find_get_device()
1212 cm_id->device->attrs.max_recv_sge) - 1; in nvmet_rdma_find_get_device()
1215 nport->inline_data_size, cm_id->device->name, in nvmet_rdma_find_get_device()
1217 nport->inline_data_size = inline_sge_count * PAGE_SIZE; in nvmet_rdma_find_get_device()
1220 ndev->inline_data_size = nport->inline_data_size; in nvmet_rdma_find_get_device()
1221 ndev->inline_page_count = inline_page_count; in nvmet_rdma_find_get_device()
1223 if (nport->pi_enable && !(cm_id->device->attrs.device_cap_flags & in nvmet_rdma_find_get_device()
1225 pr_warn("T10-PI is not supported by device %s. Disabling it\n", in nvmet_rdma_find_get_device()
1226 cm_id->device->name); in nvmet_rdma_find_get_device()
1227 nport->pi_enable = false; in nvmet_rdma_find_get_device()
1230 ndev->device = cm_id->device; in nvmet_rdma_find_get_device()
1231 kref_init(&ndev->ref); in nvmet_rdma_find_get_device()
1233 ndev->pd = ib_alloc_pd(ndev->device, 0); in nvmet_rdma_find_get_device()
1234 if (IS_ERR(ndev->pd)) in nvmet_rdma_find_get_device()
1243 list_add(&ndev->entry, &device_list); in nvmet_rdma_find_get_device()
1246 pr_debug("added %s.\n", ndev->device->name); in nvmet_rdma_find_get_device()
1250 ib_dealloc_pd(ndev->pd); in nvmet_rdma_find_get_device()
1261 struct nvmet_rdma_device *ndev = queue->dev; in nvmet_rdma_create_queue_ib()
1267 nr_cqe = queue->recv_queue_size + 2 * queue->send_queue_size; in nvmet_rdma_create_queue_ib()
1269 queue->cq = ib_cq_pool_get(ndev->device, nr_cqe + 1, in nvmet_rdma_create_queue_ib()
1270 queue->comp_vector, IB_POLL_WORKQUEUE); in nvmet_rdma_create_queue_ib()
1271 if (IS_ERR(queue->cq)) { in nvmet_rdma_create_queue_ib()
1272 ret = PTR_ERR(queue->cq); in nvmet_rdma_create_queue_ib()
1281 qp_attr.send_cq = queue->cq; in nvmet_rdma_create_queue_ib()
1282 qp_attr.recv_cq = queue->cq; in nvmet_rdma_create_queue_ib()
1286 qp_attr.cap.max_send_wr = queue->send_queue_size + 1; in nvmet_rdma_create_queue_ib()
1287 factor = rdma_rw_mr_factor(ndev->device, queue->cm_id->port_num, in nvmet_rdma_create_queue_ib()
1289 qp_attr.cap.max_rdma_ctxs = queue->send_queue_size * factor; in nvmet_rdma_create_queue_ib()
1290 qp_attr.cap.max_send_sge = max(ndev->device->attrs.max_sge_rd, in nvmet_rdma_create_queue_ib()
1291 ndev->device->attrs.max_send_sge); in nvmet_rdma_create_queue_ib()
1293 if (queue->nsrq) { in nvmet_rdma_create_queue_ib()
1294 qp_attr.srq = queue->nsrq->srq; in nvmet_rdma_create_queue_ib()
1297 qp_attr.cap.max_recv_wr = 1 + queue->recv_queue_size; in nvmet_rdma_create_queue_ib()
1298 qp_attr.cap.max_recv_sge = 1 + ndev->inline_page_count; in nvmet_rdma_create_queue_ib()
1301 if (queue->port->pi_enable && queue->host_qid) in nvmet_rdma_create_queue_ib()
1304 ret = rdma_create_qp(queue->cm_id, ndev->pd, &qp_attr); in nvmet_rdma_create_queue_ib()
1309 queue->qp = queue->cm_id->qp; in nvmet_rdma_create_queue_ib()
1311 atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr); in nvmet_rdma_create_queue_ib()
1314 __func__, queue->cq->cqe, qp_attr.cap.max_send_sge, in nvmet_rdma_create_queue_ib()
1315 qp_attr.cap.max_send_wr, queue->cm_id); in nvmet_rdma_create_queue_ib()
1317 if (!queue->nsrq) { in nvmet_rdma_create_queue_ib()
1318 for (i = 0; i < queue->recv_queue_size; i++) { in nvmet_rdma_create_queue_ib()
1319 queue->cmds[i].queue = queue; in nvmet_rdma_create_queue_ib()
1320 ret = nvmet_rdma_post_recv(ndev, &queue->cmds[i]); in nvmet_rdma_create_queue_ib()
1330 rdma_destroy_qp(queue->cm_id); in nvmet_rdma_create_queue_ib()
1332 ib_cq_pool_put(queue->cq, nr_cqe + 1); in nvmet_rdma_create_queue_ib()
1338 ib_drain_qp(queue->qp); in nvmet_rdma_destroy_queue_ib()
1339 if (queue->cm_id) in nvmet_rdma_destroy_queue_ib()
1340 rdma_destroy_id(queue->cm_id); in nvmet_rdma_destroy_queue_ib()
1341 ib_destroy_qp(queue->qp); in nvmet_rdma_destroy_queue_ib()
1342 ib_cq_pool_put(queue->cq, queue->recv_queue_size + 2 * in nvmet_rdma_destroy_queue_ib()
1343 queue->send_queue_size + 1); in nvmet_rdma_destroy_queue_ib()
1348 pr_debug("freeing queue %d\n", queue->idx); in nvmet_rdma_free_queue()
1350 nvmet_sq_destroy(&queue->nvme_sq); in nvmet_rdma_free_queue()
1353 if (!queue->nsrq) { in nvmet_rdma_free_queue()
1354 nvmet_rdma_free_cmds(queue->dev, queue->cmds, in nvmet_rdma_free_queue()
1355 queue->recv_queue_size, in nvmet_rdma_free_queue()
1356 !queue->host_qid); in nvmet_rdma_free_queue()
1359 ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx); in nvmet_rdma_free_queue()
1367 struct nvmet_rdma_device *dev = queue->dev; in nvmet_rdma_release_queue_work()
1371 kref_put(&dev->ref, nvmet_rdma_free_dev); in nvmet_rdma_release_queue_work()
1380 req = (struct nvme_rdma_cm_req *)conn->private_data; in nvmet_rdma_parse_cm_connect_req()
1381 if (!req || conn->private_data_len == 0) in nvmet_rdma_parse_cm_connect_req()
1384 if (le16_to_cpu(req->recfmt) != NVME_RDMA_CM_FMT_1_0) in nvmet_rdma_parse_cm_connect_req()
1387 queue->host_qid = le16_to_cpu(req->qid); in nvmet_rdma_parse_cm_connect_req()
1390 * req->hsqsize corresponds to our recv queue size plus 1 in nvmet_rdma_parse_cm_connect_req()
1391 * req->hrqsize corresponds to our send queue size in nvmet_rdma_parse_cm_connect_req()
1393 queue->recv_queue_size = le16_to_cpu(req->hsqsize) + 1; in nvmet_rdma_parse_cm_connect_req()
1394 queue->send_queue_size = le16_to_cpu(req->hrqsize); in nvmet_rdma_parse_cm_connect_req()
1396 if (!queue->host_qid && queue->recv_queue_size > NVME_AQ_DEPTH) in nvmet_rdma_parse_cm_connect_req()
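The sizing comment above is the host-to-target mapping taken from the RDMA connect private data: hsqsize appears to be a 0's-based value here (hence the +1), and hrqsize bounds how many sends the target may keep outstanding. A tiny sketch of the same arithmetic; the cm_req struct is an illustrative stand-in, not the kernel's nvme_rdma_cm_req layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the two connect-request fields used above. */
struct cm_req {
	uint16_t hsqsize; /* host SQ size, 0's based (assumption) */
	uint16_t hrqsize; /* host RQ size                         */
};

int main(void)
{
	struct cm_req req = { .hsqsize = 31, .hrqsize = 32 };

	/* Same conversion as nvmet_rdma_parse_cm_connect_req() above. */
	unsigned int recv_queue_size = req.hsqsize + 1; /* 32 recv slots */
	unsigned int send_queue_size = req.hrqsize;     /* 32 send slots */

	printf("recv_queue_size=%u send_queue_size=%u\n",
	       recv_queue_size, send_queue_size);
	return 0;
}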
1424 struct nvmet_rdma_port *port = cm_id->context; in nvmet_rdma_alloc_queue()
1434 ret = nvmet_sq_init(&queue->nvme_sq); in nvmet_rdma_alloc_queue()
1440 ret = nvmet_rdma_parse_cm_connect_req(&event->param.conn, queue); in nvmet_rdma_alloc_queue()
1448 INIT_WORK(&queue->release_work, nvmet_rdma_release_queue_work); in nvmet_rdma_alloc_queue()
1449 queue->dev = ndev; in nvmet_rdma_alloc_queue()
1450 queue->cm_id = cm_id; in nvmet_rdma_alloc_queue()
1451 queue->port = port->nport; in nvmet_rdma_alloc_queue()
1453 spin_lock_init(&queue->state_lock); in nvmet_rdma_alloc_queue()
1454 queue->state = NVMET_RDMA_Q_CONNECTING; in nvmet_rdma_alloc_queue()
1455 INIT_LIST_HEAD(&queue->rsp_wait_list); in nvmet_rdma_alloc_queue()
1456 INIT_LIST_HEAD(&queue->rsp_wr_wait_list); in nvmet_rdma_alloc_queue()
1457 spin_lock_init(&queue->rsp_wr_wait_lock); in nvmet_rdma_alloc_queue()
1458 INIT_LIST_HEAD(&queue->free_rsps); in nvmet_rdma_alloc_queue()
1459 spin_lock_init(&queue->rsps_lock); in nvmet_rdma_alloc_queue()
1460 INIT_LIST_HEAD(&queue->queue_list); in nvmet_rdma_alloc_queue()
1462 queue->idx = ida_simple_get(&nvmet_rdma_queue_ida, 0, 0, GFP_KERNEL); in nvmet_rdma_alloc_queue()
1463 if (queue->idx < 0) { in nvmet_rdma_alloc_queue()
1472 queue->comp_vector = !queue->host_qid ? 0 : in nvmet_rdma_alloc_queue()
1473 queue->idx % ndev->device->num_comp_vectors; in nvmet_rdma_alloc_queue()
1482 if (ndev->srqs) { in nvmet_rdma_alloc_queue()
1483 queue->nsrq = ndev->srqs[queue->comp_vector % ndev->srq_count]; in nvmet_rdma_alloc_queue()
1485 queue->cmds = nvmet_rdma_alloc_cmds(ndev, in nvmet_rdma_alloc_queue()
1486 queue->recv_queue_size, in nvmet_rdma_alloc_queue()
1487 !queue->host_qid); in nvmet_rdma_alloc_queue()
1488 if (IS_ERR(queue->cmds)) { in nvmet_rdma_alloc_queue()
1505 if (!queue->nsrq) { in nvmet_rdma_alloc_queue()
1506 nvmet_rdma_free_cmds(queue->dev, queue->cmds, in nvmet_rdma_alloc_queue()
1507 queue->recv_queue_size, in nvmet_rdma_alloc_queue()
1508 !queue->host_qid); in nvmet_rdma_alloc_queue()
1513 ida_simple_remove(&nvmet_rdma_queue_ida, queue->idx); in nvmet_rdma_alloc_queue()
1515 nvmet_sq_destroy(&queue->nvme_sq); in nvmet_rdma_alloc_queue()
1527 switch (event->event) { in nvmet_rdma_qp_event()
1529 rdma_notify(queue->cm_id, event->event); in nvmet_rdma_qp_event()
1537 ib_event_msg(event->event), event->event); in nvmet_rdma_qp_event()
1548 int ret = -ENOMEM; in nvmet_rdma_cm_accept()
1552 param.initiator_depth = min_t(u8, p->initiator_depth, in nvmet_rdma_cm_accept()
1553 queue->dev->device->attrs.max_qp_init_rd_atom); in nvmet_rdma_cm_accept()
1557 priv.crqsize = cpu_to_le16(queue->recv_queue_size); in nvmet_rdma_cm_accept()
1571 int ret = -EINVAL; in nvmet_rdma_queue_connect()
1576 return -ECONNREFUSED; in nvmet_rdma_queue_connect()
1581 ret = -ENOMEM; in nvmet_rdma_queue_connect()
1585 if (queue->host_qid == 0) { in nvmet_rdma_queue_connect()
1590 ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn); in nvmet_rdma_queue_connect()
1594 * destroy the cm_id here with non-zero ret code. in nvmet_rdma_queue_connect()
1596 queue->cm_id = NULL; in nvmet_rdma_queue_connect()
1601 list_add_tail(&queue->queue_list, &nvmet_rdma_queue_list); in nvmet_rdma_queue_connect()
1609 kref_put(&ndev->ref, nvmet_rdma_free_dev); in nvmet_rdma_queue_connect()
1618 spin_lock_irqsave(&queue->state_lock, flags); in nvmet_rdma_queue_established()
1619 if (queue->state != NVMET_RDMA_Q_CONNECTING) { in nvmet_rdma_queue_established()
1623 queue->state = NVMET_RDMA_Q_LIVE; in nvmet_rdma_queue_established()
1625 while (!list_empty(&queue->rsp_wait_list)) { in nvmet_rdma_queue_established()
1628 cmd = list_first_entry(&queue->rsp_wait_list, in nvmet_rdma_queue_established()
1630 list_del(&cmd->wait_list); in nvmet_rdma_queue_established()
1632 spin_unlock_irqrestore(&queue->state_lock, flags); in nvmet_rdma_queue_established()
1634 spin_lock_irqsave(&queue->state_lock, flags); in nvmet_rdma_queue_established()
1638 spin_unlock_irqrestore(&queue->state_lock, flags); in nvmet_rdma_queue_established()
1646 pr_debug("cm_id= %p queue->state= %d\n", queue->cm_id, queue->state); in __nvmet_rdma_queue_disconnect()
1648 spin_lock_irqsave(&queue->state_lock, flags); in __nvmet_rdma_queue_disconnect()
1649 switch (queue->state) { in __nvmet_rdma_queue_disconnect()
1651 while (!list_empty(&queue->rsp_wait_list)) { in __nvmet_rdma_queue_disconnect()
1654 rsp = list_first_entry(&queue->rsp_wait_list, in __nvmet_rdma_queue_disconnect()
1657 list_del(&rsp->wait_list); in __nvmet_rdma_queue_disconnect()
1662 queue->state = NVMET_RDMA_Q_DISCONNECTING; in __nvmet_rdma_queue_disconnect()
1668 spin_unlock_irqrestore(&queue->state_lock, flags); in __nvmet_rdma_queue_disconnect()
1671 rdma_disconnect(queue->cm_id); in __nvmet_rdma_queue_disconnect()
1672 schedule_work(&queue->release_work); in __nvmet_rdma_queue_disconnect()
1681 if (!list_empty(&queue->queue_list)) { in nvmet_rdma_queue_disconnect()
1682 list_del_init(&queue->queue_list); in nvmet_rdma_queue_disconnect()
1694 WARN_ON_ONCE(queue->state != NVMET_RDMA_Q_CONNECTING); in nvmet_rdma_queue_connect_fail()
1697 if (!list_empty(&queue->queue_list)) in nvmet_rdma_queue_connect_fail()
1698 list_del_init(&queue->queue_list); in nvmet_rdma_queue_connect_fail()
1701 pr_err("failed to connect queue %d\n", queue->idx); in nvmet_rdma_queue_connect_fail()
1702 schedule_work(&queue->release_work); in nvmet_rdma_queue_connect_fail()
1706 * nvme_rdma_device_removal() - Handle RDMA device removal
1718 * the cm_id implicitely by returning a non-zero rc to the callout.
1734 port = cm_id->context; in nvmet_rdma_device_removal()
1742 if (xchg(&port->cm_id, NULL) != cm_id) in nvmet_rdma_device_removal()
1758 if (cm_id->qp) in nvmet_rdma_cm_handler()
1759 queue = cm_id->qp->qp_context; in nvmet_rdma_cm_handler()
1762 rdma_event_msg(event->event), event->event, in nvmet_rdma_cm_handler()
1763 event->status, cm_id); in nvmet_rdma_cm_handler()
1765 switch (event->event) { in nvmet_rdma_cm_handler()
1774 struct nvmet_rdma_port *port = cm_id->context; in nvmet_rdma_cm_handler()
1776 schedule_delayed_work(&port->repair_work, 0); in nvmet_rdma_cm_handler()
1789 rdma_reject_msg(cm_id, event->status)); in nvmet_rdma_cm_handler()
1797 event->event); in nvmet_rdma_cm_handler()
1811 if (queue->nvme_sq.ctrl == ctrl) { in nvmet_rdma_delete_ctrl()
1812 list_del_init(&queue->queue_list); in nvmet_rdma_delete_ctrl()
1825 struct nvmet_port *nport = port->nport; in nvmet_rdma_destroy_port_queues()
1830 if (queue->port != nport) in nvmet_rdma_destroy_port_queues()
1833 list_del_init(&queue->queue_list); in nvmet_rdma_destroy_port_queues()
1841 struct rdma_cm_id *cm_id = xchg(&port->cm_id, NULL); in nvmet_rdma_disable_port()
1848 * controller yet. Do it here after the RDMA-CM was destroyed in nvmet_rdma_disable_port()
1849 * guarantees that no new queue will be created. in nvmet_rdma_disable_port()
1856 struct sockaddr *addr = (struct sockaddr *)&port->addr; in nvmet_rdma_enable_port()
1889 port->cm_id = cm_id; in nvmet_rdma_enable_port()
1906 schedule_delayed_work(&port->repair_work, 5 * HZ); in nvmet_rdma_repair_port_work()
1917 return -ENOMEM; in nvmet_rdma_add_port()
1919 nport->priv = port; in nvmet_rdma_add_port()
1920 port->nport = nport; in nvmet_rdma_add_port()
1921 INIT_DELAYED_WORK(&port->repair_work, nvmet_rdma_repair_port_work); in nvmet_rdma_add_port()
1923 switch (nport->disc_addr.adrfam) { in nvmet_rdma_add_port()
1932 nport->disc_addr.adrfam); in nvmet_rdma_add_port()
1933 ret = -EINVAL; in nvmet_rdma_add_port()
1937 if (nport->inline_data_size < 0) { in nvmet_rdma_add_port()
1938 nport->inline_data_size = NVMET_RDMA_DEFAULT_INLINE_DATA_SIZE; in nvmet_rdma_add_port()
1939 } else if (nport->inline_data_size > NVMET_RDMA_MAX_INLINE_DATA_SIZE) { in nvmet_rdma_add_port()
1941 nport->inline_data_size, in nvmet_rdma_add_port()
1943 nport->inline_data_size = NVMET_RDMA_MAX_INLINE_DATA_SIZE; in nvmet_rdma_add_port()
1946 ret = inet_pton_with_scope(&init_net, af, nport->disc_addr.traddr, in nvmet_rdma_add_port()
1947 nport->disc_addr.trsvcid, &port->addr); in nvmet_rdma_add_port()
1950 nport->disc_addr.traddr, nport->disc_addr.trsvcid); in nvmet_rdma_add_port()
1959 le16_to_cpu(nport->disc_addr.portid), in nvmet_rdma_add_port()
1960 (struct sockaddr *)&port->addr); in nvmet_rdma_add_port()
1971 struct nvmet_rdma_port *port = nport->priv; in nvmet_rdma_remove_port()
1973 cancel_delayed_work_sync(&port->repair_work); in nvmet_rdma_remove_port()
1981 struct nvmet_rdma_port *port = nport->priv; in nvmet_rdma_disc_port_addr()
1982 struct rdma_cm_id *cm_id = port->cm_id; in nvmet_rdma_disc_port_addr()
1984 if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) { in nvmet_rdma_disc_port_addr()
1987 struct rdma_cm_id *req_cm_id = rsp->queue->cm_id; in nvmet_rdma_disc_port_addr()
1988 struct sockaddr *addr = (void *)&req_cm_id->route.addr.src_addr; in nvmet_rdma_disc_port_addr()
1992 memcpy(traddr, nport->disc_addr.traddr, NVMF_TRADDR_SIZE); in nvmet_rdma_disc_port_addr()
1998 if (ctrl->pi_support) in nvmet_rdma_get_mdts()
2024 if (ndev->device == ib_device) { in nvmet_rdma_remove_one()
2041 if (queue->dev->device != ib_device) in nvmet_rdma_remove_one()
2044 pr_info("Removing queue %d\n", queue->idx); in nvmet_rdma_remove_one()
2045 list_del_init(&queue->queue_list); in nvmet_rdma_remove_one()
2089 MODULE_ALIAS("nvmet-transport-1"); /* 1 == NVMF_TRTYPE_RDMA */