Lines matching "supports", "-", "cqe" in drivers/nvme/host/fc.c (Linux NVMe-FC host transport)
1 // SPDX-License-Identifier: GPL-2.0
15 #include <linux/nvme-fc-driver.h>
16 #include <linux/nvme-fc.h>
65 struct list_head lsreq_list; /* rport->ls_req_list */
77 struct list_head lsrcv_list; /* rport->ls_rcv_list */
133 struct list_head endp_list; /* for lport->endp_list */
147 /* fc_ctrl flags values - specified as bit positions */
164 struct list_head ctrl_list; /* rport->ctrl_list */
229 * These items are short-term. They will eventually be moved into
236 /* *********************** FC-NVME Port Management ************************ */
251 WARN_ON(lport->localport.port_state != FC_OBJSTATE_DELETED); in nvme_fc_free_lport()
252 WARN_ON(!list_empty(&lport->endp_list)); in nvme_fc_free_lport()
256 list_del(&lport->port_list); in nvme_fc_free_lport()
261 ida_simple_remove(&nvme_fc_local_port_cnt, lport->localport.port_num); in nvme_fc_free_lport()
262 ida_destroy(&lport->endp_cnt); in nvme_fc_free_lport()
264 put_device(lport->dev); in nvme_fc_free_lport()
272 kref_put(&lport->ref, nvme_fc_free_lport); in nvme_fc_lport_put()
278 return kref_get_unless_zero(&lport->ref); in nvme_fc_lport_get()
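
/*
 * A minimal userspace sketch (C11 atomics; all names below are invented)
 * of the kref_get_unless_zero() idiom that nvme_fc_lport_get() and
 * nvme_fc_rport_get() rely on: a lookup may only take a reference while
 * the count is still non-zero, i.e. before teardown has begun.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct ref { atomic_int cnt; };

static bool ref_get_unless_zero(struct ref *r)
{
	int v = atomic_load(&r->cnt);

	while (v != 0)		/* 0 means the object is being freed */
		if (atomic_compare_exchange_weak(&r->cnt, &v, v + 1))
			return true;	/* reference safely taken */
	return false;		/* caller must treat the object as gone */
}
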
293 if (lport->localport.node_name != pinfo->node_name || in nvme_fc_attach_to_unreg_lport()
294 lport->localport.port_name != pinfo->port_name) in nvme_fc_attach_to_unreg_lport()
297 if (lport->dev != dev) { in nvme_fc_attach_to_unreg_lport()
298 lport = ERR_PTR(-EXDEV); in nvme_fc_attach_to_unreg_lport()
302 if (lport->localport.port_state != FC_OBJSTATE_DELETED) { in nvme_fc_attach_to_unreg_lport()
303 lport = ERR_PTR(-EEXIST); in nvme_fc_attach_to_unreg_lport()
318 lport->ops = ops; in nvme_fc_attach_to_unreg_lport()
319 lport->localport.port_role = pinfo->port_role; in nvme_fc_attach_to_unreg_lport()
320 lport->localport.port_id = pinfo->port_id; in nvme_fc_attach_to_unreg_lport()
321 lport->localport.port_state = FC_OBJSTATE_ONLINE; in nvme_fc_attach_to_unreg_lport()
337 * nvme_fc_register_localport - transport entry point called by an
351 * (ex: -ENXIO) upon failure.
363 if (!template->localport_delete || !template->remoteport_delete || in nvme_fc_register_localport()
364 !template->ls_req || !template->fcp_io || in nvme_fc_register_localport()
365 !template->ls_abort || !template->fcp_abort || in nvme_fc_register_localport()
366 !template->max_hw_queues || !template->max_sgl_segments || in nvme_fc_register_localport()
367 !template->max_dif_sgl_segments || !template->dma_boundary) { in nvme_fc_register_localport()
368 ret = -EINVAL; in nvme_fc_register_localport()
376 * expired, we can simply re-enable the localport. Remoteports in nvme_fc_register_localport()
388 *portptr = &newrec->localport; in nvme_fc_register_localport()
392 /* nothing found - allocate a new localport struct */ in nvme_fc_register_localport()
394 newrec = kmalloc((sizeof(*newrec) + template->local_priv_sz), in nvme_fc_register_localport()
397 ret = -ENOMEM; in nvme_fc_register_localport()
403 ret = -ENOSPC; in nvme_fc_register_localport()
408 ret = -ENODEV; in nvme_fc_register_localport()
412 INIT_LIST_HEAD(&newrec->port_list); in nvme_fc_register_localport()
413 INIT_LIST_HEAD(&newrec->endp_list); in nvme_fc_register_localport()
414 kref_init(&newrec->ref); in nvme_fc_register_localport()
415 atomic_set(&newrec->act_rport_cnt, 0); in nvme_fc_register_localport()
416 newrec->ops = template; in nvme_fc_register_localport()
417 newrec->dev = dev; in nvme_fc_register_localport()
418 ida_init(&newrec->endp_cnt); in nvme_fc_register_localport()
419 if (template->local_priv_sz) in nvme_fc_register_localport()
420 newrec->localport.private = &newrec[1]; in nvme_fc_register_localport()
422 newrec->localport.private = NULL; in nvme_fc_register_localport()
423 newrec->localport.node_name = pinfo->node_name; in nvme_fc_register_localport()
424 newrec->localport.port_name = pinfo->port_name; in nvme_fc_register_localport()
425 newrec->localport.port_role = pinfo->port_role; in nvme_fc_register_localport()
426 newrec->localport.port_id = pinfo->port_id; in nvme_fc_register_localport()
427 newrec->localport.port_state = FC_OBJSTATE_ONLINE; in nvme_fc_register_localport()
428 newrec->localport.port_num = idx; in nvme_fc_register_localport()
431 list_add_tail(&newrec->port_list, &nvme_fc_lport_list); in nvme_fc_register_localport()
435 dma_set_seg_boundary(dev, template->dma_boundary); in nvme_fc_register_localport()
437 *portptr = &newrec->localport; in nvme_fc_register_localport()
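
/*
 * The "newrec->localport.private = &newrec[1]" assignment above depends on
 * a single allocation of sizeof(*newrec) plus the LLDD's declared private
 * size. A standalone sketch of that tail-allocation idiom (struct and
 * sizes invented for illustration):
 */
#include <stdio.h>
#include <stdlib.h>

struct port_rec {
	int id;
	void *private;	/* points just past the struct when priv_sz > 0 */
};

int main(void)
{
	size_t priv_sz = 32;	/* stands in for template->local_priv_sz */
	struct port_rec *rec = calloc(1, sizeof(*rec) + priv_sz);

	if (!rec)
		return 1;
	rec->private = priv_sz ? (void *)&rec[1] : NULL;
	printf("struct at %p, private area at %p\n", (void *)rec, rec->private);
	free(rec);
	return 0;
}
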
452 * nvme_fc_unregister_localport - transport entry point called by an
459 * (ex: -ENXIO) upon failure.
468 return -EINVAL; in nvme_fc_unregister_localport()
472 if (portptr->port_state != FC_OBJSTATE_ONLINE) { in nvme_fc_unregister_localport()
474 return -EINVAL; in nvme_fc_unregister_localport()
476 portptr->port_state = FC_OBJSTATE_DELETED; in nvme_fc_unregister_localport()
480 if (atomic_read(&lport->act_rport_cnt) == 0) in nvme_fc_unregister_localport()
481 lport->ops->localport_delete(&lport->localport); in nvme_fc_unregister_localport()
490	 * TRADDR strings, per FC-NVME, are of a fixed format:
491 * "nn-0x<16hexdigits>:pn-0x<16hexdigits>" - 43 characters
494 * "NVMEFC_HOST_TRADDR=" or "NVMEFC_TRADDR=" - 19 max characters
507 if (!(rport->remoteport.port_role & FC_PORT_ROLE_NVME_DISCOVERY)) in nvme_fc_signal_discovery_scan()
511 "NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx", in nvme_fc_signal_discovery_scan()
512 lport->localport.node_name, lport->localport.port_name); in nvme_fc_signal_discovery_scan()
514 "NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx", in nvme_fc_signal_discovery_scan()
515 rport->remoteport.node_name, rport->remoteport.port_name); in nvme_fc_signal_discovery_scan()
516 kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp); in nvme_fc_signal_discovery_scan()
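
/*
 * A runnable sketch of the fixed 43-character TRADDR format documented
 * above ("nn-0x<16hexdigits>:pn-0x<16hexdigits>"); the helper name and
 * the WWN values are invented for illustration.
 */
#include <inttypes.h>
#include <stdio.h>
#include <string.h>

static int format_fc_traddr(char *buf, size_t len, uint64_t nn, uint64_t pn)
{
	int n = snprintf(buf, len, "nn-0x%016" PRIx64 ":pn-0x%016" PRIx64,
			 nn, pn);

	return (n == 43) ? 0 : -1;	/* anything else is malformed */
}

int main(void)
{
	char traddr[64];

	if (!format_fc_traddr(traddr, sizeof(traddr),
			      0x20000090fa942779ULL, 0x10000090fa942779ULL))
		printf("%s (%zu chars)\n", traddr, strlen(traddr));
	return 0;
}
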
525 localport_to_lport(rport->remoteport.localport); in nvme_fc_free_rport()
528 WARN_ON(rport->remoteport.port_state != FC_OBJSTATE_DELETED); in nvme_fc_free_rport()
529 WARN_ON(!list_empty(&rport->ctrl_list)); in nvme_fc_free_rport()
533 list_del(&rport->endp_list); in nvme_fc_free_rport()
536 WARN_ON(!list_empty(&rport->disc_list)); in nvme_fc_free_rport()
537 ida_simple_remove(&lport->endp_cnt, rport->remoteport.port_num); in nvme_fc_free_rport()
547 kref_put(&rport->ref, nvme_fc_free_rport); in nvme_fc_rport_put()
553 return kref_get_unless_zero(&rport->ref); in nvme_fc_rport_get()
559 switch (ctrl->ctrl.state) { in nvme_fc_resume_controller()
566 dev_info(ctrl->ctrl.device, in nvme_fc_resume_controller()
567 "NVME-FC{%d}: connectivity re-established. " in nvme_fc_resume_controller()
568 "Attempting reconnect\n", ctrl->cnum); in nvme_fc_resume_controller()
570 queue_delayed_work(nvme_wq, &ctrl->connect_work, 0); in nvme_fc_resume_controller()
582 /* no action to take - let it delete */ in nvme_fc_resume_controller()
597 list_for_each_entry(rport, &lport->endp_list, endp_list) { in nvme_fc_attach_to_suspended_rport()
598 if (rport->remoteport.node_name != pinfo->node_name || in nvme_fc_attach_to_suspended_rport()
599 rport->remoteport.port_name != pinfo->port_name) in nvme_fc_attach_to_suspended_rport()
603 rport = ERR_PTR(-ENOLCK); in nvme_fc_attach_to_suspended_rport()
609 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_attach_to_suspended_rport()
612 if (rport->remoteport.port_state != FC_OBJSTATE_DELETED) { in nvme_fc_attach_to_suspended_rport()
614 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_attach_to_suspended_rport()
616 return ERR_PTR(-ESTALE); in nvme_fc_attach_to_suspended_rport()
619 rport->remoteport.port_role = pinfo->port_role; in nvme_fc_attach_to_suspended_rport()
620 rport->remoteport.port_id = pinfo->port_id; in nvme_fc_attach_to_suspended_rport()
621 rport->remoteport.port_state = FC_OBJSTATE_ONLINE; in nvme_fc_attach_to_suspended_rport()
622 rport->dev_loss_end = 0; in nvme_fc_attach_to_suspended_rport()
628 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) in nvme_fc_attach_to_suspended_rport()
631 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_attach_to_suspended_rport()
648 if (pinfo->dev_loss_tmo) in __nvme_fc_set_dev_loss_tmo()
649 rport->remoteport.dev_loss_tmo = pinfo->dev_loss_tmo; in __nvme_fc_set_dev_loss_tmo()
651 rport->remoteport.dev_loss_tmo = NVME_FC_DEFAULT_DEV_LOSS_TMO; in __nvme_fc_set_dev_loss_tmo()
655 * nvme_fc_register_remoteport - transport entry point called by an
668 * (ex: -ENXIO) upon failure.
681 ret = -ESHUTDOWN; in nvme_fc_register_remoteport()
702 *portptr = &newrec->remoteport; in nvme_fc_register_remoteport()
706 /* nothing found - allocate a new remoteport struct */ in nvme_fc_register_remoteport()
708 newrec = kmalloc((sizeof(*newrec) + lport->ops->remote_priv_sz), in nvme_fc_register_remoteport()
711 ret = -ENOMEM; in nvme_fc_register_remoteport()
715 idx = ida_simple_get(&lport->endp_cnt, 0, 0, GFP_KERNEL); in nvme_fc_register_remoteport()
717 ret = -ENOSPC; in nvme_fc_register_remoteport()
721 INIT_LIST_HEAD(&newrec->endp_list); in nvme_fc_register_remoteport()
722 INIT_LIST_HEAD(&newrec->ctrl_list); in nvme_fc_register_remoteport()
723 INIT_LIST_HEAD(&newrec->ls_req_list); in nvme_fc_register_remoteport()
724 INIT_LIST_HEAD(&newrec->disc_list); in nvme_fc_register_remoteport()
725 kref_init(&newrec->ref); in nvme_fc_register_remoteport()
726 atomic_set(&newrec->act_ctrl_cnt, 0); in nvme_fc_register_remoteport()
727 spin_lock_init(&newrec->lock); in nvme_fc_register_remoteport()
728 newrec->remoteport.localport = &lport->localport; in nvme_fc_register_remoteport()
729 INIT_LIST_HEAD(&newrec->ls_rcv_list); in nvme_fc_register_remoteport()
730 newrec->dev = lport->dev; in nvme_fc_register_remoteport()
731 newrec->lport = lport; in nvme_fc_register_remoteport()
732 if (lport->ops->remote_priv_sz) in nvme_fc_register_remoteport()
733 newrec->remoteport.private = &newrec[1]; in nvme_fc_register_remoteport()
735 newrec->remoteport.private = NULL; in nvme_fc_register_remoteport()
736 newrec->remoteport.port_role = pinfo->port_role; in nvme_fc_register_remoteport()
737 newrec->remoteport.node_name = pinfo->node_name; in nvme_fc_register_remoteport()
738 newrec->remoteport.port_name = pinfo->port_name; in nvme_fc_register_remoteport()
739 newrec->remoteport.port_id = pinfo->port_id; in nvme_fc_register_remoteport()
740 newrec->remoteport.port_state = FC_OBJSTATE_ONLINE; in nvme_fc_register_remoteport()
741 newrec->remoteport.port_num = idx; in nvme_fc_register_remoteport()
743 INIT_WORK(&newrec->lsrcv_work, nvme_fc_handle_ls_rqst_work); in nvme_fc_register_remoteport()
746 list_add_tail(&newrec->endp_list, &lport->endp_list); in nvme_fc_register_remoteport()
751 *portptr = &newrec->remoteport; in nvme_fc_register_remoteport()
771 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_abort_lsops()
773 list_for_each_entry(lsop, &rport->ls_req_list, lsreq_list) { in nvme_fc_abort_lsops()
774 if (!(lsop->flags & FCOP_FLAGS_TERMIO)) { in nvme_fc_abort_lsops()
775 lsop->flags |= FCOP_FLAGS_TERMIO; in nvme_fc_abort_lsops()
776 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_abort_lsops()
777 rport->lport->ops->ls_abort(&rport->lport->localport, in nvme_fc_abort_lsops()
778 &rport->remoteport, in nvme_fc_abort_lsops()
779 &lsop->ls_req); in nvme_fc_abort_lsops()
783 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_abort_lsops()
791 dev_info(ctrl->ctrl.device, in nvme_fc_ctrl_connectivity_loss()
792 "NVME-FC{%d}: controller connectivity lost. Awaiting " in nvme_fc_ctrl_connectivity_loss()
793 "Reconnect", ctrl->cnum); in nvme_fc_ctrl_connectivity_loss()
795 switch (ctrl->ctrl.state) { in nvme_fc_ctrl_connectivity_loss()
805 if (nvme_reset_ctrl(&ctrl->ctrl)) { in nvme_fc_ctrl_connectivity_loss()
806 dev_warn(ctrl->ctrl.device, in nvme_fc_ctrl_connectivity_loss()
807 "NVME-FC{%d}: Couldn't schedule reset.\n", in nvme_fc_ctrl_connectivity_loss()
808 ctrl->cnum); in nvme_fc_ctrl_connectivity_loss()
809 nvme_delete_ctrl(&ctrl->ctrl); in nvme_fc_ctrl_connectivity_loss()
835 /* no action to take - let it delete */ in nvme_fc_ctrl_connectivity_loss()
841 * nvme_fc_unregister_remoteport - transport entry point called by an
849 * (ex: -ENXIO) upon failure.
859 return -EINVAL; in nvme_fc_unregister_remoteport()
861 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_unregister_remoteport()
863 if (portptr->port_state != FC_OBJSTATE_ONLINE) { in nvme_fc_unregister_remoteport()
864 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_unregister_remoteport()
865 return -EINVAL; in nvme_fc_unregister_remoteport()
867 portptr->port_state = FC_OBJSTATE_DELETED; in nvme_fc_unregister_remoteport()
869 rport->dev_loss_end = jiffies + (portptr->dev_loss_tmo * HZ); in nvme_fc_unregister_remoteport()
871 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { in nvme_fc_unregister_remoteport()
873 if (!portptr->dev_loss_tmo) { in nvme_fc_unregister_remoteport()
874 dev_warn(ctrl->ctrl.device, in nvme_fc_unregister_remoteport()
875 "NVME-FC{%d}: controller connectivity lost.\n", in nvme_fc_unregister_remoteport()
876 ctrl->cnum); in nvme_fc_unregister_remoteport()
877 nvme_delete_ctrl(&ctrl->ctrl); in nvme_fc_unregister_remoteport()
882 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_unregister_remoteport()
886 if (atomic_read(&rport->act_ctrl_cnt) == 0) in nvme_fc_unregister_remoteport()
887 rport->lport->ops->remoteport_delete(portptr); in nvme_fc_unregister_remoteport()
901 * nvme_fc_rescan_remoteport - transport entry point called by an
913 nvme_fc_signal_discovery_scan(rport->lport, rport); in nvme_fc_rescan_remoteport()
924 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_set_remoteport_devloss()
926 if (portptr->port_state != FC_OBJSTATE_ONLINE) { in nvme_fc_set_remoteport_devloss()
927 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_set_remoteport_devloss()
928 return -EINVAL; in nvme_fc_set_remoteport_devloss()
932 rport->remoteport.dev_loss_tmo = dev_loss_tmo; in nvme_fc_set_remoteport_devloss()
934 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_set_remoteport_devloss()
941 /* *********************** FC-NVME DMA Handling **************************** */
1006 s->dma_address = 0L; in fc_map_sg()
1008 s->dma_length = s->length; in fc_map_sg()
1029 /* *********************** FC-NVME LS Handling **************************** */
1039 struct nvme_fc_rport *rport = lsop->rport; in __nvme_fc_finish_ls_req()
1040 struct nvmefc_ls_req *lsreq = &lsop->ls_req; in __nvme_fc_finish_ls_req()
1043 spin_lock_irqsave(&rport->lock, flags); in __nvme_fc_finish_ls_req()
1045 if (!lsop->req_queued) { in __nvme_fc_finish_ls_req()
1046 spin_unlock_irqrestore(&rport->lock, flags); in __nvme_fc_finish_ls_req()
1050 list_del(&lsop->lsreq_list); in __nvme_fc_finish_ls_req()
1052 lsop->req_queued = false; in __nvme_fc_finish_ls_req()
1054 spin_unlock_irqrestore(&rport->lock, flags); in __nvme_fc_finish_ls_req()
1056 fc_dma_unmap_single(rport->dev, lsreq->rqstdma, in __nvme_fc_finish_ls_req()
1057 (lsreq->rqstlen + lsreq->rsplen), in __nvme_fc_finish_ls_req()
1068 struct nvmefc_ls_req *lsreq = &lsop->ls_req; in __nvme_fc_send_ls_req()
1072 if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) in __nvme_fc_send_ls_req()
1073 return -ECONNREFUSED; in __nvme_fc_send_ls_req()
1076 return -ESHUTDOWN; in __nvme_fc_send_ls_req()
1078 lsreq->done = done; in __nvme_fc_send_ls_req()
1079 lsop->rport = rport; in __nvme_fc_send_ls_req()
1080 lsop->req_queued = false; in __nvme_fc_send_ls_req()
1081 INIT_LIST_HEAD(&lsop->lsreq_list); in __nvme_fc_send_ls_req()
1082 init_completion(&lsop->ls_done); in __nvme_fc_send_ls_req()
1084 lsreq->rqstdma = fc_dma_map_single(rport->dev, lsreq->rqstaddr, in __nvme_fc_send_ls_req()
1085 lsreq->rqstlen + lsreq->rsplen, in __nvme_fc_send_ls_req()
1087 if (fc_dma_mapping_error(rport->dev, lsreq->rqstdma)) { in __nvme_fc_send_ls_req()
1088 ret = -EFAULT; in __nvme_fc_send_ls_req()
1091 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; in __nvme_fc_send_ls_req()
1093 spin_lock_irqsave(&rport->lock, flags); in __nvme_fc_send_ls_req()
1095 list_add_tail(&lsop->lsreq_list, &rport->ls_req_list); in __nvme_fc_send_ls_req()
1097 lsop->req_queued = true; in __nvme_fc_send_ls_req()
1099 spin_unlock_irqrestore(&rport->lock, flags); in __nvme_fc_send_ls_req()
1101 ret = rport->lport->ops->ls_req(&rport->lport->localport, in __nvme_fc_send_ls_req()
1102 &rport->remoteport, lsreq); in __nvme_fc_send_ls_req()
1109 lsop->ls_error = ret; in __nvme_fc_send_ls_req()
1110 spin_lock_irqsave(&rport->lock, flags); in __nvme_fc_send_ls_req()
1111 lsop->req_queued = false; in __nvme_fc_send_ls_req()
1112 list_del(&lsop->lsreq_list); in __nvme_fc_send_ls_req()
1113 spin_unlock_irqrestore(&rport->lock, flags); in __nvme_fc_send_ls_req()
1114 fc_dma_unmap_single(rport->dev, lsreq->rqstdma, in __nvme_fc_send_ls_req()
1115 (lsreq->rqstlen + lsreq->rsplen), in __nvme_fc_send_ls_req()
1128 lsop->ls_error = status; in nvme_fc_send_ls_req_done()
1129 complete(&lsop->ls_done); in nvme_fc_send_ls_req_done()
1135 struct nvmefc_ls_req *lsreq = &lsop->ls_req; in nvme_fc_send_ls_req()
1136 struct fcnvme_ls_rjt *rjt = lsreq->rspaddr; in nvme_fc_send_ls_req()
1148 wait_for_completion(&lsop->ls_done); in nvme_fc_send_ls_req()
1152 ret = lsop->ls_error; in nvme_fc_send_ls_req()
1159 if (rjt->w0.ls_cmd == FCNVME_LS_RJT) in nvme_fc_send_ls_req()
1160 return -ENXIO; in nvme_fc_send_ls_req()
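
/*
 * The accept/reject decode above hinges on the first word of the LS
 * response. A sketch, using the FC-NVME LS command values as understood
 * here (LS_RJT = 0x01, LS_ACC = 0x02); verify against
 * include/linux/nvme-fc.h before relying on them.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_FCNVME_LS_RJT	0x01
#define SKETCH_FCNVME_LS_ACC	0x02

static int ls_response_status(uint8_t ls_cmd)
{
	return (ls_cmd == SKETCH_FCNVME_LS_RJT) ? -6 /* -ENXIO */ : 0;
}

int main(void)
{
	printf("ACC -> %d, RJT -> %d\n",
	       ls_response_status(SKETCH_FCNVME_LS_ACC),
	       ls_response_status(SKETCH_FCNVME_LS_RJT));
	return 0;
}
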
1188 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); in nvme_fc_connect_admin_queue()
1190 dev_info(ctrl->ctrl.device, in nvme_fc_connect_admin_queue()
1191 "NVME-FC{%d}: send Create Association failed: ENOMEM\n", in nvme_fc_connect_admin_queue()
1192 ctrl->cnum); in nvme_fc_connect_admin_queue()
1193 ret = -ENOMEM; in nvme_fc_connect_admin_queue()
1199 lsreq = &lsop->ls_req; in nvme_fc_connect_admin_queue()
1200 if (ctrl->lport->ops->lsrqst_priv_sz) in nvme_fc_connect_admin_queue()
1201 lsreq->private = &assoc_acc[1]; in nvme_fc_connect_admin_queue()
1203 lsreq->private = NULL; in nvme_fc_connect_admin_queue()
1205 assoc_rqst->w0.ls_cmd = FCNVME_LS_CREATE_ASSOCIATION; in nvme_fc_connect_admin_queue()
1206 assoc_rqst->desc_list_len = in nvme_fc_connect_admin_queue()
1209 assoc_rqst->assoc_cmd.desc_tag = in nvme_fc_connect_admin_queue()
1211 assoc_rqst->assoc_cmd.desc_len = in nvme_fc_connect_admin_queue()
1215 assoc_rqst->assoc_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); in nvme_fc_connect_admin_queue()
1216 assoc_rqst->assoc_cmd.sqsize = cpu_to_be16(qsize - 1); in nvme_fc_connect_admin_queue()
1217 /* Linux supports only Dynamic controllers */ in nvme_fc_connect_admin_queue()
1218 assoc_rqst->assoc_cmd.cntlid = cpu_to_be16(0xffff); in nvme_fc_connect_admin_queue()
1219 uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id); in nvme_fc_connect_admin_queue()
1220 strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn, in nvme_fc_connect_admin_queue()
1222 strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn, in nvme_fc_connect_admin_queue()
1225 lsop->queue = queue; in nvme_fc_connect_admin_queue()
1226 lsreq->rqstaddr = assoc_rqst; in nvme_fc_connect_admin_queue()
1227 lsreq->rqstlen = sizeof(*assoc_rqst); in nvme_fc_connect_admin_queue()
1228 lsreq->rspaddr = assoc_acc; in nvme_fc_connect_admin_queue()
1229 lsreq->rsplen = sizeof(*assoc_acc); in nvme_fc_connect_admin_queue()
1230 lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC; in nvme_fc_connect_admin_queue()
1232 ret = nvme_fc_send_ls_req(ctrl->rport, lsop); in nvme_fc_connect_admin_queue()
1239 if (assoc_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC) in nvme_fc_connect_admin_queue()
1241 else if (assoc_acc->hdr.desc_list_len != in nvme_fc_connect_admin_queue()
1245 else if (assoc_acc->hdr.rqst.desc_tag != in nvme_fc_connect_admin_queue()
1248 else if (assoc_acc->hdr.rqst.desc_len != in nvme_fc_connect_admin_queue()
1251 else if (assoc_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_ASSOCIATION) in nvme_fc_connect_admin_queue()
1253 else if (assoc_acc->associd.desc_tag != in nvme_fc_connect_admin_queue()
1256 else if (assoc_acc->associd.desc_len != in nvme_fc_connect_admin_queue()
1260 else if (assoc_acc->connectid.desc_tag != in nvme_fc_connect_admin_queue()
1263 else if (assoc_acc->connectid.desc_len != in nvme_fc_connect_admin_queue()
1268 ret = -EBADF; in nvme_fc_connect_admin_queue()
1269 dev_err(ctrl->dev, in nvme_fc_connect_admin_queue()
1271 queue->qnum, validation_errors[fcret]); in nvme_fc_connect_admin_queue()
1273 spin_lock_irqsave(&ctrl->lock, flags); in nvme_fc_connect_admin_queue()
1274 ctrl->association_id = in nvme_fc_connect_admin_queue()
1275 be64_to_cpu(assoc_acc->associd.association_id); in nvme_fc_connect_admin_queue()
1276 queue->connection_id = in nvme_fc_connect_admin_queue()
1277 be64_to_cpu(assoc_acc->connectid.connection_id); in nvme_fc_connect_admin_queue()
1278 set_bit(NVME_FC_Q_CONNECTED, &queue->flags); in nvme_fc_connect_admin_queue()
1279 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_fc_connect_admin_queue()
1286 dev_err(ctrl->dev, in nvme_fc_connect_admin_queue()
1288 queue->qnum, ret); in nvme_fc_connect_admin_queue()
1304 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); in nvme_fc_connect_queue()
1306 dev_info(ctrl->ctrl.device, in nvme_fc_connect_queue()
1307 "NVME-FC{%d}: send Create Connection failed: ENOMEM\n", in nvme_fc_connect_queue()
1308 ctrl->cnum); in nvme_fc_connect_queue()
1309 ret = -ENOMEM; in nvme_fc_connect_queue()
1315 lsreq = &lsop->ls_req; in nvme_fc_connect_queue()
1316 if (ctrl->lport->ops->lsrqst_priv_sz) in nvme_fc_connect_queue()
1317 lsreq->private = (void *)&conn_acc[1]; in nvme_fc_connect_queue()
1319 lsreq->private = NULL; in nvme_fc_connect_queue()
1321 conn_rqst->w0.ls_cmd = FCNVME_LS_CREATE_CONNECTION; in nvme_fc_connect_queue()
1322 conn_rqst->desc_list_len = cpu_to_be32( in nvme_fc_connect_queue()
1326 conn_rqst->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); in nvme_fc_connect_queue()
1327 conn_rqst->associd.desc_len = in nvme_fc_connect_queue()
1330 conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id); in nvme_fc_connect_queue()
1331 conn_rqst->connect_cmd.desc_tag = in nvme_fc_connect_queue()
1333 conn_rqst->connect_cmd.desc_len = in nvme_fc_connect_queue()
1336 conn_rqst->connect_cmd.ersp_ratio = cpu_to_be16(ersp_ratio); in nvme_fc_connect_queue()
1337 conn_rqst->connect_cmd.qid = cpu_to_be16(queue->qnum); in nvme_fc_connect_queue()
1338 conn_rqst->connect_cmd.sqsize = cpu_to_be16(qsize - 1); in nvme_fc_connect_queue()
1340 lsop->queue = queue; in nvme_fc_connect_queue()
1341 lsreq->rqstaddr = conn_rqst; in nvme_fc_connect_queue()
1342 lsreq->rqstlen = sizeof(*conn_rqst); in nvme_fc_connect_queue()
1343 lsreq->rspaddr = conn_acc; in nvme_fc_connect_queue()
1344 lsreq->rsplen = sizeof(*conn_acc); in nvme_fc_connect_queue()
1345 lsreq->timeout = NVME_FC_LS_TIMEOUT_SEC; in nvme_fc_connect_queue()
1347 ret = nvme_fc_send_ls_req(ctrl->rport, lsop); in nvme_fc_connect_queue()
1354 if (conn_acc->hdr.w0.ls_cmd != FCNVME_LS_ACC) in nvme_fc_connect_queue()
1356 else if (conn_acc->hdr.desc_list_len != in nvme_fc_connect_queue()
1359 else if (conn_acc->hdr.rqst.desc_tag != cpu_to_be32(FCNVME_LSDESC_RQST)) in nvme_fc_connect_queue()
1361 else if (conn_acc->hdr.rqst.desc_len != in nvme_fc_connect_queue()
1364 else if (conn_acc->hdr.rqst.w0.ls_cmd != FCNVME_LS_CREATE_CONNECTION) in nvme_fc_connect_queue()
1366 else if (conn_acc->connectid.desc_tag != in nvme_fc_connect_queue()
1369 else if (conn_acc->connectid.desc_len != in nvme_fc_connect_queue()
1374 ret = -EBADF; in nvme_fc_connect_queue()
1375 dev_err(ctrl->dev, in nvme_fc_connect_queue()
1377 queue->qnum, validation_errors[fcret]); in nvme_fc_connect_queue()
1379 queue->connection_id = in nvme_fc_connect_queue()
1380 be64_to_cpu(conn_acc->connectid.connection_id); in nvme_fc_connect_queue()
1381 set_bit(NVME_FC_Q_CONNECTED, &queue->flags); in nvme_fc_connect_queue()
1388 dev_err(ctrl->dev, in nvme_fc_connect_queue()
1390 queue->qnum, ret); in nvme_fc_connect_queue()
1401 /* fc-nvme initiator doesn't care about success or failure of cmd */ in nvme_fc_disconnect_assoc_done()
1407 * This routine sends a FC-NVME LS to disconnect (aka terminate)
1408 * the FC-NVME Association. Terminating the association also
1409 * terminates the FC-NVME connections (per queue, both admin and io
1411 * down, and the related FC-NVME Association ID and Connection IDs
1414 * The behavior of the fc-nvme initiator is such that it's
1417 * connectivity with the fc-nvme target, so you may never get a
1420 * continue on with terminating the association. If the fc-nvme target
1434 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); in nvme_fc_xmt_disconnect_assoc()
1436 dev_info(ctrl->ctrl.device, in nvme_fc_xmt_disconnect_assoc()
1437 "NVME-FC{%d}: send Disconnect Association " in nvme_fc_xmt_disconnect_assoc()
1439 ctrl->cnum); in nvme_fc_xmt_disconnect_assoc()
1445 lsreq = &lsop->ls_req; in nvme_fc_xmt_disconnect_assoc()
1446 if (ctrl->lport->ops->lsrqst_priv_sz) in nvme_fc_xmt_disconnect_assoc()
1447 lsreq->private = (void *)&discon_acc[1]; in nvme_fc_xmt_disconnect_assoc()
1449 lsreq->private = NULL; in nvme_fc_xmt_disconnect_assoc()
1452 ctrl->association_id); in nvme_fc_xmt_disconnect_assoc()
1454 ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop, in nvme_fc_xmt_disconnect_assoc()
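
/*
 * A userspace sketch of the "send and don't wait" behavior described in
 * the comment block above: the Disconnect Association LS is issued
 * asynchronously and its completion callback deliberately ignores the
 * status. All names below are invented for illustration.
 */
#include <stdio.h>

typedef void (*ls_done_fn)(void *lsreq, int status);

static void disconnect_assoc_done(void *lsreq, int status)
{
	(void)lsreq;
	(void)status;	/* success or failure: teardown proceeds anyway */
}

static void send_ls_async(void *lsreq, ls_done_fn done)
{
	/* ...queue to hardware; the LLDD would later invoke: */
	done(lsreq, -1 /* e.g. no response from the target */);
}

int main(void)
{
	send_ls_async(NULL, disconnect_assoc_done);
	puts("disconnect LS sent; not waiting for a reply");
	return 0;
}
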
1463 struct nvmefc_ls_rcv_op *lsop = lsrsp->nvme_fc_private; in nvme_fc_xmt_ls_rsp_done()
1464 struct nvme_fc_rport *rport = lsop->rport; in nvme_fc_xmt_ls_rsp_done()
1465 struct nvme_fc_lport *lport = rport->lport; in nvme_fc_xmt_ls_rsp_done()
1468 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_xmt_ls_rsp_done()
1469 list_del(&lsop->lsrcv_list); in nvme_fc_xmt_ls_rsp_done()
1470 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_xmt_ls_rsp_done()
1472 fc_dma_sync_single_for_cpu(lport->dev, lsop->rspdma, in nvme_fc_xmt_ls_rsp_done()
1473 sizeof(*lsop->rspbuf), DMA_TO_DEVICE); in nvme_fc_xmt_ls_rsp_done()
1474 fc_dma_unmap_single(lport->dev, lsop->rspdma, in nvme_fc_xmt_ls_rsp_done()
1475 sizeof(*lsop->rspbuf), DMA_TO_DEVICE); in nvme_fc_xmt_ls_rsp_done()
1485 struct nvme_fc_rport *rport = lsop->rport; in nvme_fc_xmt_ls_rsp()
1486 struct nvme_fc_lport *lport = rport->lport; in nvme_fc_xmt_ls_rsp()
1487 struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0; in nvme_fc_xmt_ls_rsp()
1490 fc_dma_sync_single_for_device(lport->dev, lsop->rspdma, in nvme_fc_xmt_ls_rsp()
1491 sizeof(*lsop->rspbuf), DMA_TO_DEVICE); in nvme_fc_xmt_ls_rsp()
1493 ret = lport->ops->xmt_ls_rsp(&lport->localport, &rport->remoteport, in nvme_fc_xmt_ls_rsp()
1494 lsop->lsrsp); in nvme_fc_xmt_ls_rsp()
1496 dev_warn(lport->dev, in nvme_fc_xmt_ls_rsp()
1498 w0->ls_cmd, ret); in nvme_fc_xmt_ls_rsp()
1499 nvme_fc_xmt_ls_rsp_done(lsop->lsrsp); in nvme_fc_xmt_ls_rsp()
1509 &lsop->rqstbuf->rq_dis_assoc; in nvme_fc_match_disconn_ls()
1512 u64 association_id = be64_to_cpu(rqst->associd.association_id); in nvme_fc_match_disconn_ls()
1515 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_match_disconn_ls()
1517 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { in nvme_fc_match_disconn_ls()
1520 spin_lock(&ctrl->lock); in nvme_fc_match_disconn_ls()
1521 if (association_id == ctrl->association_id) { in nvme_fc_match_disconn_ls()
1522 oldls = ctrl->rcv_disconn; in nvme_fc_match_disconn_ls()
1523 ctrl->rcv_disconn = lsop; in nvme_fc_match_disconn_ls()
1526 spin_unlock(&ctrl->lock); in nvme_fc_match_disconn_ls()
1533 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_match_disconn_ls()
1537 dev_info(rport->lport->dev, in nvme_fc_match_disconn_ls()
1538 "NVME-FC{%d}: Multiple Disconnect Association " in nvme_fc_match_disconn_ls()
1539 "LS's received\n", ctrl->cnum); in nvme_fc_match_disconn_ls()
1541 oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, in nvme_fc_match_disconn_ls()
1542 sizeof(*oldls->rspbuf), in nvme_fc_match_disconn_ls()
1543 rqst->w0.ls_cmd, in nvme_fc_match_disconn_ls()
1560 struct nvme_fc_rport *rport = lsop->rport; in nvme_fc_ls_disconnect_assoc()
1562 &lsop->rqstbuf->rq_dis_assoc; in nvme_fc_ls_disconnect_assoc()
1564 &lsop->rspbuf->rsp_dis_assoc; in nvme_fc_ls_disconnect_assoc()
1570 ret = nvmefc_vldt_lsreq_discon_assoc(lsop->rqstdatalen, rqst); in nvme_fc_ls_disconnect_assoc()
1579 dev_info(rport->lport->dev, in nvme_fc_ls_disconnect_assoc()
1582 lsop->lsrsp->rsplen = nvme_fc_format_rjt(acc, in nvme_fc_ls_disconnect_assoc()
1583 sizeof(*acc), rqst->w0.ls_cmd, in nvme_fc_ls_disconnect_assoc()
1593 lsop->lsrsp->rsplen = sizeof(*acc); in nvme_fc_ls_disconnect_assoc()
1616 * Actual Processing routine for received FC-NVME LS Requests from the LLD
1623 struct fcnvme_ls_rqst_w0 *w0 = &lsop->rqstbuf->w0; in nvme_fc_handle_ls_rqst()
1626 lsop->lsrsp->nvme_fc_private = lsop; in nvme_fc_handle_ls_rqst()
1627 lsop->lsrsp->rspbuf = lsop->rspbuf; in nvme_fc_handle_ls_rqst()
1628 lsop->lsrsp->rspdma = lsop->rspdma; in nvme_fc_handle_ls_rqst()
1629 lsop->lsrsp->done = nvme_fc_xmt_ls_rsp_done; in nvme_fc_handle_ls_rqst()
1631 lsop->lsrsp->rsplen = 0; in nvme_fc_handle_ls_rqst()
1638 switch (w0->ls_cmd) { in nvme_fc_handle_ls_rqst()
1643 lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, in nvme_fc_handle_ls_rqst()
1644 sizeof(*lsop->rspbuf), w0->ls_cmd, in nvme_fc_handle_ls_rqst()
1649 lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, in nvme_fc_handle_ls_rqst()
1650 sizeof(*lsop->rspbuf), w0->ls_cmd, in nvme_fc_handle_ls_rqst()
1654 lsop->lsrsp->rsplen = nvme_fc_format_rjt(lsop->rspbuf, in nvme_fc_handle_ls_rqst()
1655 sizeof(*lsop->rspbuf), w0->ls_cmd, in nvme_fc_handle_ls_rqst()
1675 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_handle_ls_rqst_work()
1676 list_for_each_entry(lsop, &rport->ls_rcv_list, lsrcv_list) { in nvme_fc_handle_ls_rqst_work()
1677 if (lsop->handled) in nvme_fc_handle_ls_rqst_work()
1680 lsop->handled = true; in nvme_fc_handle_ls_rqst_work()
1681 if (rport->remoteport.port_state == FC_OBJSTATE_ONLINE) { in nvme_fc_handle_ls_rqst_work()
1682 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_handle_ls_rqst_work()
1685 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_handle_ls_rqst_work()
1686 w0 = &lsop->rqstbuf->w0; in nvme_fc_handle_ls_rqst_work()
1687 lsop->lsrsp->rsplen = nvme_fc_format_rjt( in nvme_fc_handle_ls_rqst_work()
1688 lsop->rspbuf, in nvme_fc_handle_ls_rqst_work()
1689 sizeof(*lsop->rspbuf), in nvme_fc_handle_ls_rqst_work()
1690 w0->ls_cmd, in nvme_fc_handle_ls_rqst_work()
1698 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_handle_ls_rqst_work()
1702 * nvme_fc_rcv_ls_req - transport entry point called by an LLDD
1705 * The nvme-fc layer will copy payload to an internal structure for
1726 struct nvme_fc_lport *lport = rport->lport; in nvme_fc_rcv_ls_req()
1735 if (!lport->ops->xmt_ls_rsp) { in nvme_fc_rcv_ls_req()
1736 dev_info(lport->dev, in nvme_fc_rcv_ls_req()
1738 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? in nvme_fc_rcv_ls_req()
1739 nvmefc_ls_names[w0->ls_cmd] : ""); in nvme_fc_rcv_ls_req()
1740 ret = -EINVAL; in nvme_fc_rcv_ls_req()
1745 dev_info(lport->dev, in nvme_fc_rcv_ls_req()
1747 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? in nvme_fc_rcv_ls_req()
1748 nvmefc_ls_names[w0->ls_cmd] : ""); in nvme_fc_rcv_ls_req()
1749 ret = -E2BIG; in nvme_fc_rcv_ls_req()
1758 dev_info(lport->dev, in nvme_fc_rcv_ls_req()
1760 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? in nvme_fc_rcv_ls_req()
1761 nvmefc_ls_names[w0->ls_cmd] : ""); in nvme_fc_rcv_ls_req()
1762 ret = -ENOMEM; in nvme_fc_rcv_ls_req()
1765 lsop->rqstbuf = (union nvmefc_ls_requests *)&lsop[1]; in nvme_fc_rcv_ls_req()
1766 lsop->rspbuf = (union nvmefc_ls_responses *)&lsop->rqstbuf[1]; in nvme_fc_rcv_ls_req()
1768 lsop->rspdma = fc_dma_map_single(lport->dev, lsop->rspbuf, in nvme_fc_rcv_ls_req()
1769 sizeof(*lsop->rspbuf), in nvme_fc_rcv_ls_req()
1771 if (fc_dma_mapping_error(lport->dev, lsop->rspdma)) { in nvme_fc_rcv_ls_req()
1772 dev_info(lport->dev, in nvme_fc_rcv_ls_req()
1774 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? in nvme_fc_rcv_ls_req()
1775 nvmefc_ls_names[w0->ls_cmd] : ""); in nvme_fc_rcv_ls_req()
1776 ret = -EFAULT; in nvme_fc_rcv_ls_req()
1780 lsop->rport = rport; in nvme_fc_rcv_ls_req()
1781 lsop->lsrsp = lsrsp; in nvme_fc_rcv_ls_req()
1783 memcpy(lsop->rqstbuf, lsreqbuf, lsreqbuf_len); in nvme_fc_rcv_ls_req()
1784 lsop->rqstdatalen = lsreqbuf_len; in nvme_fc_rcv_ls_req()
1786 spin_lock_irqsave(&rport->lock, flags); in nvme_fc_rcv_ls_req()
1787 if (rport->remoteport.port_state != FC_OBJSTATE_ONLINE) { in nvme_fc_rcv_ls_req()
1788 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_rcv_ls_req()
1789 ret = -ENOTCONN; in nvme_fc_rcv_ls_req()
1792 list_add_tail(&lsop->lsrcv_list, &rport->ls_rcv_list); in nvme_fc_rcv_ls_req()
1793 spin_unlock_irqrestore(&rport->lock, flags); in nvme_fc_rcv_ls_req()
1795 schedule_work(&rport->lsrcv_work); in nvme_fc_rcv_ls_req()
1800 fc_dma_unmap_single(lport->dev, lsop->rspdma, in nvme_fc_rcv_ls_req()
1801 sizeof(*lsop->rspbuf), DMA_TO_DEVICE); in nvme_fc_rcv_ls_req()
1817 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma, in __nvme_fc_exit_request()
1818 sizeof(op->rsp_iu), DMA_FROM_DEVICE); in __nvme_fc_exit_request()
1819 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma, in __nvme_fc_exit_request()
1820 sizeof(op->cmd_iu), DMA_TO_DEVICE); in __nvme_fc_exit_request()
1822 atomic_set(&op->state, FCPOP_STATE_UNINIT); in __nvme_fc_exit_request()
1831 return __nvme_fc_exit_request(set->driver_data, op); in nvme_fc_exit_request()
1840 spin_lock_irqsave(&ctrl->lock, flags); in __nvme_fc_abort_op()
1841 opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED); in __nvme_fc_abort_op()
1843 atomic_set(&op->state, opstate); in __nvme_fc_abort_op()
1844 else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) { in __nvme_fc_abort_op()
1845 op->flags |= FCOP_FLAGS_TERMIO; in __nvme_fc_abort_op()
1846 ctrl->iocnt++; in __nvme_fc_abort_op()
1848 spin_unlock_irqrestore(&ctrl->lock, flags); in __nvme_fc_abort_op()
1851 return -ECANCELED; in __nvme_fc_abort_op()
1853 ctrl->lport->ops->fcp_abort(&ctrl->lport->localport, in __nvme_fc_abort_op()
1854 &ctrl->rport->remoteport, in __nvme_fc_abort_op()
1855 op->queue->lldd_handle, in __nvme_fc_abort_op()
1856 &op->fcp_req); in __nvme_fc_abort_op()
1864 struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops; in nvme_fc_abort_aen_ops()
1868 if (!(aen_op->flags & FCOP_FLAGS_AEN)) in nvme_fc_abort_aen_ops()
1882 spin_lock_irqsave(&ctrl->lock, flags); in __nvme_fc_fcpop_chk_teardowns()
1883 if (test_bit(FCCTRL_TERMIO, &ctrl->flags) && in __nvme_fc_fcpop_chk_teardowns()
1884 op->flags & FCOP_FLAGS_TERMIO) { in __nvme_fc_fcpop_chk_teardowns()
1885 if (!--ctrl->iocnt) in __nvme_fc_fcpop_chk_teardowns()
1886 wake_up(&ctrl->ioabort_wait); in __nvme_fc_fcpop_chk_teardowns()
1888 spin_unlock_irqrestore(&ctrl->lock, flags); in __nvme_fc_fcpop_chk_teardowns()
1905 struct request *rq = op->rq; in nvme_fc_fcpio_done()
1906 struct nvmefc_fcp_req *freq = &op->fcp_req; in nvme_fc_fcpio_done()
1907 struct nvme_fc_ctrl *ctrl = op->ctrl; in nvme_fc_fcpio_done()
1908 struct nvme_fc_queue *queue = op->queue; in nvme_fc_fcpio_done()
1909	struct nvme_completion *cqe = &op->rsp_iu.cqe;	 in nvme_fc_fcpio_done()
1910 struct nvme_command *sqe = &op->cmd_iu.sqe; in nvme_fc_fcpio_done()
1925 * This affects the FC-NVME implementation in two ways: in nvme_fc_fcpio_done()
1930 * 2) The FC-NVME implementation requires that delivery of in nvme_fc_fcpio_done()
1939 * every field in the cqe - in cases where the FC transport must in nvme_fc_fcpio_done()
1940 * fabricate a CQE, the following fields will not be set as they in nvme_fc_fcpio_done()
1942 * cqe.sqid, cqe.sqhd, cqe.command_id in nvme_fc_fcpio_done()
1948 * Per FC-NVME spec, failure of an individual command requires in nvme_fc_fcpio_done()
1953 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); in nvme_fc_fcpio_done()
1955 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma, in nvme_fc_fcpio_done()
1956 sizeof(op->rsp_iu), DMA_FROM_DEVICE); in nvme_fc_fcpio_done()
1960 else if (freq->status) { in nvme_fc_fcpio_done()
1962 dev_info(ctrl->ctrl.device, in nvme_fc_fcpio_done()
1963 "NVME-FC{%d}: io failed due to lldd error %d\n", in nvme_fc_fcpio_done()
1964 ctrl->cnum, freq->status); in nvme_fc_fcpio_done()
1969	 * status, the blk-mq layer can typically be called with the	 in nvme_fc_fcpio_done()
1970 * non-zero status and the content of the cqe isn't important. in nvme_fc_fcpio_done()
1978 * extract the status and result from the cqe (create it in nvme_fc_fcpio_done()
1982 switch (freq->rcv_rsplen) { in nvme_fc_fcpio_done()
1989 * no payload in the CQE by the transport. in nvme_fc_fcpio_done()
1991 if (freq->transferred_length != in nvme_fc_fcpio_done()
1992 be32_to_cpu(op->cmd_iu.data_len)) { in nvme_fc_fcpio_done()
1994 dev_info(ctrl->ctrl.device, in nvme_fc_fcpio_done()
1995 "NVME-FC{%d}: io failed due to bad transfer " in nvme_fc_fcpio_done()
1997 ctrl->cnum, freq->transferred_length, in nvme_fc_fcpio_done()
1998 be32_to_cpu(op->cmd_iu.data_len)); in nvme_fc_fcpio_done()
2006 * The ERSP IU contains a full completion with CQE. in nvme_fc_fcpio_done()
2007 * Validate ERSP IU and look at cqe. in nvme_fc_fcpio_done()
2009 if (unlikely(be16_to_cpu(op->rsp_iu.iu_len) != in nvme_fc_fcpio_done()
2010 (freq->rcv_rsplen / 4) || in nvme_fc_fcpio_done()
2011 be32_to_cpu(op->rsp_iu.xfrd_len) != in nvme_fc_fcpio_done()
2012 freq->transferred_length || in nvme_fc_fcpio_done()
2013 op->rsp_iu.ersp_result || in nvme_fc_fcpio_done()
2014 sqe->common.command_id != cqe->command_id)) { in nvme_fc_fcpio_done()
2016 dev_info(ctrl->ctrl.device, in nvme_fc_fcpio_done()
2017 "NVME-FC{%d}: io failed due to bad NVMe_ERSP: " in nvme_fc_fcpio_done()
2020 ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len), in nvme_fc_fcpio_done()
2021 be32_to_cpu(op->rsp_iu.xfrd_len), in nvme_fc_fcpio_done()
2022 freq->transferred_length, in nvme_fc_fcpio_done()
2023 op->rsp_iu.ersp_result, in nvme_fc_fcpio_done()
2024 sqe->common.command_id, in nvme_fc_fcpio_done()
2025 cqe->command_id); in nvme_fc_fcpio_done()
2028 result = cqe->result; in nvme_fc_fcpio_done()
2029 status = cqe->status; in nvme_fc_fcpio_done()
2034 dev_info(ctrl->ctrl.device, in nvme_fc_fcpio_done()
2035 "NVME-FC{%d}: io failed due to odd NVMe_xRSP iu " in nvme_fc_fcpio_done()
2037 ctrl->cnum, freq->rcv_rsplen); in nvme_fc_fcpio_done()
2044 if (op->flags & FCOP_FLAGS_AEN) { in nvme_fc_fcpio_done()
2045 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result); in nvme_fc_fcpio_done()
2047 atomic_set(&op->state, FCPOP_STATE_IDLE); in nvme_fc_fcpio_done()
2048 op->flags = FCOP_FLAGS_AEN; /* clear other flags */ in nvme_fc_fcpio_done()
2058 if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING) in nvme_fc_fcpio_done()
2059 queue_work(nvme_reset_wq, &ctrl->ioerr_work); in nvme_fc_fcpio_done()
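
/*
 * Condensed, the NVMe_ERSP validation above amounts to the four-way
 * predicate sketched here (standalone types; the big-endian fields are
 * assumed to be already byte-swapped, as the driver does with
 * be16_to_cpu()/be32_to_cpu()).
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool ersp_is_valid(uint16_t iu_len, uint32_t rcv_rsplen,
			  uint32_t xfrd_len, uint32_t transferred_length,
			  uint8_t ersp_result, uint16_t sqe_cid,
			  uint16_t cqe_cid)
{
	return iu_len == rcv_rsplen / 4 &&	/* IU length is in words */
	       xfrd_len == transferred_length &&/* byte counts agree */
	       ersp_result == 0 &&		/* no transport-level error */
	       sqe_cid == cqe_cid;		/* CQE answers this SQE */
}

int main(void)
{
	/* a well-formed 32-byte ERSP for command id 7 moving 4096 bytes */
	printf("%d\n", ersp_is_valid(8, 32, 4096, 4096, 0, 7, 7));
	return 0;
}
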
2069 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; in __nvme_fc_init_request()
2073 op->fcp_req.cmdaddr = &op->cmd_iu; in __nvme_fc_init_request()
2074 op->fcp_req.cmdlen = sizeof(op->cmd_iu); in __nvme_fc_init_request()
2075 op->fcp_req.rspaddr = &op->rsp_iu; in __nvme_fc_init_request()
2076 op->fcp_req.rsplen = sizeof(op->rsp_iu); in __nvme_fc_init_request()
2077 op->fcp_req.done = nvme_fc_fcpio_done; in __nvme_fc_init_request()
2078 op->ctrl = ctrl; in __nvme_fc_init_request()
2079 op->queue = queue; in __nvme_fc_init_request()
2080 op->rq = rq; in __nvme_fc_init_request()
2081 op->rqno = rqno; in __nvme_fc_init_request()
2083 cmdiu->format_id = NVME_CMD_FORMAT_ID; in __nvme_fc_init_request()
2084 cmdiu->fc_id = NVME_CMD_FC_ID; in __nvme_fc_init_request()
2085 cmdiu->iu_len = cpu_to_be16(sizeof(*cmdiu) / sizeof(u32)); in __nvme_fc_init_request()
2086 if (queue->qnum) in __nvme_fc_init_request()
2087 cmdiu->rsv_cat = fccmnd_set_cat_css(0, in __nvme_fc_init_request()
2090 cmdiu->rsv_cat = fccmnd_set_cat_admin(0); in __nvme_fc_init_request()
2092 op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev, in __nvme_fc_init_request()
2093 &op->cmd_iu, sizeof(op->cmd_iu), DMA_TO_DEVICE); in __nvme_fc_init_request()
2094 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) { in __nvme_fc_init_request()
2095 dev_err(ctrl->dev, in __nvme_fc_init_request()
2096 "FCP Op failed - cmdiu dma mapping failed.\n"); in __nvme_fc_init_request()
2097 ret = -EFAULT; in __nvme_fc_init_request()
2101 op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev, in __nvme_fc_init_request()
2102 &op->rsp_iu, sizeof(op->rsp_iu), in __nvme_fc_init_request()
2104 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) { in __nvme_fc_init_request()
2105 dev_err(ctrl->dev, in __nvme_fc_init_request()
2106 "FCP Op failed - rspiu dma mapping failed.\n"); in __nvme_fc_init_request()
2107 ret = -EFAULT; in __nvme_fc_init_request()
2110 atomic_set(&op->state, FCPOP_STATE_IDLE); in __nvme_fc_init_request()
2119 struct nvme_fc_ctrl *ctrl = set->driver_data; in nvme_fc_init_request()
2121 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; in nvme_fc_init_request()
2122 struct nvme_fc_queue *queue = &ctrl->queues[queue_idx]; in nvme_fc_init_request()
2125 res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++); in nvme_fc_init_request()
2128 op->op.fcp_req.first_sgl = op->sgl; in nvme_fc_init_request()
2129 op->op.fcp_req.private = &op->priv[0]; in nvme_fc_init_request()
2130 nvme_req(rq)->ctrl = &ctrl->ctrl; in nvme_fc_init_request()
2143 aen_op = ctrl->aen_ops; in nvme_fc_init_aen_ops()
2145 if (ctrl->lport->ops->fcprqst_priv_sz) { in nvme_fc_init_aen_ops()
2146 private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz, in nvme_fc_init_aen_ops()
2149 return -ENOMEM; in nvme_fc_init_aen_ops()
2152 cmdiu = &aen_op->cmd_iu; in nvme_fc_init_aen_ops()
2153 sqe = &cmdiu->sqe; in nvme_fc_init_aen_ops()
2154 ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0], in nvme_fc_init_aen_ops()
2162 aen_op->flags = FCOP_FLAGS_AEN; in nvme_fc_init_aen_ops()
2163 aen_op->fcp_req.private = private; in nvme_fc_init_aen_ops()
2166 sqe->common.opcode = nvme_admin_async_event; in nvme_fc_init_aen_ops()
2168 sqe->common.command_id = NVME_AQ_BLK_MQ_DEPTH + i; in nvme_fc_init_aen_ops()
2179 cancel_work_sync(&ctrl->ctrl.async_event_work); in nvme_fc_term_aen_ops()
2180 aen_op = ctrl->aen_ops; in nvme_fc_term_aen_ops()
2184 kfree(aen_op->fcp_req.private); in nvme_fc_term_aen_ops()
2185 aen_op->fcp_req.private = NULL; in nvme_fc_term_aen_ops()
2193 struct nvme_fc_queue *queue = &ctrl->queues[qidx]; in __nvme_fc_init_hctx()
2195 hctx->driver_data = queue; in __nvme_fc_init_hctx()
2196 queue->hctx = hctx; in __nvme_fc_init_hctx()
2226 queue = &ctrl->queues[idx]; in nvme_fc_init_queue()
2228 queue->ctrl = ctrl; in nvme_fc_init_queue()
2229 queue->qnum = idx; in nvme_fc_init_queue()
2230 atomic_set(&queue->csn, 0); in nvme_fc_init_queue()
2231 queue->dev = ctrl->dev; in nvme_fc_init_queue()
2234 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; in nvme_fc_init_queue()
2236 queue->cmnd_capsule_len = sizeof(struct nvme_command); in nvme_fc_init_queue()
2240 * and CQEs and dma map them - mapping their respective entries in nvme_fc_init_queue()
2245 * a native NVME sqe/cqe. More reasonable if FC-NVME IU payload in nvme_fc_init_queue()
2261 if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags)) in nvme_fc_free_queue()
2264 clear_bit(NVME_FC_Q_LIVE, &queue->flags); in nvme_fc_free_queue()
2271 queue->connection_id = 0; in nvme_fc_free_queue()
2272 atomic_set(&queue->csn, 0); in nvme_fc_free_queue()
2279 if (ctrl->lport->ops->delete_queue) in __nvme_fc_delete_hw_queue()
2280 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx, in __nvme_fc_delete_hw_queue()
2281 queue->lldd_handle); in __nvme_fc_delete_hw_queue()
2282 queue->lldd_handle = NULL; in __nvme_fc_delete_hw_queue()
2290 for (i = 1; i < ctrl->ctrl.queue_count; i++) in nvme_fc_free_io_queues()
2291 nvme_fc_free_queue(&ctrl->queues[i]); in nvme_fc_free_io_queues()
2300 queue->lldd_handle = NULL; in __nvme_fc_create_hw_queue()
2301 if (ctrl->lport->ops->create_queue) in __nvme_fc_create_hw_queue()
2302 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport, in __nvme_fc_create_hw_queue()
2303 qidx, qsize, &queue->lldd_handle); in __nvme_fc_create_hw_queue()
2311 struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1]; in nvme_fc_delete_hw_io_queues()
2314 for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--) in nvme_fc_delete_hw_io_queues()
2321 struct nvme_fc_queue *queue = &ctrl->queues[1]; in nvme_fc_create_hw_io_queues()
2324 for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) { in nvme_fc_create_hw_io_queues()
2333 for (; i > 0; i--) in nvme_fc_create_hw_io_queues()
2334 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i); in nvme_fc_create_hw_io_queues()
2343 for (i = 1; i < ctrl->ctrl.queue_count; i++) { in nvme_fc_connect_io_queues()
2344 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize, in nvme_fc_connect_io_queues()
2348 ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false); in nvme_fc_connect_io_queues()
2352 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags); in nvme_fc_connect_io_queues()
2363 for (i = 1; i < ctrl->ctrl.queue_count; i++) in nvme_fc_init_io_queues()
2374 if (ctrl->ctrl.tagset) { in nvme_fc_ctrl_free()
2375 blk_cleanup_queue(ctrl->ctrl.connect_q); in nvme_fc_ctrl_free()
2376 blk_mq_free_tag_set(&ctrl->tag_set); in nvme_fc_ctrl_free()
2380 spin_lock_irqsave(&ctrl->rport->lock, flags); in nvme_fc_ctrl_free()
2381 list_del(&ctrl->ctrl_list); in nvme_fc_ctrl_free()
2382 spin_unlock_irqrestore(&ctrl->rport->lock, flags); in nvme_fc_ctrl_free()
2384 nvme_start_admin_queue(&ctrl->ctrl); in nvme_fc_ctrl_free()
2385 blk_cleanup_queue(ctrl->ctrl.admin_q); in nvme_fc_ctrl_free()
2386 blk_cleanup_queue(ctrl->ctrl.fabrics_q); in nvme_fc_ctrl_free()
2387 blk_mq_free_tag_set(&ctrl->admin_tag_set); in nvme_fc_ctrl_free()
2389 kfree(ctrl->queues); in nvme_fc_ctrl_free()
2391 put_device(ctrl->dev); in nvme_fc_ctrl_free()
2392 nvme_fc_rport_put(ctrl->rport); in nvme_fc_ctrl_free()
2394 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); in nvme_fc_ctrl_free()
2395 if (ctrl->ctrl.opts) in nvme_fc_ctrl_free()
2396 nvmf_free_options(ctrl->ctrl.opts); in nvme_fc_ctrl_free()
2403 kref_put(&ctrl->ref, nvme_fc_ctrl_free); in nvme_fc_ctrl_put()
2409 return kref_get_unless_zero(&ctrl->ref); in nvme_fc_ctrl_get()
2413 * All accesses from nvme core layer done - can now free the
2421 WARN_ON(nctrl != &ctrl->ctrl); in nvme_fc_nvme_ctrl_freed()
2446 op->nreq.flags |= NVME_REQ_CANCELLED; in nvme_fc_terminate_exchange()
2469 if (ctrl->ctrl.queue_count > 1) { in __nvme_fc_abort_outstanding_ios()
2470 for (q = 1; q < ctrl->ctrl.queue_count; q++) in __nvme_fc_abort_outstanding_ios()
2471 clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags); in __nvme_fc_abort_outstanding_ios()
2473 clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags); in __nvme_fc_abort_outstanding_ios()
2487 if (ctrl->ctrl.queue_count > 1) { in __nvme_fc_abort_outstanding_ios()
2488 nvme_stop_queues(&ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2489 nvme_sync_io_queues(&ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2490 blk_mq_tagset_busy_iter(&ctrl->tag_set, in __nvme_fc_abort_outstanding_ios()
2491 nvme_fc_terminate_exchange, &ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2492 blk_mq_tagset_wait_completed_request(&ctrl->tag_set); in __nvme_fc_abort_outstanding_ios()
2494 nvme_start_queues(&ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2498 * Other transports, which don't have link-level contexts bound in __nvme_fc_abort_outstanding_ios()
2512 nvme_stop_admin_queue(&ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2513 blk_sync_queue(ctrl->ctrl.admin_q); in __nvme_fc_abort_outstanding_ios()
2514 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, in __nvme_fc_abort_outstanding_ios()
2515 nvme_fc_terminate_exchange, &ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2516 blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set); in __nvme_fc_abort_outstanding_ios()
2529 if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) { in nvme_fc_error_recovery()
2531 set_bit(ASSOC_FAILED, &ctrl->flags); in nvme_fc_error_recovery()
2535 /* Otherwise, only proceed if in LIVE state - e.g. on first error */ in nvme_fc_error_recovery()
2536 if (ctrl->ctrl.state != NVME_CTRL_LIVE) in nvme_fc_error_recovery()
2539 dev_warn(ctrl->ctrl.device, in nvme_fc_error_recovery()
2540 "NVME-FC{%d}: transport association event: %s\n", in nvme_fc_error_recovery()
2541 ctrl->cnum, errmsg); in nvme_fc_error_recovery()
2542 dev_warn(ctrl->ctrl.device, in nvme_fc_error_recovery()
2543 "NVME-FC{%d}: resetting controller\n", ctrl->cnum); in nvme_fc_error_recovery()
2545 nvme_reset_ctrl(&ctrl->ctrl); in nvme_fc_error_recovery()
2552 struct nvme_fc_ctrl *ctrl = op->ctrl; in nvme_fc_timeout()
2553 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; in nvme_fc_timeout()
2554 struct nvme_command *sqe = &cmdiu->sqe; in nvme_fc_timeout()
2560 dev_info(ctrl->ctrl.device, in nvme_fc_timeout()
2561 "NVME-FC{%d.%d}: io timeout: opcode %d fctype %d w10/11: " in nvme_fc_timeout()
2563 ctrl->cnum, op->queue->qnum, sqe->common.opcode, in nvme_fc_timeout()
2564 sqe->connect.fctype, sqe->common.cdw10, sqe->common.cdw11); in nvme_fc_timeout()
2580 struct nvmefc_fcp_req *freq = &op->fcp_req; in nvme_fc_map_data()
2583 freq->sg_cnt = 0; in nvme_fc_map_data()
2588 freq->sg_table.sgl = freq->first_sgl; in nvme_fc_map_data()
2589 ret = sg_alloc_table_chained(&freq->sg_table, in nvme_fc_map_data()
2590 blk_rq_nr_phys_segments(rq), freq->sg_table.sgl, in nvme_fc_map_data()
2593 return -ENOMEM; in nvme_fc_map_data()
2595 op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl); in nvme_fc_map_data()
2596 WARN_ON(op->nents > blk_rq_nr_phys_segments(rq)); in nvme_fc_map_data()
2597 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl, in nvme_fc_map_data()
2598 op->nents, rq_dma_dir(rq)); in nvme_fc_map_data()
2599 if (unlikely(freq->sg_cnt <= 0)) { in nvme_fc_map_data()
2600 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT); in nvme_fc_map_data()
2601 freq->sg_cnt = 0; in nvme_fc_map_data()
2602 return -EFAULT; in nvme_fc_map_data()
2615 struct nvmefc_fcp_req *freq = &op->fcp_req; in nvme_fc_unmap_data()
2617 if (!freq->sg_cnt) in nvme_fc_unmap_data()
2620 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents, in nvme_fc_unmap_data()
2623 sg_free_table_chained(&freq->sg_table, NVME_INLINE_SG_CNT); in nvme_fc_unmap_data()
2625 freq->sg_cnt = 0; in nvme_fc_unmap_data()
2636 * as part of the exchange. The CQE is the last thing for the io,
2638 * sent on the exchange. After the CQE is received, the FC exchange is
2647 * So - while the operation is outstanding to the LLDD, there is a link
2656 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; in nvme_fc_start_fcp_op()
2657 struct nvme_command *sqe = &cmdiu->sqe; in nvme_fc_start_fcp_op()
2664 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) in nvme_fc_start_fcp_op()
2670 /* format the FC-NVME CMD IU and fcp_req */ in nvme_fc_start_fcp_op()
2671 cmdiu->connection_id = cpu_to_be64(queue->connection_id); in nvme_fc_start_fcp_op()
2672 cmdiu->data_len = cpu_to_be32(data_len); in nvme_fc_start_fcp_op()
2675 cmdiu->flags = FCNVME_CMD_FLAGS_WRITE; in nvme_fc_start_fcp_op()
2678 cmdiu->flags = FCNVME_CMD_FLAGS_READ; in nvme_fc_start_fcp_op()
2681 cmdiu->flags = 0; in nvme_fc_start_fcp_op()
2684 op->fcp_req.payload_length = data_len; in nvme_fc_start_fcp_op()
2685 op->fcp_req.io_dir = io_dir; in nvme_fc_start_fcp_op()
2686 op->fcp_req.transferred_length = 0; in nvme_fc_start_fcp_op()
2687 op->fcp_req.rcv_rsplen = 0; in nvme_fc_start_fcp_op()
2688 op->fcp_req.status = NVME_SC_SUCCESS; in nvme_fc_start_fcp_op()
2689 op->fcp_req.sqid = cpu_to_le16(queue->qnum); in nvme_fc_start_fcp_op()
2693 * as well as those by FC-NVME spec. in nvme_fc_start_fcp_op()
2695 WARN_ON_ONCE(sqe->common.metadata); in nvme_fc_start_fcp_op()
2696 sqe->common.flags |= NVME_CMD_SGL_METABUF; in nvme_fc_start_fcp_op()
2699 * format SQE DPTR field per FC-NVME rules: in nvme_fc_start_fcp_op()
2701 * subtype=0xA Transport-specific value in nvme_fc_start_fcp_op()
2705 sqe->rw.dptr.sgl.type = (NVME_TRANSPORT_SGL_DATA_DESC << 4) | in nvme_fc_start_fcp_op()
2707 sqe->rw.dptr.sgl.length = cpu_to_le32(data_len); in nvme_fc_start_fcp_op()
2708 sqe->rw.dptr.sgl.addr = 0; in nvme_fc_start_fcp_op()
2710 if (!(op->flags & FCOP_FLAGS_AEN)) { in nvme_fc_start_fcp_op()
2711 ret = nvme_fc_map_data(ctrl, op->rq, op); in nvme_fc_start_fcp_op()
2713 nvme_cleanup_cmd(op->rq); in nvme_fc_start_fcp_op()
2715 if (ret == -ENOMEM || ret == -EAGAIN) in nvme_fc_start_fcp_op()
2721 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma, in nvme_fc_start_fcp_op()
2722 sizeof(op->cmd_iu), DMA_TO_DEVICE); in nvme_fc_start_fcp_op()
2724 atomic_set(&op->state, FCPOP_STATE_ACTIVE); in nvme_fc_start_fcp_op()
2726 if (!(op->flags & FCOP_FLAGS_AEN)) in nvme_fc_start_fcp_op()
2727 blk_mq_start_request(op->rq); in nvme_fc_start_fcp_op()
2729 cmdiu->csn = cpu_to_be32(atomic_inc_return(&queue->csn)); in nvme_fc_start_fcp_op()
2730 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport, in nvme_fc_start_fcp_op()
2731 &ctrl->rport->remoteport, in nvme_fc_start_fcp_op()
2732 queue->lldd_handle, &op->fcp_req); in nvme_fc_start_fcp_op()
2738 * no - as the connection won't be live. If it is a command in nvme_fc_start_fcp_op()
2739 * post-connect, it's possible a gap in csn may be created. in nvme_fc_start_fcp_op()
2747 opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE); in nvme_fc_start_fcp_op()
2750 if (!(op->flags & FCOP_FLAGS_AEN)) { in nvme_fc_start_fcp_op()
2751 nvme_fc_unmap_data(ctrl, op->rq, op); in nvme_fc_start_fcp_op()
2752 nvme_cleanup_cmd(op->rq); in nvme_fc_start_fcp_op()
2757 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE && in nvme_fc_start_fcp_op()
2758 ret != -EBUSY) in nvme_fc_start_fcp_op()
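
/*
 * The SQE DPTR rule quoted above (a single Transport SGL Data Block
 * descriptor, sub-type 0xA, address zero because the data rides the FC
 * exchange itself) in isolation. The 16-byte descriptor layout and the
 * type value 0x5 reflect one reading of the NVMe SGL definitions; treat
 * them as illustrative.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sgl_desc {
	uint64_t addr;		/* stays 0: no host memory address */
	uint32_t length;	/* total command data length */
	uint8_t  rsvd[3];
	uint8_t  type;		/* high nibble type, low nibble sub-type */
};

static void fcnvme_fill_dptr(struct sgl_desc *sgl, uint32_t data_len)
{
	memset(sgl, 0, sizeof(*sgl));
	sgl->type = (0x5 << 4) | 0xA;	/* Transport SGL Data Block, 0xA */
	sgl->length = data_len;
}

int main(void)
{
	struct sgl_desc sgl;

	fcnvme_fill_dptr(&sgl, 4096);
	printf("type byte 0x%02x, length %u\n", sgl.type,
	       (unsigned)sgl.length);
	return 0;
}
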
2771 struct nvme_ns *ns = hctx->queue->queuedata; in nvme_fc_queue_rq()
2772 struct nvme_fc_queue *queue = hctx->driver_data; in nvme_fc_queue_rq()
2773 struct nvme_fc_ctrl *ctrl = queue->ctrl; in nvme_fc_queue_rq()
2774 struct request *rq = bd->rq; in nvme_fc_queue_rq()
2776 struct nvme_fc_cmd_iu *cmdiu = &op->cmd_iu; in nvme_fc_queue_rq()
2777 struct nvme_command *sqe = &cmdiu->sqe; in nvme_fc_queue_rq()
2779 bool queue_ready = test_bit(NVME_FC_Q_LIVE, &queue->flags); in nvme_fc_queue_rq()
2783 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE || in nvme_fc_queue_rq()
2784 !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) in nvme_fc_queue_rq()
2785 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq); in nvme_fc_queue_rq()
2793 * as WRITE ZEROES will return a non-zero rq payload_bytes yet in nvme_fc_queue_rq()
2819 if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) in nvme_fc_submit_async_event()
2822 aen_op = &ctrl->aen_ops[0]; in nvme_fc_submit_async_event()
2824 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0, in nvme_fc_submit_async_event()
2827 dev_err(ctrl->ctrl.device, in nvme_fc_submit_async_event()
2835 struct nvme_fc_ctrl *ctrl = op->ctrl; in nvme_fc_complete_rq()
2837 atomic_set(&op->state, FCPOP_STATE_IDLE); in nvme_fc_complete_rq()
2838 op->flags &= ~FCOP_FLAGS_TERMIO; in nvme_fc_complete_rq()
2858 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; in nvme_fc_create_io_queues()
2862 nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), in nvme_fc_create_io_queues()
2863 ctrl->lport->ops->max_hw_queues); in nvme_fc_create_io_queues()
2864 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); in nvme_fc_create_io_queues()
2866 dev_info(ctrl->ctrl.device, in nvme_fc_create_io_queues()
2871 ctrl->ctrl.queue_count = nr_io_queues + 1; in nvme_fc_create_io_queues()
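
/*
 * A worked example of the queue-count clamp above: the requested count is
 * limited by both the online CPU count and the LLDD's max_hw_queues, and
 * queue 0 is always the admin queue, hence the +1. Values are invented
 * for illustration.
 */
#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int requested = 8;	/* opts->nr_io_queues */
	unsigned int online_cpus = 4;	/* num_online_cpus() */
	unsigned int max_hw = 6;	/* lport->ops->max_hw_queues */
	unsigned int nr_io_queues =
		min_u(min_u(requested, online_cpus), max_hw);

	printf("nr_io_queues=%u, queue_count=%u\n",
	       nr_io_queues, nr_io_queues + 1);
	return 0;
}
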
2877 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); in nvme_fc_create_io_queues()
2878 ctrl->tag_set.ops = &nvme_fc_mq_ops; in nvme_fc_create_io_queues()
2879 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; in nvme_fc_create_io_queues()
2880 ctrl->tag_set.reserved_tags = 1; /* fabric connect */ in nvme_fc_create_io_queues()
2881 ctrl->tag_set.numa_node = ctrl->ctrl.numa_node; in nvme_fc_create_io_queues()
2882 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; in nvme_fc_create_io_queues()
2883 ctrl->tag_set.cmd_size = in nvme_fc_create_io_queues()
2885 ctrl->lport->ops->fcprqst_priv_sz); in nvme_fc_create_io_queues()
2886 ctrl->tag_set.driver_data = ctrl; in nvme_fc_create_io_queues()
2887 ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1; in nvme_fc_create_io_queues()
2888 ctrl->tag_set.timeout = NVME_IO_TIMEOUT; in nvme_fc_create_io_queues()
2890 ret = blk_mq_alloc_tag_set(&ctrl->tag_set); in nvme_fc_create_io_queues()
2894 ctrl->ctrl.tagset = &ctrl->tag_set; in nvme_fc_create_io_queues()
2896 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); in nvme_fc_create_io_queues()
2897 if (IS_ERR(ctrl->ctrl.connect_q)) { in nvme_fc_create_io_queues()
2898 ret = PTR_ERR(ctrl->ctrl.connect_q); in nvme_fc_create_io_queues()
2902 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); in nvme_fc_create_io_queues()
2906 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); in nvme_fc_create_io_queues()
2910 ctrl->ioq_live = true; in nvme_fc_create_io_queues()
2917 blk_cleanup_queue(ctrl->ctrl.connect_q); in nvme_fc_create_io_queues()
2919 blk_mq_free_tag_set(&ctrl->tag_set); in nvme_fc_create_io_queues()
2923 ctrl->ctrl.tagset = NULL; in nvme_fc_create_io_queues()
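/*
 * The nested min() at the top of the fragment above clamps the I/O
 * queue count three ways: what the user asked for, how many CPUs are
 * online, and how many hardware queues the HBA exposes; the admin
 * queue is then counted separately (queue_count = nr_io_queues + 1).
 * The same arithmetic as a standalone sketch with invented inputs:
 */
static unsigned int clamp_io_queues(unsigned int requested,
                                    unsigned int online_cpus,
                                    unsigned int hba_hw_queues)
{
        unsigned int n = requested;

        if (n > online_cpus)
                n = online_cpus;        /* no point in more queues than CPUs */
        if (n > hba_hw_queues)
                n = hba_hw_queues;      /* HBA hardware limit */
        return n;                       /* queue_count becomes n + 1 (admin) */
}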
/* fragment: nvme_fc_recreate_io_queues() */
        struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
        u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1;

        nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()),
                                ctrl->lport->ops->max_hw_queues);
        ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
        if (ret) {
                dev_info(ctrl->ctrl.device,
                        "set_queue_count failed: %d\n", ret);
                return ret;
        }

        if (!nr_io_queues && prior_ioq_cnt) {
                dev_info(ctrl->ctrl.device,
                        "Fail Reconnect: At least 1 io queue "
                        "required (was %d)\n", prior_ioq_cnt);
                return -ENOSPC;
        }

        ctrl->ctrl.queue_count = nr_io_queues + 1;
        if (ctrl->ctrl.queue_count == 1)
                return 0;

        if (prior_ioq_cnt != nr_io_queues) {
                dev_info(ctrl->ctrl.device,
                        "reconnect: revising io queue count from %d to %d\n",
                        prior_ioq_cnt, nr_io_queues);
                nvme_wait_freeze(&ctrl->ctrl);
                blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues);
                nvme_unfreeze(&ctrl->ctrl);
        }

        ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
        /* ... */
        ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1);
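/*
 * A reconnect may be granted a different queue count than the prior
 * association had, and the wait_freeze/update/unfreeze sequence above
 * drains in-flight I/O before blk-mq's hardware-queue map is rewritten.
 * A minimal runnable sketch of that ordering, with stub functions
 * standing in for the kernel calls (all names here are invented):
 */
#include <stdio.h>

static unsigned int hw_queues = 4;

static void freeze_io(void)   { printf("freeze I/O\n"); }   /* ~ nvme_wait_freeze() */
static void unfreeze_io(void) { printf("unfreeze I/O\n"); } /* ~ nvme_unfreeze() */

static void set_nr_hw_queues(unsigned int n) /* ~ blk_mq_update_nr_hw_queues() */
{
        hw_queues = n;
}

static void maybe_resize(unsigned int prior_cnt, unsigned int new_cnt)
{
        if (prior_cnt == new_cnt)
                return;                 /* same shape: keep the existing map */

        freeze_io();                    /* quiesce before touching the map */
        set_nr_hw_queues(new_cnt);
        unfreeze_io();                  /* let new I/O flow again */
}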
/* fragment: nvme_fc_rport_active_on_lport() */
        struct nvme_fc_lport *lport = rport->lport;

        atomic_inc(&lport->act_rport_cnt);

/* fragment: nvme_fc_rport_inactive_on_lport() */
        struct nvme_fc_lport *lport = rport->lport;
        u32 cnt;

        cnt = atomic_dec_return(&lport->act_rport_cnt);
        if (cnt == 0 && lport->localport.port_state == FC_OBJSTATE_DELETED)
                lport->ops->localport_delete(&lport->localport);
/* fragment: nvme_fc_ctlr_active_on_rport() */
        struct nvme_fc_rport *rport = ctrl->rport;

        if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags))
                return 1;

        cnt = atomic_inc_return(&rport->act_ctrl_cnt);
        /* ... */

/* fragment: nvme_fc_ctlr_inactive_on_rport() */
        struct nvme_fc_rport *rport = ctrl->rport;
        struct nvme_fc_lport *lport = rport->lport;

        /* clearing of ctrl->flags ASSOC_ACTIVE bit is in association delete */

        cnt = atomic_dec_return(&rport->act_ctrl_cnt);
        /* ... */
        if (rport->remoteport.port_state == FC_OBJSTATE_DELETED)
                lport->ops->remoteport_delete(&rport->remoteport);
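/*
 * Both *_inactive_on_* helpers above share one idiom: the caller that
 * drops the last active reference performs the deferred delete, but
 * only if the object was already marked DELETED while users remained.
 * A userspace sketch with C11 atomics (struct and names invented;
 * note atomic_fetch_sub() returns the value *before* the decrement,
 * where the kernel's atomic_dec_return() returns the value after):
 */
#include <stdatomic.h>
#include <stdbool.h>

struct port {
        _Atomic int active_cnt;
        bool deleted;                           /* set at unregister time */
        void (*delete_cb)(struct port *);
};

static void port_put_active(struct port *p)
{
        if (atomic_fetch_sub(&p->active_cnt, 1) == 1 && p->deleted)
                p->delete_cb(p);                /* last user runs teardown */
}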
/* fragment: nvme_fc_create_association() */
        struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;

        ++ctrl->ctrl.nr_reconnects;

        if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
                return -ENODEV;
        if (nvme_fc_ctlr_active_on_rport(ctrl))
                return -ENOTUNIQ;

        dev_info(ctrl->ctrl.device,
                "NVME-FC{%d}: create association : host wwpn 0x%016llx "
                " rport wwpn 0x%016llx: NQN \"%s\"\n",
                ctrl->cnum, ctrl->lport->localport.port_name,
                ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn);

        clear_bit(ASSOC_FAILED, &ctrl->flags);

        /* ... create and connect the admin queue ... */
        ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0,
                                NVME_AQ_DEPTH);
        if (ret)
                goto out_free_queue;
        ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0],
                                NVME_AQ_DEPTH, (NVME_AQ_DEPTH / 4));
        if (ret)
                goto out_delete_hw_queue;
        ret = nvmf_connect_admin_queue(&ctrl->ctrl);
        if (ret)
                goto out_disconnect_admin_queue;

        set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);

        /* ...
         * todo:- add code to check if ctrl attributes changed from
         * ... */

        ret = nvme_enable_ctrl(&ctrl->ctrl);
        if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
                goto out_disconnect_admin_queue;

        ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments;
        ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments <<
                                                (ilog2(SZ_4K) - 9);

        nvme_start_admin_queue(&ctrl->ctrl);

        ret = nvme_init_identify(&ctrl->ctrl);
        if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
                goto out_disconnect_admin_queue;

        /* FC-NVME does not have other data in the capsule */
        if (ctrl->ctrl.icdoff) {
                dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n",
                                ctrl->ctrl.icdoff);
                goto out_disconnect_admin_queue;
        }

        /* FC-NVME supports normal SGL Data Block Descriptors */

        if (opts->queue_size > ctrl->ctrl.maxcmd) {
                /* warn if maxcmd is lower than queue_size */
                dev_warn(ctrl->ctrl.device,
                        "queue_size %zu > ctrl maxcmd %u, reducing "
                        "to maxcmd\n",
                        opts->queue_size, ctrl->ctrl.maxcmd);
                opts->queue_size = ctrl->ctrl.maxcmd;
        }
        if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
                /* warn if sqsize is lower than queue_size */
                dev_warn(ctrl->ctrl.device,
                        "queue_size %zu > ctrl sqsize %u, reducing "
                        "to sqsize\n",
                        opts->queue_size, ctrl->ctrl.sqsize + 1);
                opts->queue_size = ctrl->ctrl.sqsize + 1;
        }

        /* ... */
        if (ctrl->ctrl.queue_count > 1) {
                if (!ctrl->ioq_live)
                        ret = nvme_fc_create_io_queues(ctrl);
                else
                        ret = nvme_fc_recreate_io_queues(ctrl);
        }
        if (ret || test_bit(ASSOC_FAILED, &ctrl->flags))
                goto out_term_aen_ops;

        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);

        ctrl->ctrl.nr_reconnects = 0;

        if (changed)
                nvme_start_ctrl(&ctrl->ctrl);
        return 0;       /* Success */

out_term_aen_ops:
        /* ... */
out_disconnect_admin_queue:
        /* send a Disconnect(association) LS to fc-nvme target */
        /* ... */
        spin_lock_irqsave(&ctrl->lock, flags);
        ctrl->association_id = 0;
        disls = ctrl->rcv_disconn;
        ctrl->rcv_disconn = NULL;
        spin_unlock_irqrestore(&ctrl->lock, flags);
        /* ... */
out_delete_hw_queue:
        __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
out_free_queue:
        nvme_fc_free_queue(&ctrl->queues[0]);
        clear_bit(ASSOC_ACTIVE, &ctrl->flags);
        /* ... */
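/*
 * The two queue_size clamps in the fragment above negotiate the
 * effective queue depth: the user's request may exceed what the
 * controller advertises (MAXCMD) or what the submission queue can hold
 * (sqsize is 0's-based, hence the +1). The same negotiation as a
 * standalone sketch:
 */
static unsigned int negotiate_queue_size(unsigned int requested,
                                         unsigned int maxcmd,
                                         unsigned int sqsize_0based)
{
        if (requested > maxcmd)
                requested = maxcmd;             /* controller capsule limit */
        if (requested > sqsize_0based + 1)
                requested = sqsize_0based + 1;  /* SQ entry limit */
        return requested;
}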
/* fragment: nvme_fc_delete_association() */
        if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags))
                return;

        spin_lock_irqsave(&ctrl->lock, flags);
        set_bit(FCCTRL_TERMIO, &ctrl->flags);
        ctrl->iocnt = 0;
        spin_unlock_irqrestore(&ctrl->lock, flags);

        /* ... abort outstanding I/O ... */

        /* wait for all io that had to be aborted */
        spin_lock_irq(&ctrl->lock);
        wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock);
        clear_bit(FCCTRL_TERMIO, &ctrl->flags);
        spin_unlock_irq(&ctrl->lock);

        /* ... */

        /*
         * send a Disconnect(association) LS to fc-nvme target
         * ...
         */
        if (ctrl->association_id)
                nvme_fc_xmt_disconnect_assoc(ctrl);

        spin_lock_irqsave(&ctrl->lock, flags);
        ctrl->association_id = 0;
        disls = ctrl->rcv_disconn;
        ctrl->rcv_disconn = NULL;
        spin_unlock_irqrestore(&ctrl->lock, flags);
        /* ... */

        if (ctrl->ctrl.tagset) {
                nvme_fc_delete_hw_io_queues(ctrl);
                nvme_fc_free_io_queues(ctrl);
        }

        __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0);
        nvme_fc_free_queue(&ctrl->queues[0]);

        /* re-enable the admin_q so anything new can fast fail */
        nvme_start_admin_queue(&ctrl->ctrl);

        /* resume the io queues so anything new can fast fail */
        nvme_start_queues(&ctrl->ctrl);
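/*
 * The wait_event_lock_irq() earlier in this fragment sleeps until every
 * aborted I/O has been reaped (iocnt reaches 0), re-taking ctrl->lock
 * around each check of the condition. A userspace analogue of the same
 * wait using a pthread condition variable; the completion side signals
 * after decrementing (names invented):
 */
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t ioabort_wait = PTHREAD_COND_INITIALIZER;
static int iocnt;

static void wait_for_aborts(void)
{
        pthread_mutex_lock(&lock);
        while (iocnt != 0)              /* condition rechecked under the lock */
                pthread_cond_wait(&ioabort_wait, &lock);
        pthread_mutex_unlock(&lock);
}

static void io_aborted_one(void)
{
        pthread_mutex_lock(&lock);
        if (--iocnt == 0)
                pthread_cond_signal(&ioabort_wait);
        pthread_mutex_unlock(&lock);
}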
/* fragment: nvme_fc_delete_ctrl() */
        cancel_work_sync(&ctrl->ioerr_work);
        cancel_delayed_work_sync(&ctrl->connect_work);
/* fragment: nvme_fc_reconnect_or_delete() */
        struct nvme_fc_rport *rport = ctrl->rport;
        struct nvme_fc_remote_port *portptr = &rport->remoteport;
        unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
        bool recon = true;

        if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
                return;

        if (portptr->port_state == FC_OBJSTATE_ONLINE)
                dev_info(ctrl->ctrl.device,
                        "NVME-FC{%d}: reset: Reconnect attempt failed (%d)\n",
                        ctrl->cnum, status);
        else if (time_after_eq(jiffies, rport->dev_loss_end))
                recon = false;

        if (recon && nvmf_should_reconnect(&ctrl->ctrl)) {
                if (portptr->port_state == FC_OBJSTATE_ONLINE)
                        dev_info(ctrl->ctrl.device,
                                "NVME-FC{%d}: Reconnect attempt in %ld "
                                "seconds\n",
                                ctrl->cnum, recon_delay / HZ);
                else if (time_after(jiffies + recon_delay, rport->dev_loss_end))
                        recon_delay = rport->dev_loss_end - jiffies;

                queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay);
        } else {
                if (portptr->port_state == FC_OBJSTATE_ONLINE)
                        dev_warn(ctrl->ctrl.device,
                                "NVME-FC{%d}: Max reconnect attempts (%d) "
                                "reached.\n",
                                ctrl->cnum, ctrl->ctrl.nr_reconnects);
                else
                        dev_warn(ctrl->ctrl.device,
                                "NVME-FC{%d}: dev_loss_tmo (%d) expired "
                                "while waiting for remoteport connectivity.\n",
                                ctrl->cnum, min_t(int, portptr->dev_loss_tmo,
                                (ctrl->ctrl.opts->max_reconnects *
                                 ctrl->ctrl.opts->reconnect_delay)));
                WARN_ON(nvme_delete_ctrl(&ctrl->ctrl));
        }
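/*
 * The time_after() clamp above keeps the next reconnect attempt from
 * being scheduled past dev_loss_end: if a full reconnect_delay would
 * overshoot the deadline, the delay shrinks to land exactly on it.
 * The same arithmetic as a sketch, with monotonic ticks standing in
 * for jiffies:
 */
static unsigned long clamp_recon_delay(unsigned long now,
                                       unsigned long recon_delay,
                                       unsigned long dev_loss_end)
{
        /* (long) cast mirrors the kernel's wraparound-safe time_after() */
        if ((long)(dev_loss_end - (now + recon_delay)) < 0)
                recon_delay = dev_loss_end - now;
        return recon_delay;
}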
/* fragment: nvme_fc_reset_ctrl_work() */
        nvme_stop_ctrl(&ctrl->ctrl);
        /* ... */
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
                dev_err(ctrl->ctrl.device,
                        "NVME-FC{%d}: error_recovery: Couldn't change state "
                        "to CONNECTING\n", ctrl->cnum);

        if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) {
                if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
                        dev_err(ctrl->ctrl.device,
                                "NVME-FC{%d}: failed to schedule connect "
                                "after reset\n", ctrl->cnum);
                } else {
                        flush_delayed_work(&ctrl->connect_work);
                }
        } else {
                nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN);
        }
/* fragment: nvme_fc_connect_ctrl_work() */
                dev_info(ctrl->ctrl.device,
                        "NVME-FC{%d}: controller connect complete\n",
                        ctrl->cnum);
/* fragment: nvme_fc_existing_controller() */
        spin_lock_irqsave(&rport->lock, flags);
        list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
                found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts);
                if (found)
                        break;
        }
        spin_unlock_irqrestore(&rport->lock, flags);
/* fragment: nvme_fc_init_ctrl() */
        if (!(rport->remoteport.port_role &
            (FC_PORT_ROLE_NVME_DISCOVERY | FC_PORT_ROLE_NVME_TARGET))) {
                ret = -EBADR;
                goto out_fail;
        }
        if (!opts->duplicate_connect &&
            nvme_fc_existing_controller(rport, opts)) {
                ret = -EALREADY;
                goto out_fail;
        }

        ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
        if (!ctrl) {
                ret = -ENOMEM;
                goto out_fail;
        }
        idx = ida_simple_get(&nvme_fc_ctrl_cnt, 0, 0, GFP_KERNEL);
        if (idx < 0) {
                ret = -ENOSPC;
                goto out_free_ctrl;
        }

        /* enforce ctrl_loss_tmo with FC's shorter default reconnect delay */
        if (opts->max_reconnects != -1 &&
            opts->reconnect_delay == NVMF_DEF_RECONNECT_DELAY &&
            opts->reconnect_delay > NVME_FC_DEFAULT_RECONNECT_TMO) {
                ctrl_loss_tmo = opts->max_reconnects * opts->reconnect_delay;
                opts->reconnect_delay = NVME_FC_DEFAULT_RECONNECT_TMO;
                opts->max_reconnects = DIV_ROUND_UP(ctrl_loss_tmo,
                                                opts->reconnect_delay);
        }
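/*
 * The block above rescales max_reconnects when FC substitutes its
 * shorter default reconnect delay: the user-visible controller-loss
 * window (max_reconnects * reconnect_delay) is preserved, rounded up
 * so it never shrinks. The same arithmetic standalone (sketch only;
 * the macro mirrors the kernel's DIV_ROUND_UP()):
 */
#define DIV_ROUND_UP_SKETCH(n, d)       (((n) + (d) - 1) / (d))

static int rescale_max_reconnects(int max_reconnects, int old_delay,
                                  int new_delay)
{
        int ctrl_loss_tmo = max_reconnects * old_delay; /* total window, secs */

        return DIV_ROUND_UP_SKETCH(ctrl_loss_tmo, new_delay);
}
/* e.g. 60 attempts x 10s = 600s window -> 300 attempts at a 2s delay */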
        ctrl->ctrl.opts = opts;
        ctrl->ctrl.nr_reconnects = 0;
        if (lport->dev)
                ctrl->ctrl.numa_node = dev_to_node(lport->dev);
        else
                ctrl->ctrl.numa_node = NUMA_NO_NODE;
        INIT_LIST_HEAD(&ctrl->ctrl_list);
        ctrl->lport = lport;
        ctrl->rport = rport;
        ctrl->dev = lport->dev;
        ctrl->cnum = idx;
        ctrl->ioq_live = false;
        init_waitqueue_head(&ctrl->ioabort_wait);

        get_device(ctrl->dev);
        kref_init(&ctrl->ref);

        INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
        INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
        INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work);
        spin_lock_init(&ctrl->lock);

        /* io queue count */
        ctrl->ctrl.queue_count = min_t(unsigned int,
                                opts->nr_io_queues,
                                lport->ops->max_hw_queues);
        ctrl->ctrl.queue_count++;       /* +1 for admin queue */

        ctrl->ctrl.sqsize = opts->queue_size - 1;
        ctrl->ctrl.kato = opts->kato;
        ctrl->ctrl.cntlid = 0xffff;

        ret = -ENOMEM;
        ctrl->queues = kcalloc(ctrl->ctrl.queue_count,
                                sizeof(struct nvme_fc_queue), GFP_KERNEL);
        if (!ctrl->queues)
                goto out_free_ida;

        /* ... */

        memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
        ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops;
        ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
        ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */
        ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node;
        ctrl->admin_tag_set.cmd_size =
                struct_size((struct nvme_fcp_op_w_sgl *)NULL, priv,
                            ctrl->lport->ops->fcprqst_priv_sz);
        ctrl->admin_tag_set.driver_data = ctrl;
        ctrl->admin_tag_set.nr_hw_queues = 1;
        ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
        ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

        ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
        if (ret)
                goto out_free_queues;
        ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

        ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
        if (IS_ERR(ctrl->ctrl.fabrics_q)) {
                ret = PTR_ERR(ctrl->ctrl.fabrics_q);
                goto out_free_admin_tag_set;
        }

        ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
        if (IS_ERR(ctrl->ctrl.admin_q)) {
                ret = PTR_ERR(ctrl->ctrl.admin_q);
                goto out_cleanup_fabrics_q;
        }

        /* ... */

        ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0);
        if (ret)
                goto out_cleanup_admin_q;

        /* at this point, teardown path changes to ref counting on nvme ctrl */

        spin_lock_irqsave(&rport->lock, flags);
        list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list);
        spin_unlock_irqrestore(&rport->lock, flags);

        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) ||
            !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
                dev_err(ctrl->ctrl.device,
                        "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum);
                goto fail_ctrl;
        }

        if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) {
                dev_err(ctrl->ctrl.device,
                        "NVME-FC{%d}: failed to schedule initial connect\n",
                        ctrl->cnum);
                goto fail_ctrl;
        }

        flush_delayed_work(&ctrl->connect_work);

        dev_info(ctrl->ctrl.device,
                "NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
                ctrl->cnum, ctrl->ctrl.opts->subsysnqn);

        return &ctrl->ctrl;

fail_ctrl:
        nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
        cancel_work_sync(&ctrl->ioerr_work);
        cancel_work_sync(&ctrl->ctrl.reset_work);
        cancel_delayed_work_sync(&ctrl->connect_work);

        ctrl->ctrl.opts = NULL;

        /* initiate nvme ctrl ref counting teardown */
        nvme_uninit_ctrl(&ctrl->ctrl);

        /* Remove core ctrl ref. */
        nvme_put_ctrl(&ctrl->ctrl);

        /* ... */

        return ERR_PTR(-EIO);

out_cleanup_admin_q:
        blk_cleanup_queue(ctrl->ctrl.admin_q);
out_cleanup_fabrics_q:
        blk_cleanup_queue(ctrl->ctrl.fabrics_q);
out_free_admin_tag_set:
        blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_queues:
        kfree(ctrl->queues);
out_free_ida:
        put_device(ctrl->dev);
        ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum);
        /* ... */
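/*
 * The tail of nvme_fc_init_ctrl() above is the kernel's goto-unwind
 * idiom: each allocation gets a label, and a failure at step N jumps
 * to the label that frees steps N-1..1 in reverse order. A compact
 * standalone illustration of the same structure:
 */
#include <stdlib.h>

static int setup_three(void **a, void **b, void **c)
{
        *a = malloc(16);
        if (!*a)
                goto out_fail;
        *b = malloc(16);
        if (!*b)
                goto out_free_a;
        *c = malloc(16);
        if (!*c)
                goto out_free_b;
        return 0;

out_free_b:     /* undo step 2 */
        free(*b);
out_free_a:     /* undo step 1 */
        free(*a);
out_fail:
        return -1;
}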
/* fragment: __nvme_fc_parse_u64() */
        if (match_u64(sstr, &token64))
                return -EINVAL;
/* fragment: nvme_fc_parse_traddr() */
        substring_t wwn = { name, &name[sizeof(name)-1] };

        /* accept "nn-0x...:pn-0x..." or the shorter "nn-...:pn-..." form */
        if (/* ... */
            !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
            !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
                        "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
                /* ... */
        } else if (/* ... */
            !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
            !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
                        "pn-", NVME_FC_TRADDR_NNLEN)) {
                /* ... */
        } else
                goto out_einval;

        if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
                goto out_einval;
        if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
                goto out_einval;
        return 0;

out_einval:
        return -EINVAL;
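/*
 * nvme_fc_parse_traddr() above accepts "nn-0x<16 hex>:pn-0x<16 hex>"
 * (and the shorter "nn-...:pn-..." variant) and extracts the two
 * 64-bit WWNs. A self-contained sketch of the long form using sscanf
 * instead of the kernel's substring/match_u64 machinery (function name
 * invented):
 */
#include <inttypes.h>
#include <stdio.h>

/* returns 0 on success; nn/pn receive the parsed WWNs */
static int parse_traddr_sketch(const char *buf, uint64_t *nn, uint64_t *pn)
{
        if (sscanf(buf, "nn-0x%16" SCNx64 ":pn-0x%16" SCNx64, nn, pn) != 2)
                return -1;
        return 0;
}

int main(void)
{
        uint64_t nn, pn;

        if (!parse_traddr_sketch("nn-0x20000090fa942779:pn-0x10000090fa942779",
                                 &nn, &pn))
                printf("nn=0x%016" PRIx64 " pn=0x%016" PRIx64 "\n", nn, pn);
        return 0;
}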
/* fragment: nvme_fc_create_ctrl() */
        ret = nvme_fc_parse_traddr(&raddr, opts->traddr, NVMF_TRADDR_SIZE);
        if (ret || !raddr.nn || !raddr.pn)
                return ERR_PTR(-EINVAL);

        ret = nvme_fc_parse_traddr(&laddr, opts->host_traddr, NVMF_TRADDR_SIZE);
        if (ret || !laddr.nn || !laddr.pn)
                return ERR_PTR(-EINVAL);

        /* find the host and remote ports to connect together */
        spin_lock_irqsave(&nvme_fc_lock, flags);
        list_for_each_entry(lport, &nvme_fc_lport_list, port_list) {
                if (lport->localport.node_name != laddr.nn ||
                    lport->localport.port_name != laddr.pn ||
                    lport->localport.port_state != FC_OBJSTATE_ONLINE)
                        continue;

                list_for_each_entry(rport, &lport->endp_list, endp_list) {
                        if (rport->remoteport.node_name != raddr.nn ||
                            rport->remoteport.port_name != raddr.pn ||
                            rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
                                continue;

                        /* ... found: create the controller ... */
                }
        }
        spin_unlock_irqrestore(&nvme_fc_lock, flags);

        pr_warn("%s: %s - %s combination not found\n",
                __func__, opts->traddr, opts->host_traddr);
        return ERR_PTR(-ENOENT);
/* fragment: nvme_fc_nvme_discovery_store() */
        list_for_each_entry(rport, &lport->endp_list, endp_list) {
                /* ... */
                if (list_empty(&rport->disc_list))
                        list_add_tail(&rport->disc_list,
                                      &local_disc_list);
        }
        /* ... */
        list_del_init(&rport->disc_list);
        /* ... */
        lport = rport->lport;
/* fragment: nvme_fc_init_module() */
                return -ENOMEM;

        /* ...
         * the FC-isms that are currently under scsi and now being
         * ...
         * As we need something to post FC-specific udev events to,
         * ... */

        /* Create a device for the FC-centric udev events */
/* fragment: nvme_fc_delete_controllers() */
        spin_lock(&rport->lock);
        list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) {
                dev_warn(ctrl->ctrl.device,
                        "NVME-FC{%d}: transport unloading: deleting ctrl\n",
                        ctrl->cnum);
                nvme_delete_ctrl(&ctrl->ctrl);
        }
        spin_unlock(&rport->lock);
/* fragment: nvme_fc_cleanup_for_unload() */
                list_for_each_entry(rport, &lport->endp_list, endp_list)
                        nvme_fc_delete_controllers(rport);