Lines matching full:ctrl — identifier search results over the NVMe over RDMA host driver (drivers/nvme/host/rdma.c in the Linux tree). Each hit is shown as <line number> <source fragment> in <enclosing function>(), with a trailing member/local/argument tag marking the kind of occurrence.
89 struct nvme_rdma_ctrl *ctrl; member
125 struct nvme_ctrl ctrl; member
130 static inline struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl) in to_rdma_ctrl() argument
132 return container_of(ctrl, struct nvme_rdma_ctrl, ctrl); in to_rdma_ctrl()
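
The two `member` hits (lines 89 and 125) plus the to_rdma_ctrl() accessor show the driver's core layering: a struct nvme_ctrl is embedded inside struct nvme_rdma_ctrl, and container_of() recovers the outer transport-specific structure from a pointer to the embedded generic one. A minimal self-contained sketch of that idiom, with toy stand-in types rather than the kernel's:

    #include <stddef.h>
    #include <stdio.h>

    /* Toy stand-ins for the kernel structures. */
    struct nvme_ctrl { int queue_count; };
    struct nvme_rdma_ctrl {
        int              transport_state;  /* illustrative field */
        struct nvme_ctrl ctrl;             /* embedded generic ctrl */
    };

    /* container_of(), reduced to its essence: subtract the member's
     * offset to get back to the enclosing structure. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct nvme_rdma_ctrl *to_rdma_ctrl(struct nvme_ctrl *ctrl)
    {
        return container_of(ctrl, struct nvme_rdma_ctrl, ctrl);
    }

    int main(void)
    {
        struct nvme_rdma_ctrl outer = { .ctrl = { .queue_count = 4 } };

        /* Recover the outer structure from the embedded member. */
        printf("%d\n", to_rdma_ctrl(&outer.ctrl) == &outer);  /* prints 1 */
        return 0;
    }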
161 return queue - queue->ctrl->queues; in nvme_rdma_queue_idx()
167 queue->ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_rdma_poll_queue()
168 queue->ctrl->io_queues[HCTX_TYPE_READ]; in nvme_rdma_poll_queue()
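
nvme_rdma_queue_idx() derives a queue's index purely by pointer arithmetic against the ctrl's queue array, and the poll-queue test at lines 167-168 relies on the layout convention that poll queues sit at the tail, after the default and read queues. A self-contained sketch of both helpers (toy types; array size and counts are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    enum { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL, HCTX_MAX_TYPES };

    struct toy_ctrl;
    struct toy_queue { struct toy_ctrl *ctrl; };
    struct toy_ctrl {
        unsigned int     io_queues[HCTX_MAX_TYPES];
        struct toy_queue queues[8];   /* [0] is the admin queue */
    };

    /* Index = element distance from the start of the queue array. */
    static int queue_idx(struct toy_queue *q)
    {
        return q - q->ctrl->queues;
    }

    /* Poll queues occupy the tail: their index lies beyond the
     * default + read counts. */
    static bool is_poll_queue(struct toy_queue *q)
    {
        return queue_idx(q) > (int)(q->ctrl->io_queues[HCTX_TYPE_DEFAULT] +
                                    q->ctrl->io_queues[HCTX_TYPE_READ]);
    }

    int main(void)
    {
        struct toy_ctrl c = { .io_queues = { 2, 2, 3 } };
        for (int i = 0; i < 8; i++)
            c.queues[i].ctrl = &c;
        printf("queue 5 poll? %d\n", is_poll_queue(&c.queues[5]));  /* 1 */
        return 0;
    }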
300 struct nvme_rdma_ctrl *ctrl = set->driver_data; in nvme_rdma_init_request() local
302 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; in nvme_rdma_init_request()
303 struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx]; in nvme_rdma_init_request()
305 nvme_req(rq)->ctrl = &ctrl->ctrl; in nvme_rdma_init_request()
324 struct nvme_rdma_ctrl *ctrl = data; in nvme_rdma_init_hctx() local
325 struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_rdma_init_hctx()
327 BUG_ON(hctx_idx >= ctrl->ctrl.queue_count); in nvme_rdma_init_hctx()
336 struct nvme_rdma_ctrl *ctrl = data; in nvme_rdma_init_admin_hctx() local
337 struct nvme_rdma_queue *queue = &ctrl->queues[0]; in nvme_rdma_init_admin_hctx()
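
Lines 300-337 encode one convention twice: queue 0 is the admin queue, so requests from the admin tag set map to ctrl->queues[0], while hardware context hctx_idx of the I/O tag set maps to ctrl->queues[hctx_idx + 1]. As a one-liner, reusing the toy types from the sketch above:

    /* Admin set -> queue 0; I/O set hctx i -> queue i + 1, because
     * queues[0] is reserved for the admin queue. */
    static struct toy_queue *hctx_to_queue(struct toy_ctrl *ctrl,
                                           bool is_admin_set,
                                           unsigned int hctx_idx)
    {
        return &ctrl->queues[is_admin_set ? 0 : hctx_idx + 1];
    }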
539 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_create_queue_ib()
550 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_create_queue_ib()
575 static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl, in nvme_rdma_alloc_queue() argument
582 queue = &ctrl->queues[idx]; in nvme_rdma_alloc_queue()
584 queue->ctrl = ctrl; in nvme_rdma_alloc_queue()
585 if (idx && ctrl->ctrl.max_integrity_segments) in nvme_rdma_alloc_queue()
592 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; in nvme_rdma_alloc_queue()
601 dev_info(ctrl->ctrl.device, in nvme_rdma_alloc_queue()
607 if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) in nvme_rdma_alloc_queue()
608 src_addr = (struct sockaddr *)&ctrl->src_addr; in nvme_rdma_alloc_queue()
612 (struct sockaddr *)&ctrl->addr, in nvme_rdma_alloc_queue()
615 dev_info(ctrl->ctrl.device, in nvme_rdma_alloc_queue()
622 dev_info(ctrl->ctrl.device, in nvme_rdma_alloc_queue()
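
One detail worth pulling out of nvme_rdma_alloc_queue(): line 592 sizes an I/O queue's command capsule as ioccsz * 16, because the NVMe-oF IOCCSZ field counts 16-byte units; the admin queue (the else branch, which this search does not match) falls back to the bare 64-byte SQE. A hedged sketch of that computation:

    #define NVME_SQE_SIZE 64u   /* sizeof(struct nvme_command) */

    /* I/O queues: IOCCSZ is in 16-byte units, so the capsule holds the
     * SQE plus any in-capsule data. Admin queue: plain SQE only. */
    static unsigned int capsule_len(int queue_idx, unsigned int ioccsz)
    {
        return queue_idx ? ioccsz * 16 : NVME_SQE_SIZE;
    }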
666 static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl) in nvme_rdma_free_io_queues() argument
670 for (i = 1; i < ctrl->ctrl.queue_count; i++) in nvme_rdma_free_io_queues()
671 nvme_rdma_free_queue(&ctrl->queues[i]); in nvme_rdma_free_io_queues()
674 static void nvme_rdma_stop_io_queues(struct nvme_rdma_ctrl *ctrl) in nvme_rdma_stop_io_queues() argument
678 for (i = 1; i < ctrl->ctrl.queue_count; i++) in nvme_rdma_stop_io_queues()
679 nvme_rdma_stop_queue(&ctrl->queues[i]); in nvme_rdma_stop_io_queues()
682 static int nvme_rdma_start_queue(struct nvme_rdma_ctrl *ctrl, int idx) in nvme_rdma_start_queue() argument
684 struct nvme_rdma_queue *queue = &ctrl->queues[idx]; in nvme_rdma_start_queue()
689 ret = nvmf_connect_io_queue(&ctrl->ctrl, idx, poll); in nvme_rdma_start_queue()
691 ret = nvmf_connect_admin_queue(&ctrl->ctrl); in nvme_rdma_start_queue()
698 dev_info(ctrl->ctrl.device, in nvme_rdma_start_queue()
704 static int nvme_rdma_start_io_queues(struct nvme_rdma_ctrl *ctrl) in nvme_rdma_start_io_queues() argument
708 for (i = 1; i < ctrl->ctrl.queue_count; i++) { in nvme_rdma_start_io_queues()
709 ret = nvme_rdma_start_queue(ctrl, i); in nvme_rdma_start_io_queues()
718 nvme_rdma_stop_queue(&ctrl->queues[i]); in nvme_rdma_start_io_queues()
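
nvme_rdma_start_io_queues() (lines 704-718) is the classic connect-or-unwind loop: queues 1..queue_count-1 are connected in order, and on the first failure the ones already started are stopped again in reverse. A schematic version, with toy start_queue()/stop_queue() helpers assumed:

    static int start_io_queues(struct toy_ctrl *ctrl, int queue_count)
    {
        int i, ret;

        for (i = 1; i < queue_count; i++) {
            ret = start_queue(ctrl, i);
            if (ret)
                goto out_stop_queues;
        }
        return 0;

    out_stop_queues:
        /* Unwind only what was started, in reverse order. */
        for (i--; i >= 1; i--)
            stop_queue(ctrl, i);
        return ret;
    }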
722 static int nvme_rdma_alloc_io_queues(struct nvme_rdma_ctrl *ctrl) in nvme_rdma_alloc_io_queues() argument
724 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; in nvme_rdma_alloc_io_queues()
725 struct ib_device *ibdev = ctrl->device->dev; in nvme_rdma_alloc_io_queues()
737 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); in nvme_rdma_alloc_io_queues()
742 dev_err(ctrl->ctrl.device, in nvme_rdma_alloc_io_queues()
747 ctrl->ctrl.queue_count = nr_io_queues + 1; in nvme_rdma_alloc_io_queues()
748 dev_info(ctrl->ctrl.device, in nvme_rdma_alloc_io_queues()
757 ctrl->io_queues[HCTX_TYPE_READ] = nr_read_queues; in nvme_rdma_alloc_io_queues()
758 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_READ]; in nvme_rdma_alloc_io_queues()
759 ctrl->io_queues[HCTX_TYPE_DEFAULT] = in nvme_rdma_alloc_io_queues()
761 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_rdma_alloc_io_queues()
768 ctrl->io_queues[HCTX_TYPE_DEFAULT] = in nvme_rdma_alloc_io_queues()
770 nr_io_queues -= ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_rdma_alloc_io_queues()
775 ctrl->io_queues[HCTX_TYPE_POLL] = in nvme_rdma_alloc_io_queues()
779 for (i = 1; i < ctrl->ctrl.queue_count; i++) { in nvme_rdma_alloc_io_queues()
780 ret = nvme_rdma_alloc_queue(ctrl, i, in nvme_rdma_alloc_io_queues()
781 ctrl->ctrl.sqsize + 1); in nvme_rdma_alloc_io_queues()
790 nvme_rdma_free_queue(&ctrl->queues[i]); in nvme_rdma_alloc_io_queues()
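
The io_queues[HCTX_TYPE_*] assignments in nvme_rdma_alloc_io_queues() (lines 757-775) split whatever queue count the target granted across the three hctx classes. The surrounding conditions are not all visible in this search, so the following runnable model is an approximation: read queues are carved out first when the user asked for separate read/write queues, then default (write) queues, then poll queues take what is left.

    #include <stdio.h>

    enum { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL, HCTX_MAX_TYPES };

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    /* Approximate model of the split; opts_* mirror nvmf_ctrl_options. */
    static void split_io_queues(unsigned int granted,
                                unsigned int opts_nr_io,
                                unsigned int opts_nr_write,
                                unsigned int opts_nr_poll,
                                unsigned int io_queues[HCTX_MAX_TYPES])
    {
        io_queues[HCTX_TYPE_READ] = io_queues[HCTX_TYPE_POLL] = 0;
        if (opts_nr_write) {
            /* Separate read/write queues requested. */
            io_queues[HCTX_TYPE_READ] = min_u(opts_nr_io, granted);
            granted -= io_queues[HCTX_TYPE_READ];
            io_queues[HCTX_TYPE_DEFAULT] = min_u(opts_nr_write, granted);
        } else {
            io_queues[HCTX_TYPE_DEFAULT] = min_u(opts_nr_io, granted);
        }
        granted -= io_queues[HCTX_TYPE_DEFAULT];
        if (opts_nr_poll && granted)
            io_queues[HCTX_TYPE_POLL] = min_u(opts_nr_poll, granted);
    }

    int main(void)
    {
        unsigned int q[HCTX_MAX_TYPES];

        split_io_queues(8, 4, 2, 4, q);
        printf("default %u read %u poll %u\n",   /* default 2 read 4 poll 2 */
               q[HCTX_TYPE_DEFAULT], q[HCTX_TYPE_READ], q[HCTX_TYPE_POLL]);
        return 0;
    }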
798 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); in nvme_rdma_alloc_tagset() local
803 set = &ctrl->admin_tag_set; in nvme_rdma_alloc_tagset()
811 set->driver_data = ctrl; in nvme_rdma_alloc_tagset()
816 set = &ctrl->tag_set; in nvme_rdma_alloc_tagset()
828 set->driver_data = ctrl; in nvme_rdma_alloc_tagset()
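
nvme_rdma_alloc_tagset() picks between the embedded admin and I/O tag sets and stashes the ctrl in set->driver_data, which is exactly what nvme_rdma_init_request() and nvme_rdma_map_queues() later read back (lines 300 and 2184). A compressed sketch of the round-trip, toy types again:

    struct toy_tag_set { void *driver_data; /* ... blk-mq fields ... */ };

    /* At setup: remember which ctrl owns the set. */
    static void tagset_bind(struct toy_tag_set *set, struct toy_ctrl *ctrl)
    {
        set->driver_data = ctrl;
    }

    /* In per-request callbacks: recover the owner from the set. */
    static struct toy_ctrl *tagset_ctrl(struct toy_tag_set *set)
    {
        return set->driver_data;
    }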
841 static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl, in nvme_rdma_destroy_admin_queue() argument
845 blk_cleanup_queue(ctrl->ctrl.admin_q); in nvme_rdma_destroy_admin_queue()
846 blk_cleanup_queue(ctrl->ctrl.fabrics_q); in nvme_rdma_destroy_admin_queue()
847 blk_mq_free_tag_set(ctrl->ctrl.admin_tagset); in nvme_rdma_destroy_admin_queue()
849 if (ctrl->async_event_sqe.data) { in nvme_rdma_destroy_admin_queue()
850 cancel_work_sync(&ctrl->ctrl.async_event_work); in nvme_rdma_destroy_admin_queue()
851 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, in nvme_rdma_destroy_admin_queue()
853 ctrl->async_event_sqe.data = NULL; in nvme_rdma_destroy_admin_queue()
855 nvme_rdma_free_queue(&ctrl->queues[0]); in nvme_rdma_destroy_admin_queue()
858 static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl, in nvme_rdma_configure_admin_queue() argument
864 error = nvme_rdma_alloc_queue(ctrl, 0, NVME_AQ_DEPTH); in nvme_rdma_configure_admin_queue()
868 ctrl->device = ctrl->queues[0].device; in nvme_rdma_configure_admin_queue()
869 ctrl->ctrl.numa_node = ibdev_to_node(ctrl->device->dev); in nvme_rdma_configure_admin_queue()
872 if (ctrl->device->dev->attrs.device_cap_flags & in nvme_rdma_configure_admin_queue()
876 ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev, in nvme_rdma_configure_admin_queue()
884 error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe, in nvme_rdma_configure_admin_queue()
890 ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true); in nvme_rdma_configure_admin_queue()
891 if (IS_ERR(ctrl->ctrl.admin_tagset)) { in nvme_rdma_configure_admin_queue()
892 error = PTR_ERR(ctrl->ctrl.admin_tagset); in nvme_rdma_configure_admin_queue()
896 ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set); in nvme_rdma_configure_admin_queue()
897 if (IS_ERR(ctrl->ctrl.fabrics_q)) { in nvme_rdma_configure_admin_queue()
898 error = PTR_ERR(ctrl->ctrl.fabrics_q); in nvme_rdma_configure_admin_queue()
902 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); in nvme_rdma_configure_admin_queue()
903 if (IS_ERR(ctrl->ctrl.admin_q)) { in nvme_rdma_configure_admin_queue()
904 error = PTR_ERR(ctrl->ctrl.admin_q); in nvme_rdma_configure_admin_queue()
909 error = nvme_rdma_start_queue(ctrl, 0); in nvme_rdma_configure_admin_queue()
913 error = nvme_enable_ctrl(&ctrl->ctrl); in nvme_rdma_configure_admin_queue()
917 ctrl->ctrl.max_segments = ctrl->max_fr_pages; in nvme_rdma_configure_admin_queue()
918 ctrl->ctrl.max_hw_sectors = ctrl->max_fr_pages << (ilog2(SZ_4K) - 9); in nvme_rdma_configure_admin_queue()
920 ctrl->ctrl.max_integrity_segments = ctrl->max_fr_pages; in nvme_rdma_configure_admin_queue()
922 ctrl->ctrl.max_integrity_segments = 0; in nvme_rdma_configure_admin_queue()
924 nvme_start_admin_queue(&ctrl->ctrl); in nvme_rdma_configure_admin_queue()
926 error = nvme_init_identify(&ctrl->ctrl); in nvme_rdma_configure_admin_queue()
933 nvme_stop_admin_queue(&ctrl->ctrl); in nvme_rdma_configure_admin_queue()
934 blk_sync_queue(ctrl->ctrl.admin_q); in nvme_rdma_configure_admin_queue()
936 nvme_rdma_stop_queue(&ctrl->queues[0]); in nvme_rdma_configure_admin_queue()
937 nvme_cancel_admin_tagset(&ctrl->ctrl); in nvme_rdma_configure_admin_queue()
940 blk_cleanup_queue(ctrl->ctrl.admin_q); in nvme_rdma_configure_admin_queue()
943 blk_cleanup_queue(ctrl->ctrl.fabrics_q); in nvme_rdma_configure_admin_queue()
946 blk_mq_free_tag_set(ctrl->ctrl.admin_tagset); in nvme_rdma_configure_admin_queue()
948 if (ctrl->async_event_sqe.data) { in nvme_rdma_configure_admin_queue()
949 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe, in nvme_rdma_configure_admin_queue()
951 ctrl->async_event_sqe.data = NULL; in nvme_rdma_configure_admin_queue()
954 nvme_rdma_free_queue(&ctrl->queues[0]); in nvme_rdma_configure_admin_queue()
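
nvme_rdma_configure_admin_queue() (lines 858-954) is a long instance of the kernel's goto-unwind idiom: each allocation step has a matching cleanup label, and a failure jumps to the label that undoes exactly the steps completed so far, in reverse order. The shape, boiled down to three generic steps:

    static int configure(void)
    {
        int error;

        error = alloc_queue();          /* step 1 */
        if (error)
            return error;

        error = alloc_tagset();         /* step 2 */
        if (error)
            goto out_free_queue;

        error = init_queue();           /* step 3 */
        if (error)
            goto out_free_tagset;

        return 0;

    out_free_tagset:
        free_tagset();                  /* undo step 2 */
    out_free_queue:
        free_queue();                   /* undo step 1 */
        return error;
    }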
958 static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl, in nvme_rdma_destroy_io_queues() argument
962 blk_cleanup_queue(ctrl->ctrl.connect_q); in nvme_rdma_destroy_io_queues()
963 blk_mq_free_tag_set(ctrl->ctrl.tagset); in nvme_rdma_destroy_io_queues()
965 nvme_rdma_free_io_queues(ctrl); in nvme_rdma_destroy_io_queues()
968 static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) in nvme_rdma_configure_io_queues() argument
972 ret = nvme_rdma_alloc_io_queues(ctrl); in nvme_rdma_configure_io_queues()
977 ctrl->ctrl.tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, false); in nvme_rdma_configure_io_queues()
978 if (IS_ERR(ctrl->ctrl.tagset)) { in nvme_rdma_configure_io_queues()
979 ret = PTR_ERR(ctrl->ctrl.tagset); in nvme_rdma_configure_io_queues()
983 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); in nvme_rdma_configure_io_queues()
984 if (IS_ERR(ctrl->ctrl.connect_q)) { in nvme_rdma_configure_io_queues()
985 ret = PTR_ERR(ctrl->ctrl.connect_q); in nvme_rdma_configure_io_queues()
990 ret = nvme_rdma_start_io_queues(ctrl); in nvme_rdma_configure_io_queues()
995 nvme_start_freeze(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
996 nvme_start_queues(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
997 if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) { in nvme_rdma_configure_io_queues()
1004 nvme_unfreeze(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
1007 blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset, in nvme_rdma_configure_io_queues()
1008 ctrl->ctrl.queue_count - 1); in nvme_rdma_configure_io_queues()
1009 nvme_unfreeze(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
1015 nvme_stop_queues(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
1016 nvme_sync_io_queues(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
1017 nvme_rdma_stop_io_queues(ctrl); in nvme_rdma_configure_io_queues()
1019 nvme_cancel_tagset(&ctrl->ctrl); in nvme_rdma_configure_io_queues()
1021 blk_cleanup_queue(ctrl->ctrl.connect_q); in nvme_rdma_configure_io_queues()
1024 blk_mq_free_tag_set(ctrl->ctrl.tagset); in nvme_rdma_configure_io_queues()
1026 nvme_rdma_free_io_queues(ctrl); in nvme_rdma_configure_io_queues()
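
The !new branch of nvme_rdma_configure_io_queues() (lines 995-1009) handles reconnecting to a controller that may have granted a different queue count. Reassembled from the fragments above (kernel context assumed, so this is a reconstruction rather than self-contained code; the goto label name is an assumption):

    if (!new) {
        nvme_start_freeze(&ctrl->ctrl);
        nvme_start_queues(&ctrl->ctrl);
        if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) {
            /* Freeze never completed: the controller is not
             * responding, so abandon the reconnect. */
            ret = -ENODEV;
            nvme_unfreeze(&ctrl->ctrl);
            goto out_wait_freeze_timed_out;
        }
        /* Resize the tag set in case fewer queues came back. */
        blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset,
                                   ctrl->ctrl.queue_count - 1);
        nvme_unfreeze(&ctrl->ctrl);
    }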
1030 static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl, in nvme_rdma_teardown_admin_queue() argument
1033 nvme_stop_admin_queue(&ctrl->ctrl); in nvme_rdma_teardown_admin_queue()
1034 blk_sync_queue(ctrl->ctrl.admin_q); in nvme_rdma_teardown_admin_queue()
1035 nvme_rdma_stop_queue(&ctrl->queues[0]); in nvme_rdma_teardown_admin_queue()
1036 if (ctrl->ctrl.admin_tagset) { in nvme_rdma_teardown_admin_queue()
1037 blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset, in nvme_rdma_teardown_admin_queue()
1038 nvme_cancel_request, &ctrl->ctrl); in nvme_rdma_teardown_admin_queue()
1039 blk_mq_tagset_wait_completed_request(ctrl->ctrl.admin_tagset); in nvme_rdma_teardown_admin_queue()
1042 nvme_start_admin_queue(&ctrl->ctrl); in nvme_rdma_teardown_admin_queue()
1043 nvme_rdma_destroy_admin_queue(ctrl, remove); in nvme_rdma_teardown_admin_queue()
1046 static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, in nvme_rdma_teardown_io_queues() argument
1049 if (ctrl->ctrl.queue_count > 1) { in nvme_rdma_teardown_io_queues()
1050 nvme_stop_queues(&ctrl->ctrl); in nvme_rdma_teardown_io_queues()
1051 nvme_sync_io_queues(&ctrl->ctrl); in nvme_rdma_teardown_io_queues()
1052 nvme_rdma_stop_io_queues(ctrl); in nvme_rdma_teardown_io_queues()
1053 if (ctrl->ctrl.tagset) { in nvme_rdma_teardown_io_queues()
1054 blk_mq_tagset_busy_iter(ctrl->ctrl.tagset, in nvme_rdma_teardown_io_queues()
1055 nvme_cancel_request, &ctrl->ctrl); in nvme_rdma_teardown_io_queues()
1056 blk_mq_tagset_wait_completed_request(ctrl->ctrl.tagset); in nvme_rdma_teardown_io_queues()
1059 nvme_start_queues(&ctrl->ctrl); in nvme_rdma_teardown_io_queues()
1060 nvme_rdma_destroy_io_queues(ctrl, remove); in nvme_rdma_teardown_io_queues()
1066 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); in nvme_rdma_stop_ctrl() local
1068 cancel_work_sync(&ctrl->err_work); in nvme_rdma_stop_ctrl()
1069 cancel_delayed_work_sync(&ctrl->reconnect_work); in nvme_rdma_stop_ctrl()
1074 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(nctrl); in nvme_rdma_free_ctrl() local
1076 if (list_empty(&ctrl->list)) in nvme_rdma_free_ctrl()
1080 list_del(&ctrl->list); in nvme_rdma_free_ctrl()
1085 kfree(ctrl->queues); in nvme_rdma_free_ctrl()
1086 kfree(ctrl); in nvme_rdma_free_ctrl()
1089 static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl) in nvme_rdma_reconnect_or_remove() argument
1092 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) { in nvme_rdma_reconnect_or_remove()
1093 WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW || in nvme_rdma_reconnect_or_remove()
1094 ctrl->ctrl.state == NVME_CTRL_LIVE); in nvme_rdma_reconnect_or_remove()
1098 if (nvmf_should_reconnect(&ctrl->ctrl)) { in nvme_rdma_reconnect_or_remove()
1099 dev_info(ctrl->ctrl.device, "Reconnecting in %d seconds...\n", in nvme_rdma_reconnect_or_remove()
1100 ctrl->ctrl.opts->reconnect_delay); in nvme_rdma_reconnect_or_remove()
1101 queue_delayed_work(nvme_wq, &ctrl->reconnect_work, in nvme_rdma_reconnect_or_remove()
1102 ctrl->ctrl.opts->reconnect_delay * HZ); in nvme_rdma_reconnect_or_remove()
1104 nvme_delete_ctrl(&ctrl->ctrl); in nvme_rdma_reconnect_or_remove()
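
nvme_rdma_reconnect_or_remove() (lines 1089-1104) makes the retry-or-give-up decision: while nvmf_should_reconnect() says attempts remain, the reconnect work is rescheduled after reconnect_delay seconds; otherwise the controller is deleted. A toy model with the should-reconnect predicate simplified to a bounded counter:

    #include <stdio.h>

    struct reconnect_ctrl { int nr_reconnects, max_reconnects, reconnect_delay; };

    static void schedule_reconnect(struct reconnect_ctrl *c) { (void)c; }
    static void remove_ctrl(struct reconnect_ctrl *c)        { (void)c; }

    /* Simplified: the real nvmf_should_reconnect() also treats
     * max_reconnects == -1 as "retry forever". */
    static void reconnect_or_remove(struct reconnect_ctrl *c)
    {
        if (c->nr_reconnects < c->max_reconnects) {
            printf("Reconnecting in %d seconds...\n", c->reconnect_delay);
            schedule_reconnect(c);
        } else {
            printf("Removing controller...\n");
            remove_ctrl(c);
        }
    }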
1108 static int nvme_rdma_setup_ctrl(struct nvme_rdma_ctrl *ctrl, bool new) in nvme_rdma_setup_ctrl() argument
1113 ret = nvme_rdma_configure_admin_queue(ctrl, new); in nvme_rdma_setup_ctrl()
1117 if (ctrl->ctrl.icdoff) { in nvme_rdma_setup_ctrl()
1119 dev_err(ctrl->ctrl.device, "icdoff is not supported!\n"); in nvme_rdma_setup_ctrl()
1123 if (!(ctrl->ctrl.sgls & (1 << 2))) { in nvme_rdma_setup_ctrl()
1125 dev_err(ctrl->ctrl.device, in nvme_rdma_setup_ctrl()
1130 if (ctrl->ctrl.opts->queue_size > ctrl->ctrl.sqsize + 1) { in nvme_rdma_setup_ctrl()
1131 dev_warn(ctrl->ctrl.device, in nvme_rdma_setup_ctrl()
1132 "queue_size %zu > ctrl sqsize %u, clamping down\n", in nvme_rdma_setup_ctrl()
1133 ctrl->ctrl.opts->queue_size, ctrl->ctrl.sqsize + 1); in nvme_rdma_setup_ctrl()
1136 if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) { in nvme_rdma_setup_ctrl()
1137 dev_warn(ctrl->ctrl.device, in nvme_rdma_setup_ctrl()
1138 "sqsize %u > ctrl maxcmd %u, clamping down\n", in nvme_rdma_setup_ctrl()
1139 ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd); in nvme_rdma_setup_ctrl()
1140 ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1; in nvme_rdma_setup_ctrl()
1143 if (ctrl->ctrl.sgls & (1 << 20)) in nvme_rdma_setup_ctrl()
1144 ctrl->use_inline_data = true; in nvme_rdma_setup_ctrl()
1146 if (ctrl->ctrl.queue_count > 1) { in nvme_rdma_setup_ctrl()
1147 ret = nvme_rdma_configure_io_queues(ctrl, new); in nvme_rdma_setup_ctrl()
1152 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); in nvme_rdma_setup_ctrl()
1155 * state change failure is ok if we started ctrl delete, in nvme_rdma_setup_ctrl()
1159 WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING && in nvme_rdma_setup_ctrl()
1160 ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO); in nvme_rdma_setup_ctrl()
1166 nvme_start_ctrl(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1170 if (ctrl->ctrl.queue_count > 1) { in nvme_rdma_setup_ctrl()
1171 nvme_stop_queues(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1172 nvme_sync_io_queues(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1173 nvme_rdma_stop_io_queues(ctrl); in nvme_rdma_setup_ctrl()
1174 nvme_cancel_tagset(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1175 nvme_rdma_destroy_io_queues(ctrl, new); in nvme_rdma_setup_ctrl()
1178 nvme_stop_admin_queue(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1179 blk_sync_queue(ctrl->ctrl.admin_q); in nvme_rdma_setup_ctrl()
1180 nvme_rdma_stop_queue(&ctrl->queues[0]); in nvme_rdma_setup_ctrl()
1181 nvme_cancel_admin_tagset(&ctrl->ctrl); in nvme_rdma_setup_ctrl()
1182 nvme_rdma_destroy_admin_queue(ctrl, new); in nvme_rdma_setup_ctrl()
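
The checks at lines 1117-1144 in nvme_rdma_setup_ctrl() validate transport capabilities before going live: a nonzero ICDOFF and a missing keyed-SGL bit (SGLS bit 2) are fatal, while oversized queue parameters only trigger warnings or clamping. Per the fragments above, an oversized user queue_size merely warns (the effective depth is already bounded by sqsize), while MAXCMD actively lowers sqsize; note NVMe's 0-based SQSIZE convention throughout. Sketched:

    #include <stdio.h>

    /* sqsize is 0-based: the queue really holds sqsize + 1 entries. */
    static unsigned int clamp_sqsize(unsigned int sqsize, unsigned int maxcmd,
                                     unsigned int queue_size)
    {
        if (queue_size > sqsize + 1)
            fprintf(stderr, "queue_size %u > ctrl sqsize %u, clamping down\n",
                    queue_size, sqsize + 1);
        if (sqsize + 1 > maxcmd) {
            fprintf(stderr, "sqsize %u > ctrl maxcmd %u, clamping down\n",
                    sqsize + 1, maxcmd);
            sqsize = maxcmd - 1;
        }
        return sqsize;
    }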
1188 struct nvme_rdma_ctrl *ctrl = container_of(to_delayed_work(work), in nvme_rdma_reconnect_ctrl_work() local
1191 ++ctrl->ctrl.nr_reconnects; in nvme_rdma_reconnect_ctrl_work()
1193 if (nvme_rdma_setup_ctrl(ctrl, false)) in nvme_rdma_reconnect_ctrl_work()
1196 dev_info(ctrl->ctrl.device, "Successfully reconnected (%d attempts)\n", in nvme_rdma_reconnect_ctrl_work()
1197 ctrl->ctrl.nr_reconnects); in nvme_rdma_reconnect_ctrl_work()
1199 ctrl->ctrl.nr_reconnects = 0; in nvme_rdma_reconnect_ctrl_work()
1204 dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n", in nvme_rdma_reconnect_ctrl_work()
1205 ctrl->ctrl.nr_reconnects); in nvme_rdma_reconnect_ctrl_work()
1206 nvme_rdma_reconnect_or_remove(ctrl); in nvme_rdma_reconnect_ctrl_work()
1211 struct nvme_rdma_ctrl *ctrl = container_of(work, in nvme_rdma_error_recovery_work() local
1214 nvme_stop_keep_alive(&ctrl->ctrl); in nvme_rdma_error_recovery_work()
1215 flush_work(&ctrl->ctrl.async_event_work); in nvme_rdma_error_recovery_work()
1216 nvme_rdma_teardown_io_queues(ctrl, false); in nvme_rdma_error_recovery_work()
1217 nvme_start_queues(&ctrl->ctrl); in nvme_rdma_error_recovery_work()
1218 nvme_rdma_teardown_admin_queue(ctrl, false); in nvme_rdma_error_recovery_work()
1219 nvme_start_admin_queue(&ctrl->ctrl); in nvme_rdma_error_recovery_work()
1221 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { in nvme_rdma_error_recovery_work()
1222 /* state change failure is ok if we started ctrl delete */ in nvme_rdma_error_recovery_work()
1223 WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING && in nvme_rdma_error_recovery_work()
1224 ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO); in nvme_rdma_error_recovery_work()
1228 nvme_rdma_reconnect_or_remove(ctrl); in nvme_rdma_error_recovery_work()
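
nvme_rdma_error_recovery_work() (lines 1211-1228) shows the recovery ordering: keep-alive and async-event work are quiesced first, I/O queues are torn down and then unquiesced (so pending requests can fail over rather than hang), and the admin queue gets the same treatment before the CONNECTING transition. Reassembled from the fragments above (kernel context assumed):

    nvme_stop_keep_alive(&ctrl->ctrl);
    flush_work(&ctrl->ctrl.async_event_work);
    nvme_rdma_teardown_io_queues(ctrl, false);
    nvme_start_queues(&ctrl->ctrl);          /* let queued I/O fail over */
    nvme_rdma_teardown_admin_queue(ctrl, false);
    nvme_start_admin_queue(&ctrl->ctrl);

    if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
        /* state change failure is ok if we started ctrl delete */
        WARN_ON_ONCE(ctrl->ctrl.state != NVME_CTRL_DELETING &&
                     ctrl->ctrl.state != NVME_CTRL_DELETING_NOIO);
        return;
    }

    nvme_rdma_reconnect_or_remove(ctrl);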
1231 static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl) in nvme_rdma_error_recovery() argument
1233 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING)) in nvme_rdma_error_recovery()
1236 dev_warn(ctrl->ctrl.device, "starting error recovery\n"); in nvme_rdma_error_recovery()
1237 queue_work(nvme_reset_wq, &ctrl->err_work); in nvme_rdma_error_recovery()
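
nvme_rdma_error_recovery() itself (lines 1231-1238) is deliberately tiny: the state change to NVME_CTRL_RESETTING doubles as a lock, so of any number of concurrent error reporters (completion errors, CM events, timeouts) exactly one wins the transition and queues err_work. The idiom in miniature, with toy change_state()/queue_work() helpers:

    /* Only the caller that wins the state transition schedules the
     * (single) recovery work; everyone else returns immediately. */
    static void error_recovery(struct toy_ctrl *ctrl)
    {
        if (!change_state(ctrl, RESETTING))
            return;
        queue_work(recovery_workqueue, &ctrl->err_work);
    }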
1254 struct nvme_rdma_ctrl *ctrl = queue->ctrl; in nvme_rdma_wr_error() local
1256 if (ctrl->ctrl.state == NVME_CTRL_LIVE) in nvme_rdma_wr_error()
1257 dev_info(ctrl->ctrl.device, in nvme_rdma_wr_error()
1261 nvme_rdma_error_recovery(ctrl); in nvme_rdma_wr_error()
1358 sg->addr = cpu_to_le64(queue->ctrl->ctrl.icdoff); in nvme_rdma_map_sg_inline()
1390 * Align the MR to a 4K page size to match the ctrl page size and in nvme_rdma_map_sg_fr()
1602 queue->ctrl->use_inline_data && in nvme_rdma_map_data()
1676 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_post_send()
1702 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_post_recv()
1713 return queue->ctrl->admin_tag_set.tags[queue_idx]; in nvme_rdma_tagset()
1714 return queue->ctrl->tag_set.tags[queue_idx - 1]; in nvme_rdma_tagset()
1725 struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg); in nvme_rdma_submit_async_event() local
1726 struct nvme_rdma_queue *queue = &ctrl->queues[0]; in nvme_rdma_submit_async_event()
1728 struct nvme_rdma_qe *sqe = &ctrl->async_event_sqe; in nvme_rdma_submit_async_event()
1758 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_process_nvme_rsp()
1761 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_process_nvme_rsp()
1772 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_process_nvme_rsp()
1775 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_process_nvme_rsp()
1782 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_process_nvme_rsp()
1785 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_process_nvme_rsp()
1810 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_recv_done()
1812 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_recv_done()
1825 nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status, in nvme_rdma_recv_done()
1862 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_conn_rejected()
1866 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_conn_rejected()
1875 struct nvme_ctrl *ctrl = &queue->ctrl->ctrl; in nvme_rdma_addr_resolved() local
1882 if (ctrl->opts->tos >= 0) in nvme_rdma_addr_resolved()
1883 rdma_set_service_type(queue->cm_id, ctrl->opts->tos); in nvme_rdma_addr_resolved()
1886 dev_err(ctrl->device, "rdma_resolve_route failed (%d).\n", in nvme_rdma_addr_resolved()
1900 struct nvme_rdma_ctrl *ctrl = queue->ctrl; in nvme_rdma_route_resolved() local
1931 priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize); in nvme_rdma_route_resolved()
1936 dev_err(ctrl->ctrl.device, in nvme_rdma_route_resolved()
1950 dev_dbg(queue->ctrl->ctrl.device, "%s (%d): status %d id %p\n", in nvme_rdma_cm_handler()
1973 dev_dbg(queue->ctrl->ctrl.device, in nvme_rdma_cm_handler()
1980 dev_dbg(queue->ctrl->ctrl.device, in nvme_rdma_cm_handler()
1982 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_cm_handler()
1988 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_cm_handler()
1990 nvme_rdma_error_recovery(queue->ctrl); in nvme_rdma_cm_handler()
2019 struct nvme_rdma_ctrl *ctrl = queue->ctrl; in nvme_rdma_timeout() local
2021 dev_warn(ctrl->ctrl.device, "I/O %d QID %d timeout\n", in nvme_rdma_timeout()
2024 if (ctrl->ctrl.state != NVME_CTRL_LIVE) { in nvme_rdma_timeout()
2029 * - ctrl disable/shutdown fabrics requests in nvme_rdma_timeout()
2046 nvme_rdma_error_recovery(ctrl); in nvme_rdma_timeout()
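
The timeout handler (lines 2019-2046) splits on controller state: if the controller is no longer LIVE, a teardown is already in flight and the timed-out request is completed directly so the teardown is not blocked on it; otherwise recovery is kicked off and blk-mq is told to keep waiting. Modeled as a toy, with return values mirroring blk-mq's BLK_EH_DONE/BLK_EH_RESET_TIMER:

    enum eh_ret { EH_DONE, EH_RESET_TIMER };

    static enum eh_ret toy_timeout(struct toy_ctrl *ctrl, struct toy_req *rq)
    {
        if (ctrl->state != LIVE) {
            /* Teardown in progress: fail the request now. */
            complete_timed_out_request(rq);
            return EH_DONE;
        }
        /* Live controller: start recovery; the recovery path will
         * retry or fail the request. */
        error_recovery(ctrl);
        return EH_RESET_TIMER;
    }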
2066 if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) in nvme_rdma_queue_rq()
2067 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq); in nvme_rdma_queue_rq()
2098 dev_err(queue->ctrl->ctrl.device, in nvme_rdma_queue_rq()
2184 struct nvme_rdma_ctrl *ctrl = set->driver_data; in nvme_rdma_map_queues() local
2185 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; in nvme_rdma_map_queues()
2187 if (opts->nr_write_queues && ctrl->io_queues[HCTX_TYPE_READ]) { in nvme_rdma_map_queues()
2190 ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_rdma_map_queues()
2193 ctrl->io_queues[HCTX_TYPE_READ]; in nvme_rdma_map_queues()
2195 ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_rdma_map_queues()
2199 ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_rdma_map_queues()
2202 ctrl->io_queues[HCTX_TYPE_DEFAULT]; in nvme_rdma_map_queues()
2206 ctrl->device->dev, 0); in nvme_rdma_map_queues()
2208 ctrl->device->dev, 0); in nvme_rdma_map_queues()
2210 if (opts->nr_poll_queues && ctrl->io_queues[HCTX_TYPE_POLL]) { in nvme_rdma_map_queues()
2213 ctrl->io_queues[HCTX_TYPE_POLL]; in nvme_rdma_map_queues()
2215 ctrl->io_queues[HCTX_TYPE_DEFAULT] + in nvme_rdma_map_queues()
2216 ctrl->io_queues[HCTX_TYPE_READ]; in nvme_rdma_map_queues()
2220 dev_info(ctrl->ctrl.device, in nvme_rdma_map_queues()
2222 ctrl->io_queues[HCTX_TYPE_DEFAULT], in nvme_rdma_map_queues()
2223 ctrl->io_queues[HCTX_TYPE_READ], in nvme_rdma_map_queues()
2224 ctrl->io_queues[HCTX_TYPE_POLL]); in nvme_rdma_map_queues()
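
nvme_rdma_map_queues() (lines 2184-2224) turns the io_queues[] counts into blk-mq queue maps: each hctx type gets a contiguous slice, so READ hctxs start after the DEFAULT slice and POLL hctxs after DEFAULT + READ. This models the separate-read-queue case; when reads share the default queues, the READ map reuses the DEFAULT slice at offset 0 (lines 2195-2202). A runnable model of the offset arithmetic:

    #include <stdio.h>

    enum { HCTX_TYPE_DEFAULT, HCTX_TYPE_READ, HCTX_TYPE_POLL, HCTX_MAX_TYPES };

    struct toy_map { unsigned int nr_queues, queue_offset; };

    static void map_queues(const unsigned int io_queues[HCTX_MAX_TYPES],
                           struct toy_map map[HCTX_MAX_TYPES])
    {
        map[HCTX_TYPE_DEFAULT].nr_queues    = io_queues[HCTX_TYPE_DEFAULT];
        map[HCTX_TYPE_DEFAULT].queue_offset = 0;
        map[HCTX_TYPE_READ].nr_queues       = io_queues[HCTX_TYPE_READ];
        map[HCTX_TYPE_READ].queue_offset    = io_queues[HCTX_TYPE_DEFAULT];
        map[HCTX_TYPE_POLL].nr_queues       = io_queues[HCTX_TYPE_POLL];
        map[HCTX_TYPE_POLL].queue_offset    = io_queues[HCTX_TYPE_DEFAULT] +
                                              io_queues[HCTX_TYPE_READ];
    }

    int main(void)
    {
        unsigned int counts[HCTX_MAX_TYPES] = { 2, 4, 2 };
        struct toy_map map[HCTX_MAX_TYPES];

        map_queues(counts, map);
        for (int t = 0; t < HCTX_MAX_TYPES; t++)
            printf("type %d: %u queues at offset %u\n",
                   t, map[t].nr_queues, map[t].queue_offset);
        return 0;
    }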
2249 static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown) in nvme_rdma_shutdown_ctrl() argument
2251 nvme_rdma_teardown_io_queues(ctrl, shutdown); in nvme_rdma_shutdown_ctrl()
2252 nvme_stop_admin_queue(&ctrl->ctrl); in nvme_rdma_shutdown_ctrl()
2254 nvme_shutdown_ctrl(&ctrl->ctrl); in nvme_rdma_shutdown_ctrl()
2256 nvme_disable_ctrl(&ctrl->ctrl); in nvme_rdma_shutdown_ctrl()
2257 nvme_rdma_teardown_admin_queue(ctrl, shutdown); in nvme_rdma_shutdown_ctrl()
2260 static void nvme_rdma_delete_ctrl(struct nvme_ctrl *ctrl) in nvme_rdma_delete_ctrl() argument
2262 nvme_rdma_shutdown_ctrl(to_rdma_ctrl(ctrl), true); in nvme_rdma_delete_ctrl()
2267 struct nvme_rdma_ctrl *ctrl = in nvme_rdma_reset_ctrl_work() local
2268 container_of(work, struct nvme_rdma_ctrl, ctrl.reset_work); in nvme_rdma_reset_ctrl_work()
2270 nvme_stop_ctrl(&ctrl->ctrl); in nvme_rdma_reset_ctrl_work()
2271 nvme_rdma_shutdown_ctrl(ctrl, false); in nvme_rdma_reset_ctrl_work()
2273 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { in nvme_rdma_reset_ctrl_work()
2279 if (nvme_rdma_setup_ctrl(ctrl, false)) in nvme_rdma_reset_ctrl_work()
2285 ++ctrl->ctrl.nr_reconnects; in nvme_rdma_reset_ctrl_work()
2286 nvme_rdma_reconnect_or_remove(ctrl); in nvme_rdma_reset_ctrl_work()
2318 struct nvme_rdma_ctrl *ctrl; in nvme_rdma_existing_controller() local
2322 list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) { in nvme_rdma_existing_controller()
2323 found = nvmf_ip_options_match(&ctrl->ctrl, opts); in nvme_rdma_existing_controller()
2335 struct nvme_rdma_ctrl *ctrl; in nvme_rdma_create_ctrl() local
2339 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); in nvme_rdma_create_ctrl()
2340 if (!ctrl) in nvme_rdma_create_ctrl()
2342 ctrl->ctrl.opts = opts; in nvme_rdma_create_ctrl()
2343 INIT_LIST_HEAD(&ctrl->list); in nvme_rdma_create_ctrl()
2356 opts->traddr, opts->trsvcid, &ctrl->addr); in nvme_rdma_create_ctrl()
2365 opts->host_traddr, NULL, &ctrl->src_addr); in nvme_rdma_create_ctrl()
2378 INIT_DELAYED_WORK(&ctrl->reconnect_work, in nvme_rdma_create_ctrl()
2380 INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work); in nvme_rdma_create_ctrl()
2381 INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work); in nvme_rdma_create_ctrl()
2383 ctrl->ctrl.queue_count = opts->nr_io_queues + opts->nr_write_queues + in nvme_rdma_create_ctrl()
2385 ctrl->ctrl.sqsize = opts->queue_size - 1; in nvme_rdma_create_ctrl()
2386 ctrl->ctrl.kato = opts->kato; in nvme_rdma_create_ctrl()
2389 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues), in nvme_rdma_create_ctrl()
2391 if (!ctrl->queues) in nvme_rdma_create_ctrl()
2394 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops, in nvme_rdma_create_ctrl()
2399 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING); in nvme_rdma_create_ctrl()
2402 ret = nvme_rdma_setup_ctrl(ctrl, true); in nvme_rdma_create_ctrl()
2406 dev_info(ctrl->ctrl.device, "new ctrl: NQN \"%s\", addr %pISpcs\n", in nvme_rdma_create_ctrl()
2407 ctrl->ctrl.opts->subsysnqn, &ctrl->addr); in nvme_rdma_create_ctrl()
2410 list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list); in nvme_rdma_create_ctrl()
2413 return &ctrl->ctrl; in nvme_rdma_create_ctrl()
2416 nvme_uninit_ctrl(&ctrl->ctrl); in nvme_rdma_create_ctrl()
2417 nvme_put_ctrl(&ctrl->ctrl); in nvme_rdma_create_ctrl()
2422 kfree(ctrl->queues); in nvme_rdma_create_ctrl()
2424 kfree(ctrl); in nvme_rdma_create_ctrl()
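
Two conventions in nvme_rdma_create_ctrl() (lines 2383-2386) are easy to trip over: queue_count counts the admin queue too, so it is the sum of all requested I/O, write, and poll queues plus one; and sqsize is stored 0-based, hence queue_size - 1. Schematically, with toy option/ctrl fields assumed:

    static void init_counts(const struct toy_opts *o, struct toy_ctrl *c)
    {
        /* queue_count includes queues[0], the admin queue. */
        c->queue_count = o->nr_io_queues + o->nr_write_queues +
                         o->nr_poll_queues + 1;

        /* NVMe SQSIZE is a 0-based value. */
        c->sqsize = o->queue_size - 1;
    }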
2441 struct nvme_rdma_ctrl *ctrl; in nvme_rdma_remove_one() local
2459 list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) { in nvme_rdma_remove_one()
2460 if (ctrl->device->dev != ib_device) in nvme_rdma_remove_one()
2462 nvme_delete_ctrl(&ctrl->ctrl); in nvme_rdma_remove_one()
2495 struct nvme_rdma_ctrl *ctrl; in nvme_rdma_cleanup_module() local
2501 list_for_each_entry(ctrl, &nvme_rdma_ctrl_list, list) in nvme_rdma_cleanup_module()
2502 nvme_delete_ctrl(&ctrl->ctrl); in nvme_rdma_cleanup_module()