Lines matching references to ctrl in the NVMe over Fibre Channel host driver (drivers/nvme/host/fc.c). The leading number on each line is its line number in that file; the trailing "in func()" names the enclosing function.
35 struct nvme_fc_ctrl *ctrl; member
99 struct nvme_fc_ctrl *ctrl; member
179 struct nvme_ctrl ctrl; member
183 to_fc_ctrl(struct nvme_ctrl *ctrl) in to_fc_ctrl() argument
185 return container_of(ctrl, struct nvme_fc_ctrl, ctrl); in to_fc_ctrl()
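The to_fc_ctrl() helper above recovers the outer nvme_fc_ctrl from a pointer to its embedded nvme_ctrl member. A minimal userspace sketch of the same container_of() idiom follows; the struct layouts and the simplified macro are stand-ins, not the kernel's own definitions:

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's container_of();
 * the real macro additionally type-checks the member pointer. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct nvme_ctrl { int state; };	/* stand-in "base" struct */

struct nvme_fc_ctrl {
	int cnum;
	struct nvme_ctrl ctrl;		/* the embedded member */
};

static struct nvme_fc_ctrl *to_fc_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_fc_ctrl, ctrl);
}

int main(void)
{
	struct nvme_fc_ctrl fc = { .cnum = 3 };
	struct nvme_ctrl *inner = &fc.ctrl;	/* what generic code holds */

	/* Recover the outer FC controller from the embedded pointer. */
	printf("cnum = %d\n", to_fc_ctrl(inner)->cnum);	/* prints 3 */
	return 0;
}

The arithmetic is just "member address minus member offset"; this is why the generic nvme core can pass around nvme_ctrl pointers while the FC transport gets its own structure back in every callback.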
557 nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl) in nvme_fc_resume_controller() argument
559 switch (ctrl->ctrl.state) { in nvme_fc_resume_controller()
566 dev_info(ctrl->ctrl.device, in nvme_fc_resume_controller()
568 "Attempting reconnect\n", ctrl->cnum); in nvme_fc_resume_controller()
570 queue_delayed_work(nvme_wq, &ctrl->connect_work, 0); in nvme_fc_resume_controller()
592 struct nvme_fc_ctrl *ctrl; in nvme_fc_attach_to_suspended_rport() local
628 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) in nvme_fc_attach_to_suspended_rport()
629 nvme_fc_resume_controller(ctrl); in nvme_fc_attach_to_suspended_rport()
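nvme_fc_attach_to_suspended_rport() walks rport->ctrl_list with list_for_each_entry() and resumes each controller it finds. A hedged sketch of that intrusive-list walk, assuming GNU C (it uses the typeof extension) and simplified list helpers rather than the kernel's <linux/list.h>:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next, *prev; };

static void list_add_tail(struct list_head *n, struct list_head *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

/* Simplified list_for_each_entry(): walk embedded nodes, recover owners. */
#define list_for_each_entry(pos, head, member)				\
	for (pos = container_of((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, typeof(*pos), member))

struct ctrl { int cnum; struct list_head ctrl_list; };

static void resume_controller(struct ctrl *c)
{
	printf("NVME-FC{%d}: attempting reconnect\n", c->cnum);
}

int main(void)
{
	/* Circular list head, as list_head initialization works in the kernel. */
	struct list_head rport_ctrl_list = { &rport_ctrl_list, &rport_ctrl_list };
	struct ctrl a = { .cnum = 0 }, b = { .cnum = 1 };
	struct ctrl *c;

	list_add_tail(&a.ctrl_list, &rport_ctrl_list);
	list_add_tail(&b.ctrl_list, &rport_ctrl_list);

	list_for_each_entry(c, &rport_ctrl_list, ctrl_list)
		resume_controller(c);
	return 0;
}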
789 nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl) in nvme_fc_ctrl_connectivity_loss() argument
791 dev_info(ctrl->ctrl.device, in nvme_fc_ctrl_connectivity_loss()
793 "Reconnect", ctrl->cnum); in nvme_fc_ctrl_connectivity_loss()
795 switch (ctrl->ctrl.state) { in nvme_fc_ctrl_connectivity_loss()
805 if (nvme_reset_ctrl(&ctrl->ctrl)) { in nvme_fc_ctrl_connectivity_loss()
806 dev_warn(ctrl->ctrl.device, in nvme_fc_ctrl_connectivity_loss()
808 ctrl->cnum); in nvme_fc_ctrl_connectivity_loss()
809 nvme_delete_ctrl(&ctrl->ctrl); in nvme_fc_ctrl_connectivity_loss()
855 struct nvme_fc_ctrl *ctrl; in nvme_fc_unregister_remoteport() local
871 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { in nvme_fc_unregister_remoteport()
874 dev_warn(ctrl->ctrl.device, in nvme_fc_unregister_remoteport()
876 ctrl->cnum); in nvme_fc_unregister_remoteport()
877 nvme_delete_ctrl(&ctrl->ctrl); in nvme_fc_unregister_remoteport()
879 nvme_fc_ctrl_connectivity_loss(ctrl); in nvme_fc_unregister_remoteport()
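On connectivity loss the driver switches on the controller state: a live controller gets a reset queued, and is deleted only if the reset cannot even be scheduled, while a controller that is already reconnecting is left to its in-flight connect work. A small sketch of that decision, with stub reset/delete functions and stand-in state names (the real ones come from the nvme core):

#include <stdio.h>

enum ctrl_state { CTRL_LIVE, CTRL_CONNECTING, CTRL_DELETING };

static int reset_ctrl(int cnum)		/* stub: 0 = reset queued, else error */
{
	printf("NVME-FC{%d}: reset queued\n", cnum);
	return 0;
}

static void delete_ctrl(int cnum)
{
	printf("NVME-FC{%d}: couldn't schedule reset, deleting\n", cnum);
}

static void connectivity_loss(enum ctrl_state state, int cnum)
{
	switch (state) {
	case CTRL_LIVE:
		/* Try to recover via reset; if even that fails to
		 * schedule, tear the controller down instead. */
		if (reset_ctrl(cnum))
			delete_ctrl(cnum);
		break;
	case CTRL_CONNECTING:
		/* A reconnect is already in flight; nothing extra to do. */
		break;
	default:
		break;
	}
}

int main(void)
{
	connectivity_loss(CTRL_LIVE, 0);
	return 0;
}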
1034 static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);
1176 nvme_fc_connect_admin_queue(struct nvme_fc_ctrl *ctrl, in nvme_fc_connect_admin_queue() argument
1188 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); in nvme_fc_connect_admin_queue()
1190 dev_info(ctrl->ctrl.device, in nvme_fc_connect_admin_queue()
1192 ctrl->cnum); in nvme_fc_connect_admin_queue()
1200 if (ctrl->lport->ops->lsrqst_priv_sz) in nvme_fc_connect_admin_queue()
1219 uuid_copy(&assoc_rqst->assoc_cmd.hostid, &ctrl->ctrl.opts->host->id); in nvme_fc_connect_admin_queue()
1220 strncpy(assoc_rqst->assoc_cmd.hostnqn, ctrl->ctrl.opts->host->nqn, in nvme_fc_connect_admin_queue()
1222 strncpy(assoc_rqst->assoc_cmd.subnqn, ctrl->ctrl.opts->subsysnqn, in nvme_fc_connect_admin_queue()
1232 ret = nvme_fc_send_ls_req(ctrl->rport, lsop); in nvme_fc_connect_admin_queue()
1269 dev_err(ctrl->dev, in nvme_fc_connect_admin_queue()
1273 spin_lock_irqsave(&ctrl->lock, flags); in nvme_fc_connect_admin_queue()
1274 ctrl->association_id = in nvme_fc_connect_admin_queue()
1279 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_fc_connect_admin_queue()
1286 dev_err(ctrl->dev, in nvme_fc_connect_admin_queue()
1293 nvme_fc_connect_queue(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, in nvme_fc_connect_queue() argument
1304 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); in nvme_fc_connect_queue()
1306 dev_info(ctrl->ctrl.device, in nvme_fc_connect_queue()
1308 ctrl->cnum); in nvme_fc_connect_queue()
1316 if (ctrl->lport->ops->lsrqst_priv_sz) in nvme_fc_connect_queue()
1330 conn_rqst->associd.association_id = cpu_to_be64(ctrl->association_id); in nvme_fc_connect_queue()
1347 ret = nvme_fc_send_ls_req(ctrl->rport, lsop); in nvme_fc_connect_queue()
1375 dev_err(ctrl->dev, in nvme_fc_connect_queue()
1388 dev_err(ctrl->dev, in nvme_fc_connect_queue()
1424 nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl) in nvme_fc_xmt_disconnect_assoc() argument
1434 ctrl->lport->ops->lsrqst_priv_sz), GFP_KERNEL); in nvme_fc_xmt_disconnect_assoc()
1436 dev_info(ctrl->ctrl.device, in nvme_fc_xmt_disconnect_assoc()
1439 ctrl->cnum); in nvme_fc_xmt_disconnect_assoc()
1446 if (ctrl->lport->ops->lsrqst_priv_sz) in nvme_fc_xmt_disconnect_assoc()
1452 ctrl->association_id); in nvme_fc_xmt_disconnect_assoc()
1454 ret = nvme_fc_send_ls_req_async(ctrl->rport, lsop, in nvme_fc_xmt_disconnect_assoc()
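All three LS builders above (Create Association, Create Connection, Disconnect Association) share one allocation idiom: a single zeroed allocation sized for the op structure, the request and response payloads, and the LLDD-private area advertised via lsrqst_priv_sz, with pointers then carved out of the block. A userspace sketch under hypothetical payload types (the real layouts come from the FC-NVME headers, and the real code relies on natural member alignment just as this sketch does):

#include <stdio.h>
#include <stdlib.h>

struct ls_rqst { int cmd; };		/* hypothetical request payload */
struct ls_rsp  { int status; };		/* hypothetical response payload */

struct lsop {
	struct ls_rqst *rqstbuf;
	struct ls_rsp  *rspbuf;
	void           *priv;		/* LLDD-private area, optional */
};

static struct lsop *alloc_lsop(size_t priv_sz)
{
	/* One zeroed allocation for everything, as in the kzalloc() calls. */
	struct lsop *op = calloc(1, sizeof(*op) + sizeof(struct ls_rqst) +
				    sizeof(struct ls_rsp) + priv_sz);
	if (!op)
		return NULL;
	op->rqstbuf = (struct ls_rqst *)&op[1];
	op->rspbuf  = (struct ls_rsp *)&op->rqstbuf[1];
	op->priv    = priv_sz ? (void *)&op->rspbuf[1] : NULL;
	return op;
}

int main(void)
{
	struct lsop *op = alloc_lsop(32);

	if (!op)
		return 1;
	op->rqstbuf->cmd = 0x01;	/* e.g. a Create Association request */
	printf("rqst at %p, rsp at %p, priv at %p\n",
	       (void *)op->rqstbuf, (void *)op->rspbuf, op->priv);
	free(op);
	return 0;
}

One allocation means one failure point and one kfree() on teardown, which is why the conditional "if (ctrl->lport->ops->lsrqst_priv_sz)" only decides whether the private pointer is set, not whether a second allocation happens.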
1510 struct nvme_fc_ctrl *ctrl, *ret = NULL; in nvme_fc_match_disconn_ls() local
1517 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { in nvme_fc_match_disconn_ls()
1518 if (!nvme_fc_ctrl_get(ctrl)) in nvme_fc_match_disconn_ls()
1520 spin_lock(&ctrl->lock); in nvme_fc_match_disconn_ls()
1521 if (association_id == ctrl->association_id) { in nvme_fc_match_disconn_ls()
1522 oldls = ctrl->rcv_disconn; in nvme_fc_match_disconn_ls()
1523 ctrl->rcv_disconn = lsop; in nvme_fc_match_disconn_ls()
1524 ret = ctrl; in nvme_fc_match_disconn_ls()
1526 spin_unlock(&ctrl->lock); in nvme_fc_match_disconn_ls()
1530 nvme_fc_ctrl_put(ctrl); in nvme_fc_match_disconn_ls()
1539 "LS's received\n", ctrl->cnum); in nvme_fc_match_disconn_ls()
1565 struct nvme_fc_ctrl *ctrl = NULL; in nvme_fc_ls_disconnect_assoc() local
1573 ctrl = nvme_fc_match_disconn_ls(rport, lsop); in nvme_fc_ls_disconnect_assoc()
1574 if (!ctrl) in nvme_fc_ls_disconnect_assoc()
1607 nvme_fc_error_recovery(ctrl, "Disconnect Association LS received"); in nvme_fc_ls_disconnect_assoc()
1610 nvme_fc_ctrl_put(ctrl); in nvme_fc_ls_disconnect_assoc()
1814 __nvme_fc_exit_request(struct nvme_fc_ctrl *ctrl, in __nvme_fc_exit_request() argument
1817 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.rspdma, in __nvme_fc_exit_request()
1819 fc_dma_unmap_single(ctrl->lport->dev, op->fcp_req.cmddma, in __nvme_fc_exit_request()
1835 __nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op) in __nvme_fc_abort_op() argument
1840 spin_lock_irqsave(&ctrl->lock, flags); in __nvme_fc_abort_op()
1844 else if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) { in __nvme_fc_abort_op()
1846 ctrl->iocnt++; in __nvme_fc_abort_op()
1848 spin_unlock_irqrestore(&ctrl->lock, flags); in __nvme_fc_abort_op()
1853 ctrl->lport->ops->fcp_abort(&ctrl->lport->localport, in __nvme_fc_abort_op()
1854 &ctrl->rport->remoteport, in __nvme_fc_abort_op()
1862 nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl) in nvme_fc_abort_aen_ops() argument
1864 struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops; in nvme_fc_abort_aen_ops()
1872 __nvme_fc_abort_op(ctrl, aen_op); in nvme_fc_abort_aen_ops()
1876 __nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl, in __nvme_fc_fcpop_chk_teardowns() argument
1882 spin_lock_irqsave(&ctrl->lock, flags); in __nvme_fc_fcpop_chk_teardowns()
1883 if (test_bit(FCCTRL_TERMIO, &ctrl->flags) && in __nvme_fc_fcpop_chk_teardowns()
1885 if (!--ctrl->iocnt) in __nvme_fc_fcpop_chk_teardowns()
1886 wake_up(&ctrl->ioabort_wait); in __nvme_fc_fcpop_chk_teardowns()
1888 spin_unlock_irqrestore(&ctrl->lock, flags); in __nvme_fc_fcpop_chk_teardowns()
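The iocnt handling in __nvme_fc_abort_op() and __nvme_fc_fcpop_chk_teardowns() is a counted-teardown pattern: each I/O terminated while FCCTRL_TERMIO is set bumps the counter under ctrl->lock, each completion drops it, and the final drop wakes ctrl->ioabort_wait so the association teardown (the wait_event_lock_irq() further down in nvme_fc_delete_association()) can proceed. A pthread analog, with the mutex and condition variable standing in for the spinlock and wait queue:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  ioabort_wait = PTHREAD_COND_INITIALIZER;
static int iocnt;

static void abort_op_start(void)	/* __nvme_fc_abort_op() analog */
{
	pthread_mutex_lock(&lock);
	iocnt++;
	pthread_mutex_unlock(&lock);
}

static void op_done(void)		/* chk_teardowns() analog */
{
	pthread_mutex_lock(&lock);
	if (--iocnt == 0)
		pthread_cond_signal(&ioabort_wait);
	pthread_mutex_unlock(&lock);
}

static void *completion_thread(void *arg)
{
	(void)arg;
	usleep(10000);			/* pretend the LLDD finishes later */
	op_done();
	return NULL;
}

int main(void)
{
	pthread_t t;

	abort_op_start();
	pthread_create(&t, NULL, completion_thread, NULL);

	/* wait_event_lock_irq(ioabort_wait, iocnt == 0, lock) analog. */
	pthread_mutex_lock(&lock);
	while (iocnt != 0)
		pthread_cond_wait(&ioabort_wait, &lock);
	pthread_mutex_unlock(&lock);

	puts("all aborted I/Os accounted for");
	pthread_join(t, NULL);
	return 0;
}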
1895 struct nvme_fc_ctrl *ctrl = in nvme_fc_ctrl_ioerr_work() local
1898 nvme_fc_error_recovery(ctrl, "transport detected io error"); in nvme_fc_ctrl_ioerr_work()
1907 struct nvme_fc_ctrl *ctrl = op->ctrl; in nvme_fc_fcpio_done() local
1955 fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma, in nvme_fc_fcpio_done()
1962 dev_info(ctrl->ctrl.device, in nvme_fc_fcpio_done()
1964 ctrl->cnum, freq->status); in nvme_fc_fcpio_done()
1994 dev_info(ctrl->ctrl.device, in nvme_fc_fcpio_done()
1997 ctrl->cnum, freq->transferred_length, in nvme_fc_fcpio_done()
2016 dev_info(ctrl->ctrl.device, in nvme_fc_fcpio_done()
2020 ctrl->cnum, be16_to_cpu(op->rsp_iu.iu_len), in nvme_fc_fcpio_done()
2034 dev_info(ctrl->ctrl.device, in nvme_fc_fcpio_done()
2037 ctrl->cnum, freq->rcv_rsplen); in nvme_fc_fcpio_done()
2045 nvme_complete_async_event(&queue->ctrl->ctrl, status, &result); in nvme_fc_fcpio_done()
2046 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); in nvme_fc_fcpio_done()
2049 nvme_fc_ctrl_put(ctrl); in nvme_fc_fcpio_done()
2053 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); in nvme_fc_fcpio_done()
2058 if (terminate_assoc && ctrl->ctrl.state != NVME_CTRL_RESETTING) in nvme_fc_fcpio_done()
2059 queue_work(nvme_reset_wq, &ctrl->ioerr_work); in nvme_fc_fcpio_done()
2063 __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl, in __nvme_fc_init_request() argument
2078 op->ctrl = ctrl; in __nvme_fc_init_request()
2092 op->fcp_req.cmddma = fc_dma_map_single(ctrl->lport->dev, in __nvme_fc_init_request()
2094 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) { in __nvme_fc_init_request()
2095 dev_err(ctrl->dev, in __nvme_fc_init_request()
2101 op->fcp_req.rspdma = fc_dma_map_single(ctrl->lport->dev, in __nvme_fc_init_request()
2104 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) { in __nvme_fc_init_request()
2105 dev_err(ctrl->dev, in __nvme_fc_init_request()
2119 struct nvme_fc_ctrl *ctrl = set->driver_data; in nvme_fc_init_request() local
2121 int queue_idx = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0; in nvme_fc_init_request()
2122 struct nvme_fc_queue *queue = &ctrl->queues[queue_idx]; in nvme_fc_init_request()
2125 res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++); in nvme_fc_init_request()
2130 nvme_req(rq)->ctrl = &ctrl->ctrl; in nvme_fc_init_request()
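nvme_fc_init_request() maps each request onto the driver's queue array: requests from the I/O tag set use queue hctx_idx + 1, everything else (the admin tag set) uses queue 0, matching the queue_count = nr_io_queues + 1 convention used throughout the file. A trivial sketch of that mapping:

#include <stdio.h>

/* ctrl->queues[0] is the admin queue; I/O hctx i uses ctrl->queues[i + 1]. */
static int queue_idx(int is_io_tagset, int hctx_idx)
{
	return is_io_tagset ? hctx_idx + 1 : 0;
}

int main(void)
{
	printf("admin request -> queue %d\n", queue_idx(0, 0));
	printf("io hctx 0     -> queue %d\n", queue_idx(1, 0));
	printf("io hctx 3     -> queue %d\n", queue_idx(1, 3));
	return 0;
}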
2135 nvme_fc_init_aen_ops(struct nvme_fc_ctrl *ctrl) in nvme_fc_init_aen_ops() argument
2143 aen_op = ctrl->aen_ops; in nvme_fc_init_aen_ops()
2145 if (ctrl->lport->ops->fcprqst_priv_sz) { in nvme_fc_init_aen_ops()
2146 private = kzalloc(ctrl->lport->ops->fcprqst_priv_sz, in nvme_fc_init_aen_ops()
2154 ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0], in nvme_fc_init_aen_ops()
2174 nvme_fc_term_aen_ops(struct nvme_fc_ctrl *ctrl) in nvme_fc_term_aen_ops() argument
2179 cancel_work_sync(&ctrl->ctrl.async_event_work); in nvme_fc_term_aen_ops()
2180 aen_op = ctrl->aen_ops; in nvme_fc_term_aen_ops()
2182 __nvme_fc_exit_request(ctrl, aen_op); in nvme_fc_term_aen_ops()
2190 __nvme_fc_init_hctx(struct blk_mq_hw_ctx *hctx, struct nvme_fc_ctrl *ctrl, in __nvme_fc_init_hctx() argument
2193 struct nvme_fc_queue *queue = &ctrl->queues[qidx]; in __nvme_fc_init_hctx()
2203 struct nvme_fc_ctrl *ctrl = data; in nvme_fc_init_hctx() local
2205 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx + 1); in nvme_fc_init_hctx()
2214 struct nvme_fc_ctrl *ctrl = data; in nvme_fc_init_admin_hctx() local
2216 __nvme_fc_init_hctx(hctx, ctrl, hctx_idx); in nvme_fc_init_admin_hctx()
2222 nvme_fc_init_queue(struct nvme_fc_ctrl *ctrl, int idx) in nvme_fc_init_queue() argument
2226 queue = &ctrl->queues[idx]; in nvme_fc_init_queue()
2228 queue->ctrl = ctrl; in nvme_fc_init_queue()
2231 queue->dev = ctrl->dev; in nvme_fc_init_queue()
2234 queue->cmnd_capsule_len = ctrl->ctrl.ioccsz * 16; in nvme_fc_init_queue()
2276 __nvme_fc_delete_hw_queue(struct nvme_fc_ctrl *ctrl, in __nvme_fc_delete_hw_queue() argument
2279 if (ctrl->lport->ops->delete_queue) in __nvme_fc_delete_hw_queue()
2280 ctrl->lport->ops->delete_queue(&ctrl->lport->localport, qidx, in __nvme_fc_delete_hw_queue()
2286 nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl) in nvme_fc_free_io_queues() argument
2290 for (i = 1; i < ctrl->ctrl.queue_count; i++) in nvme_fc_free_io_queues()
2291 nvme_fc_free_queue(&ctrl->queues[i]); in nvme_fc_free_io_queues()
2295 __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl, in __nvme_fc_create_hw_queue() argument
2301 if (ctrl->lport->ops->create_queue) in __nvme_fc_create_hw_queue()
2302 ret = ctrl->lport->ops->create_queue(&ctrl->lport->localport, in __nvme_fc_create_hw_queue()
2309 nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl) in nvme_fc_delete_hw_io_queues() argument
2311 struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1]; in nvme_fc_delete_hw_io_queues()
2314 for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--) in nvme_fc_delete_hw_io_queues()
2315 __nvme_fc_delete_hw_queue(ctrl, queue, i); in nvme_fc_delete_hw_io_queues()
2319 nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) in nvme_fc_create_hw_io_queues() argument
2321 struct nvme_fc_queue *queue = &ctrl->queues[1]; in nvme_fc_create_hw_io_queues()
2324 for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) { in nvme_fc_create_hw_io_queues()
2325 ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize); in nvme_fc_create_hw_io_queues()
2334 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i); in nvme_fc_create_hw_io_queues()
2339 nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) in nvme_fc_connect_io_queues() argument
2343 for (i = 1; i < ctrl->ctrl.queue_count; i++) { in nvme_fc_connect_io_queues()
2344 ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize, in nvme_fc_connect_io_queues()
2348 ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false); in nvme_fc_connect_io_queues()
2352 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags); in nvme_fc_connect_io_queues()
2359 nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl) in nvme_fc_init_io_queues() argument
2363 for (i = 1; i < ctrl->ctrl.queue_count; i++) in nvme_fc_init_io_queues()
2364 nvme_fc_init_queue(ctrl, i); in nvme_fc_init_io_queues()
2370 struct nvme_fc_ctrl *ctrl = in nvme_fc_ctrl_free() local
2374 if (ctrl->ctrl.tagset) { in nvme_fc_ctrl_free()
2375 blk_cleanup_queue(ctrl->ctrl.connect_q); in nvme_fc_ctrl_free()
2376 blk_mq_free_tag_set(&ctrl->tag_set); in nvme_fc_ctrl_free()
2380 spin_lock_irqsave(&ctrl->rport->lock, flags); in nvme_fc_ctrl_free()
2381 list_del(&ctrl->ctrl_list); in nvme_fc_ctrl_free()
2382 spin_unlock_irqrestore(&ctrl->rport->lock, flags); in nvme_fc_ctrl_free()
2384 nvme_start_admin_queue(&ctrl->ctrl); in nvme_fc_ctrl_free()
2385 blk_cleanup_queue(ctrl->ctrl.admin_q); in nvme_fc_ctrl_free()
2386 blk_cleanup_queue(ctrl->ctrl.fabrics_q); in nvme_fc_ctrl_free()
2387 blk_mq_free_tag_set(&ctrl->admin_tag_set); in nvme_fc_ctrl_free()
2389 kfree(ctrl->queues); in nvme_fc_ctrl_free()
2391 put_device(ctrl->dev); in nvme_fc_ctrl_free()
2392 nvme_fc_rport_put(ctrl->rport); in nvme_fc_ctrl_free()
2394 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); in nvme_fc_ctrl_free()
2395 if (ctrl->ctrl.opts) in nvme_fc_ctrl_free()
2396 nvmf_free_options(ctrl->ctrl.opts); in nvme_fc_ctrl_free()
2397 kfree(ctrl); in nvme_fc_ctrl_free()
2401 nvme_fc_ctrl_put(struct nvme_fc_ctrl *ctrl) in nvme_fc_ctrl_put() argument
2403 kref_put(&ctrl->ref, nvme_fc_ctrl_free); in nvme_fc_ctrl_put()
2407 nvme_fc_ctrl_get(struct nvme_fc_ctrl *ctrl) in nvme_fc_ctrl_get() argument
2409 return kref_get_unless_zero(&ctrl->ref); in nvme_fc_ctrl_get()
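nvme_fc_ctrl_get()/nvme_fc_ctrl_put() wrap a kref: kref_get_unless_zero() refuses to resurrect a controller whose count has already hit zero (which is why nvme_fc_match_disconn_ls() above checks its return value), and the final put runs nvme_fc_ctrl_free(). A C11-atomics sketch of that refcount discipline; this is an illustration of the semantics, not the kernel's kref implementation:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ref { atomic_int count; };

/* kref_get_unless_zero() analog: never take a reference on a dying object. */
static bool ref_get_unless_zero(struct ref *r)
{
	int old = atomic_load(&r->count);

	while (old != 0)
		if (atomic_compare_exchange_weak(&r->count, &old, old + 1))
			return true;
	return false;
}

/* kref_put() analog: the final drop runs the release callback. */
static void ref_put(struct ref *r, void (*release)(struct ref *))
{
	if (atomic_fetch_sub(&r->count, 1) == 1)
		release(r);
}

static void ctrl_free(struct ref *r)
{
	(void)r;
	puts("controller freed");
}

int main(void)
{
	struct ref ctrl_ref = { .count = 1 };	/* kref_init() analog */

	if (ref_get_unless_zero(&ctrl_ref))	/* e.g. match_disconn_ls */
		ref_put(&ctrl_ref, ctrl_free);	/* balanced put */

	ref_put(&ctrl_ref, ctrl_free);		/* final put -> free */
	printf("get after free: %d\n", ref_get_unless_zero(&ctrl_ref));
	return 0;
}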
2419 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); in nvme_fc_nvme_ctrl_freed() local
2421 WARN_ON(nctrl != &ctrl->ctrl); in nvme_fc_nvme_ctrl_freed()
2423 nvme_fc_ctrl_put(ctrl); in nvme_fc_nvme_ctrl_freed()
2443 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); in nvme_fc_terminate_exchange() local
2447 __nvme_fc_abort_op(ctrl, op); in nvme_fc_terminate_exchange()
2461 __nvme_fc_abort_outstanding_ios(struct nvme_fc_ctrl *ctrl, bool start_queues) in __nvme_fc_abort_outstanding_ios() argument
2469 if (ctrl->ctrl.queue_count > 1) { in __nvme_fc_abort_outstanding_ios()
2470 for (q = 1; q < ctrl->ctrl.queue_count; q++) in __nvme_fc_abort_outstanding_ios()
2471 clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[q].flags); in __nvme_fc_abort_outstanding_ios()
2473 clear_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags); in __nvme_fc_abort_outstanding_ios()
2487 if (ctrl->ctrl.queue_count > 1) { in __nvme_fc_abort_outstanding_ios()
2488 nvme_stop_queues(&ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2489 nvme_sync_io_queues(&ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2490 blk_mq_tagset_busy_iter(&ctrl->tag_set, in __nvme_fc_abort_outstanding_ios()
2491 nvme_fc_terminate_exchange, &ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2492 blk_mq_tagset_wait_completed_request(&ctrl->tag_set); in __nvme_fc_abort_outstanding_ios()
2494 nvme_start_queues(&ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2512 nvme_stop_admin_queue(&ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2513 blk_sync_queue(ctrl->ctrl.admin_q); in __nvme_fc_abort_outstanding_ios()
2514 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, in __nvme_fc_abort_outstanding_ios()
2515 nvme_fc_terminate_exchange, &ctrl->ctrl); in __nvme_fc_abort_outstanding_ios()
2516 blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set); in __nvme_fc_abort_outstanding_ios()
2520 nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg) in nvme_fc_error_recovery() argument
2529 if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) { in nvme_fc_error_recovery()
2530 __nvme_fc_abort_outstanding_ios(ctrl, true); in nvme_fc_error_recovery()
2531 set_bit(ASSOC_FAILED, &ctrl->flags); in nvme_fc_error_recovery()
2536 if (ctrl->ctrl.state != NVME_CTRL_LIVE) in nvme_fc_error_recovery()
2539 dev_warn(ctrl->ctrl.device, in nvme_fc_error_recovery()
2541 ctrl->cnum, errmsg); in nvme_fc_error_recovery()
2542 dev_warn(ctrl->ctrl.device, in nvme_fc_error_recovery()
2543 "NVME-FC{%d}: resetting controller\n", ctrl->cnum); in nvme_fc_error_recovery()
2545 nvme_reset_ctrl(&ctrl->ctrl); in nvme_fc_error_recovery()
2552 struct nvme_fc_ctrl *ctrl = op->ctrl; in nvme_fc_timeout() local
2560 dev_info(ctrl->ctrl.device, in nvme_fc_timeout()
2563 ctrl->cnum, op->queue->qnum, sqe->common.opcode, in nvme_fc_timeout()
2565 if (__nvme_fc_abort_op(ctrl, op)) in nvme_fc_timeout()
2566 nvme_fc_error_recovery(ctrl, "io timeout abort failed"); in nvme_fc_timeout()
2577 nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq, in nvme_fc_map_data() argument
2597 freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl, in nvme_fc_map_data()
2612 nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq, in nvme_fc_unmap_data() argument
2620 fc_dma_unmap_sg(ctrl->lport->dev, freq->sg_table.sgl, op->nents, in nvme_fc_unmap_data()
2652 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, in nvme_fc_start_fcp_op() argument
2664 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) in nvme_fc_start_fcp_op()
2667 if (!nvme_fc_ctrl_get(ctrl)) in nvme_fc_start_fcp_op()
2711 ret = nvme_fc_map_data(ctrl, op->rq, op); in nvme_fc_start_fcp_op()
2714 nvme_fc_ctrl_put(ctrl); in nvme_fc_start_fcp_op()
2721 fc_dma_sync_single_for_device(ctrl->lport->dev, op->fcp_req.cmddma, in nvme_fc_start_fcp_op()
2730 ret = ctrl->lport->ops->fcp_io(&ctrl->lport->localport, in nvme_fc_start_fcp_op()
2731 &ctrl->rport->remoteport, in nvme_fc_start_fcp_op()
2748 __nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate); in nvme_fc_start_fcp_op()
2751 nvme_fc_unmap_data(ctrl, op->rq, op); in nvme_fc_start_fcp_op()
2755 nvme_fc_ctrl_put(ctrl); in nvme_fc_start_fcp_op()
2757 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE && in nvme_fc_start_fcp_op()
2773 struct nvme_fc_ctrl *ctrl = queue->ctrl; in nvme_fc_queue_rq() local
2783 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE || in nvme_fc_queue_rq()
2784 !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready)) in nvme_fc_queue_rq()
2785 return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq); in nvme_fc_queue_rq()
2809 return nvme_fc_start_fcp_op(ctrl, queue, op, data_len, io_dir); in nvme_fc_queue_rq()
2815 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(arg); in nvme_fc_submit_async_event() local
2819 if (test_bit(FCCTRL_TERMIO, &ctrl->flags)) in nvme_fc_submit_async_event()
2822 aen_op = &ctrl->aen_ops[0]; in nvme_fc_submit_async_event()
2824 ret = nvme_fc_start_fcp_op(ctrl, aen_op->queue, aen_op, 0, in nvme_fc_submit_async_event()
2827 dev_err(ctrl->ctrl.device, in nvme_fc_submit_async_event()
2835 struct nvme_fc_ctrl *ctrl = op->ctrl; in nvme_fc_complete_rq() local
2840 nvme_fc_unmap_data(ctrl, rq, op); in nvme_fc_complete_rq()
2842 nvme_fc_ctrl_put(ctrl); in nvme_fc_complete_rq()
2856 nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) in nvme_fc_create_io_queues() argument
2858 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; in nvme_fc_create_io_queues()
2863 ctrl->lport->ops->max_hw_queues); in nvme_fc_create_io_queues()
2864 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); in nvme_fc_create_io_queues()
2866 dev_info(ctrl->ctrl.device, in nvme_fc_create_io_queues()
2871 ctrl->ctrl.queue_count = nr_io_queues + 1; in nvme_fc_create_io_queues()
2875 nvme_fc_init_io_queues(ctrl); in nvme_fc_create_io_queues()
2877 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); in nvme_fc_create_io_queues()
2878 ctrl->tag_set.ops = &nvme_fc_mq_ops; in nvme_fc_create_io_queues()
2879 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; in nvme_fc_create_io_queues()
2880 ctrl->tag_set.reserved_tags = 1; /* fabric connect */ in nvme_fc_create_io_queues()
2881 ctrl->tag_set.numa_node = ctrl->ctrl.numa_node; in nvme_fc_create_io_queues()
2882 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; in nvme_fc_create_io_queues()
2883 ctrl->tag_set.cmd_size = in nvme_fc_create_io_queues()
2885 ctrl->lport->ops->fcprqst_priv_sz); in nvme_fc_create_io_queues()
2886 ctrl->tag_set.driver_data = ctrl; in nvme_fc_create_io_queues()
2887 ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1; in nvme_fc_create_io_queues()
2888 ctrl->tag_set.timeout = NVME_IO_TIMEOUT; in nvme_fc_create_io_queues()
2890 ret = blk_mq_alloc_tag_set(&ctrl->tag_set); in nvme_fc_create_io_queues()
2894 ctrl->ctrl.tagset = &ctrl->tag_set; in nvme_fc_create_io_queues()
2896 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); in nvme_fc_create_io_queues()
2897 if (IS_ERR(ctrl->ctrl.connect_q)) { in nvme_fc_create_io_queues()
2898 ret = PTR_ERR(ctrl->ctrl.connect_q); in nvme_fc_create_io_queues()
2902 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); in nvme_fc_create_io_queues()
2906 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); in nvme_fc_create_io_queues()
2910 ctrl->ioq_live = true; in nvme_fc_create_io_queues()
2915 nvme_fc_delete_hw_io_queues(ctrl); in nvme_fc_create_io_queues()
2917 blk_cleanup_queue(ctrl->ctrl.connect_q); in nvme_fc_create_io_queues()
2919 blk_mq_free_tag_set(&ctrl->tag_set); in nvme_fc_create_io_queues()
2920 nvme_fc_free_io_queues(ctrl); in nvme_fc_create_io_queues()
2923 ctrl->ctrl.tagset = NULL; in nvme_fc_create_io_queues()
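The tail of nvme_fc_create_io_queues() shows the kernel's goto-unwind idiom: resources are acquired in order (tag set, connect queue, HW queues, FC connections) and, on any failure, released in strict reverse order through a ladder of out_* labels. A compilable sketch with stub acquire/release steps; the last step is made to fail so the unwind runs:

#include <stdio.h>

static int alloc_tag_set(void)	  { puts("alloc tag set");	return 0; }
static int init_connect_q(void)	  { puts("init connect_q");	return 0; }
static int create_hw_queues(void) { puts("create hw queues");	return 0; }
static int connect_queues(void)	  { puts("connect queues");	return -1; }

static void free_tag_set(void)	    { puts("free tag set"); }
static void cleanup_connect_q(void) { puts("cleanup connect_q"); }
static void delete_hw_queues(void)  { puts("delete hw queues"); }

static int create_io_queues(void)
{
	int ret;

	ret = alloc_tag_set();
	if (ret)
		goto out;
	ret = init_connect_q();
	if (ret)
		goto out_free_tag_set;
	ret = create_hw_queues();
	if (ret)
		goto out_cleanup_blk_queue;
	ret = connect_queues();
	if (ret)
		goto out_delete_hw_queues;
	return 0;

	/* Unwind in strict reverse order of construction. */
out_delete_hw_queues:
	delete_hw_queues();
out_cleanup_blk_queue:
	cleanup_connect_q();
out_free_tag_set:
	free_tag_set();
out:
	return ret;
}

int main(void)
{
	printf("create_io_queues() -> %d\n", create_io_queues());
	return 0;
}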
2929 nvme_fc_recreate_io_queues(struct nvme_fc_ctrl *ctrl) in nvme_fc_recreate_io_queues() argument
2931 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; in nvme_fc_recreate_io_queues()
2932 u32 prior_ioq_cnt = ctrl->ctrl.queue_count - 1; in nvme_fc_recreate_io_queues()
2937 ctrl->lport->ops->max_hw_queues); in nvme_fc_recreate_io_queues()
2938 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); in nvme_fc_recreate_io_queues()
2940 dev_info(ctrl->ctrl.device, in nvme_fc_recreate_io_queues()
2946 dev_info(ctrl->ctrl.device, in nvme_fc_recreate_io_queues()
2952 ctrl->ctrl.queue_count = nr_io_queues + 1; in nvme_fc_recreate_io_queues()
2954 if (ctrl->ctrl.queue_count == 1) in nvme_fc_recreate_io_queues()
2958 dev_info(ctrl->ctrl.device, in nvme_fc_recreate_io_queues()
2961 nvme_wait_freeze(&ctrl->ctrl); in nvme_fc_recreate_io_queues()
2962 blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues); in nvme_fc_recreate_io_queues()
2963 nvme_unfreeze(&ctrl->ctrl); in nvme_fc_recreate_io_queues()
2966 ret = nvme_fc_create_hw_io_queues(ctrl, ctrl->ctrl.sqsize + 1); in nvme_fc_recreate_io_queues()
2970 ret = nvme_fc_connect_io_queues(ctrl, ctrl->ctrl.sqsize + 1); in nvme_fc_recreate_io_queues()
2977 nvme_fc_delete_hw_io_queues(ctrl); in nvme_fc_recreate_io_queues()
2979 nvme_fc_free_io_queues(ctrl); in nvme_fc_recreate_io_queues()
3003 nvme_fc_ctlr_active_on_rport(struct nvme_fc_ctrl *ctrl) in nvme_fc_ctlr_active_on_rport() argument
3005 struct nvme_fc_rport *rport = ctrl->rport; in nvme_fc_ctlr_active_on_rport()
3008 if (test_and_set_bit(ASSOC_ACTIVE, &ctrl->flags)) in nvme_fc_ctlr_active_on_rport()
3019 nvme_fc_ctlr_inactive_on_rport(struct nvme_fc_ctrl *ctrl) in nvme_fc_ctlr_inactive_on_rport() argument
3021 struct nvme_fc_rport *rport = ctrl->rport; in nvme_fc_ctlr_inactive_on_rport()
3042 nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) in nvme_fc_create_association() argument
3044 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; in nvme_fc_create_association()
3050 ++ctrl->ctrl.nr_reconnects; in nvme_fc_create_association()
3052 if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE) in nvme_fc_create_association()
3055 if (nvme_fc_ctlr_active_on_rport(ctrl)) in nvme_fc_create_association()
3058 dev_info(ctrl->ctrl.device, in nvme_fc_create_association()
3061 ctrl->cnum, ctrl->lport->localport.port_name, in nvme_fc_create_association()
3062 ctrl->rport->remoteport.port_name, ctrl->ctrl.opts->subsysnqn); in nvme_fc_create_association()
3064 clear_bit(ASSOC_FAILED, &ctrl->flags); in nvme_fc_create_association()
3070 ret = __nvme_fc_create_hw_queue(ctrl, &ctrl->queues[0], 0, in nvme_fc_create_association()
3075 ret = nvme_fc_connect_admin_queue(ctrl, &ctrl->queues[0], in nvme_fc_create_association()
3080 ret = nvmf_connect_admin_queue(&ctrl->ctrl); in nvme_fc_create_association()
3084 set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags); in nvme_fc_create_association()
3093 ret = nvme_enable_ctrl(&ctrl->ctrl); in nvme_fc_create_association()
3094 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags)) in nvme_fc_create_association()
3097 ctrl->ctrl.max_segments = ctrl->lport->ops->max_sgl_segments; in nvme_fc_create_association()
3098 ctrl->ctrl.max_hw_sectors = ctrl->ctrl.max_segments << in nvme_fc_create_association()
3101 nvme_start_admin_queue(&ctrl->ctrl); in nvme_fc_create_association()
3103 ret = nvme_init_identify(&ctrl->ctrl); in nvme_fc_create_association()
3104 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags)) in nvme_fc_create_association()
3110 if (ctrl->ctrl.icdoff) { in nvme_fc_create_association()
3111 dev_err(ctrl->ctrl.device, "icdoff %d is not supported!\n", in nvme_fc_create_association()
3112 ctrl->ctrl.icdoff); in nvme_fc_create_association()
3118 if (opts->queue_size > ctrl->ctrl.maxcmd) { in nvme_fc_create_association()
3120 dev_warn(ctrl->ctrl.device, in nvme_fc_create_association()
3123 opts->queue_size, ctrl->ctrl.maxcmd); in nvme_fc_create_association()
3124 opts->queue_size = ctrl->ctrl.maxcmd; in nvme_fc_create_association()
3127 if (opts->queue_size > ctrl->ctrl.sqsize + 1) { in nvme_fc_create_association()
3129 dev_warn(ctrl->ctrl.device, in nvme_fc_create_association()
3132 opts->queue_size, ctrl->ctrl.sqsize + 1); in nvme_fc_create_association()
3133 opts->queue_size = ctrl->ctrl.sqsize + 1; in nvme_fc_create_association()
3136 ret = nvme_fc_init_aen_ops(ctrl); in nvme_fc_create_association()
3144 if (ctrl->ctrl.queue_count > 1) { in nvme_fc_create_association()
3145 if (!ctrl->ioq_live) in nvme_fc_create_association()
3146 ret = nvme_fc_create_io_queues(ctrl); in nvme_fc_create_association()
3148 ret = nvme_fc_recreate_io_queues(ctrl); in nvme_fc_create_association()
3150 if (ret || test_bit(ASSOC_FAILED, &ctrl->flags)) in nvme_fc_create_association()
3153 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); in nvme_fc_create_association()
3155 ctrl->ctrl.nr_reconnects = 0; in nvme_fc_create_association()
3158 nvme_start_ctrl(&ctrl->ctrl); in nvme_fc_create_association()
3163 nvme_fc_term_aen_ops(ctrl); in nvme_fc_create_association()
3166 nvme_fc_xmt_disconnect_assoc(ctrl); in nvme_fc_create_association()
3167 spin_lock_irqsave(&ctrl->lock, flags); in nvme_fc_create_association()
3168 ctrl->association_id = 0; in nvme_fc_create_association()
3169 disls = ctrl->rcv_disconn; in nvme_fc_create_association()
3170 ctrl->rcv_disconn = NULL; in nvme_fc_create_association()
3171 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_fc_create_association()
3175 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); in nvme_fc_create_association()
3177 nvme_fc_free_queue(&ctrl->queues[0]); in nvme_fc_create_association()
3178 clear_bit(ASSOC_ACTIVE, &ctrl->flags); in nvme_fc_create_association()
3179 nvme_fc_ctlr_inactive_on_rport(ctrl); in nvme_fc_create_association()
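A notable detail of nvme_fc_create_association() is the repeated "ret || test_bit(ASSOC_FAILED, ...)" check after each stage: error recovery can set the flag concurrently while bring-up is still running, and re-checking it at every step makes the sequence abort promptly instead of continuing on a dead association. A sketch of that pattern with an atomic flag and stubbed-out stages; the flag is flipped mid-sequence to show the early abort:

#include <stdatomic.h>
#include <stdio.h>

/* ASSOC_FAILED analog, settable by a concurrent error-recovery path. */
static atomic_bool assoc_failed;

static int step(const char *name, int rc)
{
	printf("%s -> %d\n", name, rc);
	return rc;
}

static int create_association(void)
{
	atomic_store(&assoc_failed, false);

	if (step("connect admin queue", 0) || atomic_load(&assoc_failed))
		goto out_fail;

	/* Simulate transport error recovery firing mid-bringup. */
	atomic_store(&assoc_failed, true);

	if (step("enable controller", 0) || atomic_load(&assoc_failed))
		goto out_fail;
	if (step("create io queues", 0) || atomic_load(&assoc_failed))
		goto out_fail;
	return 0;

out_fail:
	puts("tearing association back down");
	return -1;
}

int main(void)
{
	return create_association() ? 1 : 0;
}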
3192 nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl) in nvme_fc_delete_association() argument
3197 if (!test_and_clear_bit(ASSOC_ACTIVE, &ctrl->flags)) in nvme_fc_delete_association()
3200 spin_lock_irqsave(&ctrl->lock, flags); in nvme_fc_delete_association()
3201 set_bit(FCCTRL_TERMIO, &ctrl->flags); in nvme_fc_delete_association()
3202 ctrl->iocnt = 0; in nvme_fc_delete_association()
3203 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_fc_delete_association()
3205 __nvme_fc_abort_outstanding_ios(ctrl, false); in nvme_fc_delete_association()
3208 nvme_fc_abort_aen_ops(ctrl); in nvme_fc_delete_association()
3211 spin_lock_irq(&ctrl->lock); in nvme_fc_delete_association()
3212 wait_event_lock_irq(ctrl->ioabort_wait, ctrl->iocnt == 0, ctrl->lock); in nvme_fc_delete_association()
3213 clear_bit(FCCTRL_TERMIO, &ctrl->flags); in nvme_fc_delete_association()
3214 spin_unlock_irq(&ctrl->lock); in nvme_fc_delete_association()
3216 nvme_fc_term_aen_ops(ctrl); in nvme_fc_delete_association()
3224 if (ctrl->association_id) in nvme_fc_delete_association()
3225 nvme_fc_xmt_disconnect_assoc(ctrl); in nvme_fc_delete_association()
3227 spin_lock_irqsave(&ctrl->lock, flags); in nvme_fc_delete_association()
3228 ctrl->association_id = 0; in nvme_fc_delete_association()
3229 disls = ctrl->rcv_disconn; in nvme_fc_delete_association()
3230 ctrl->rcv_disconn = NULL; in nvme_fc_delete_association()
3231 spin_unlock_irqrestore(&ctrl->lock, flags); in nvme_fc_delete_association()
3239 if (ctrl->ctrl.tagset) { in nvme_fc_delete_association()
3240 nvme_fc_delete_hw_io_queues(ctrl); in nvme_fc_delete_association()
3241 nvme_fc_free_io_queues(ctrl); in nvme_fc_delete_association()
3244 __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[0], 0); in nvme_fc_delete_association()
3245 nvme_fc_free_queue(&ctrl->queues[0]); in nvme_fc_delete_association()
3248 nvme_start_admin_queue(&ctrl->ctrl); in nvme_fc_delete_association()
3251 nvme_start_queues(&ctrl->ctrl); in nvme_fc_delete_association()
3253 nvme_fc_ctlr_inactive_on_rport(ctrl); in nvme_fc_delete_association()
3259 struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl); in nvme_fc_delete_ctrl() local
3261 cancel_work_sync(&ctrl->ioerr_work); in nvme_fc_delete_ctrl()
3262 cancel_delayed_work_sync(&ctrl->connect_work); in nvme_fc_delete_ctrl()
3267 nvme_fc_delete_association(ctrl); in nvme_fc_delete_ctrl()
3271 nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status) in nvme_fc_reconnect_or_delete() argument
3273 struct nvme_fc_rport *rport = ctrl->rport; in nvme_fc_reconnect_or_delete()
3275 unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ; in nvme_fc_reconnect_or_delete()
3278 if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) in nvme_fc_reconnect_or_delete()
3282 dev_info(ctrl->ctrl.device, in nvme_fc_reconnect_or_delete()
3284 ctrl->cnum, status); in nvme_fc_reconnect_or_delete()
3288 if (recon && nvmf_should_reconnect(&ctrl->ctrl)) { in nvme_fc_reconnect_or_delete()
3290 dev_info(ctrl->ctrl.device, in nvme_fc_reconnect_or_delete()
3293 ctrl->cnum, recon_delay / HZ); in nvme_fc_reconnect_or_delete()
3297 queue_delayed_work(nvme_wq, &ctrl->connect_work, recon_delay); in nvme_fc_reconnect_or_delete()
3300 dev_warn(ctrl->ctrl.device, in nvme_fc_reconnect_or_delete()
3303 ctrl->cnum, ctrl->ctrl.nr_reconnects); in nvme_fc_reconnect_or_delete()
3305 dev_warn(ctrl->ctrl.device, in nvme_fc_reconnect_or_delete()
3308 ctrl->cnum, min_t(int, portptr->dev_loss_tmo, in nvme_fc_reconnect_or_delete()
3309 (ctrl->ctrl.opts->max_reconnects * in nvme_fc_reconnect_or_delete()
3310 ctrl->ctrl.opts->reconnect_delay))); in nvme_fc_reconnect_or_delete()
3311 WARN_ON(nvme_delete_ctrl(&ctrl->ctrl)); in nvme_fc_reconnect_or_delete()
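The give-up message in nvme_fc_reconnect_or_delete() reports the elapsed window as the smaller of the remote port's dev_loss_tmo and max_reconnects * reconnect_delay, i.e. whichever limit expires first ends the retry loop. The arithmetic, with hypothetical option values:

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }	/* min_t analog */

int main(void)
{
	/* Hypothetical values; the real ones come from the connect
	 * options and the remote port. */
	int reconnect_delay = 10;	/* seconds between attempts */
	int max_reconnects  = 6;	/* attempts before giving up */
	int dev_loss_tmo    = 60;	/* remote-port loss timeout */

	int window = min_int(dev_loss_tmo,
			     max_reconnects * reconnect_delay);

	printf("controller deleted after ~%d s without connectivity\n",
	       window);
	return 0;
}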
3318 struct nvme_fc_ctrl *ctrl = in nvme_fc_reset_ctrl_work() local
3319 container_of(work, struct nvme_fc_ctrl, ctrl.reset_work); in nvme_fc_reset_ctrl_work()
3321 nvme_stop_ctrl(&ctrl->ctrl); in nvme_fc_reset_ctrl_work()
3324 nvme_fc_delete_association(ctrl); in nvme_fc_reset_ctrl_work()
3326 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) in nvme_fc_reset_ctrl_work()
3327 dev_err(ctrl->ctrl.device, in nvme_fc_reset_ctrl_work()
3329 "to CONNECTING\n", ctrl->cnum); in nvme_fc_reset_ctrl_work()
3331 if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE) { in nvme_fc_reset_ctrl_work()
3332 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { in nvme_fc_reset_ctrl_work()
3333 dev_err(ctrl->ctrl.device, in nvme_fc_reset_ctrl_work()
3335 "after reset\n", ctrl->cnum); in nvme_fc_reset_ctrl_work()
3337 flush_delayed_work(&ctrl->connect_work); in nvme_fc_reset_ctrl_work()
3340 nvme_fc_reconnect_or_delete(ctrl, -ENOTCONN); in nvme_fc_reset_ctrl_work()
3363 struct nvme_fc_ctrl *ctrl = in nvme_fc_connect_ctrl_work() local
3367 ret = nvme_fc_create_association(ctrl); in nvme_fc_connect_ctrl_work()
3369 nvme_fc_reconnect_or_delete(ctrl, ret); in nvme_fc_connect_ctrl_work()
3371 dev_info(ctrl->ctrl.device, in nvme_fc_connect_ctrl_work()
3373 ctrl->cnum); in nvme_fc_connect_ctrl_work()
3399 struct nvme_fc_ctrl *ctrl; in nvme_fc_existing_controller() local
3404 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { in nvme_fc_existing_controller()
3405 found = nvmf_ctlr_matches_baseopts(&ctrl->ctrl, opts); in nvme_fc_existing_controller()
3418 struct nvme_fc_ctrl *ctrl; in nvme_fc_init_ctrl() local
3434 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); in nvme_fc_init_ctrl()
3435 if (!ctrl) { in nvme_fc_init_ctrl()
3459 ctrl->ctrl.opts = opts; in nvme_fc_init_ctrl()
3460 ctrl->ctrl.nr_reconnects = 0; in nvme_fc_init_ctrl()
3462 ctrl->ctrl.numa_node = dev_to_node(lport->dev); in nvme_fc_init_ctrl()
3464 ctrl->ctrl.numa_node = NUMA_NO_NODE; in nvme_fc_init_ctrl()
3465 INIT_LIST_HEAD(&ctrl->ctrl_list); in nvme_fc_init_ctrl()
3466 ctrl->lport = lport; in nvme_fc_init_ctrl()
3467 ctrl->rport = rport; in nvme_fc_init_ctrl()
3468 ctrl->dev = lport->dev; in nvme_fc_init_ctrl()
3469 ctrl->cnum = idx; in nvme_fc_init_ctrl()
3470 ctrl->ioq_live = false; in nvme_fc_init_ctrl()
3471 init_waitqueue_head(&ctrl->ioabort_wait); in nvme_fc_init_ctrl()
3473 get_device(ctrl->dev); in nvme_fc_init_ctrl()
3474 kref_init(&ctrl->ref); in nvme_fc_init_ctrl()
3476 INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work); in nvme_fc_init_ctrl()
3477 INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work); in nvme_fc_init_ctrl()
3478 INIT_WORK(&ctrl->ioerr_work, nvme_fc_ctrl_ioerr_work); in nvme_fc_init_ctrl()
3479 spin_lock_init(&ctrl->lock); in nvme_fc_init_ctrl()
3482 ctrl->ctrl.queue_count = min_t(unsigned int, in nvme_fc_init_ctrl()
3485 ctrl->ctrl.queue_count++; /* +1 for admin queue */ in nvme_fc_init_ctrl()
3487 ctrl->ctrl.sqsize = opts->queue_size - 1; in nvme_fc_init_ctrl()
3488 ctrl->ctrl.kato = opts->kato; in nvme_fc_init_ctrl()
3489 ctrl->ctrl.cntlid = 0xffff; in nvme_fc_init_ctrl()
3492 ctrl->queues = kcalloc(ctrl->ctrl.queue_count, in nvme_fc_init_ctrl()
3494 if (!ctrl->queues) in nvme_fc_init_ctrl()
3497 nvme_fc_init_queue(ctrl, 0); in nvme_fc_init_ctrl()
3499 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set)); in nvme_fc_init_ctrl()
3500 ctrl->admin_tag_set.ops = &nvme_fc_admin_mq_ops; in nvme_fc_init_ctrl()
3501 ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH; in nvme_fc_init_ctrl()
3502 ctrl->admin_tag_set.reserved_tags = 2; /* fabric connect + Keep-Alive */ in nvme_fc_init_ctrl()
3503 ctrl->admin_tag_set.numa_node = ctrl->ctrl.numa_node; in nvme_fc_init_ctrl()
3504 ctrl->admin_tag_set.cmd_size = in nvme_fc_init_ctrl()
3506 ctrl->lport->ops->fcprqst_priv_sz); in nvme_fc_init_ctrl()
3507 ctrl->admin_tag_set.driver_data = ctrl; in nvme_fc_init_ctrl()
3508 ctrl->admin_tag_set.nr_hw_queues = 1; in nvme_fc_init_ctrl()
3509 ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT; in nvme_fc_init_ctrl()
3510 ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED; in nvme_fc_init_ctrl()
3512 ret = blk_mq_alloc_tag_set(&ctrl->admin_tag_set); in nvme_fc_init_ctrl()
3515 ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set; in nvme_fc_init_ctrl()
3517 ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set); in nvme_fc_init_ctrl()
3518 if (IS_ERR(ctrl->ctrl.fabrics_q)) { in nvme_fc_init_ctrl()
3519 ret = PTR_ERR(ctrl->ctrl.fabrics_q); in nvme_fc_init_ctrl()
3523 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); in nvme_fc_init_ctrl()
3524 if (IS_ERR(ctrl->ctrl.admin_q)) { in nvme_fc_init_ctrl()
3525 ret = PTR_ERR(ctrl->ctrl.admin_q); in nvme_fc_init_ctrl()
3536 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_fc_ctrl_ops, 0); in nvme_fc_init_ctrl()
3543 list_add_tail(&ctrl->ctrl_list, &rport->ctrl_list); in nvme_fc_init_ctrl()
3546 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING) || in nvme_fc_init_ctrl()
3547 !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) { in nvme_fc_init_ctrl()
3548 dev_err(ctrl->ctrl.device, in nvme_fc_init_ctrl()
3549 "NVME-FC{%d}: failed to init ctrl state\n", ctrl->cnum); in nvme_fc_init_ctrl()
3553 if (!queue_delayed_work(nvme_wq, &ctrl->connect_work, 0)) { in nvme_fc_init_ctrl()
3554 dev_err(ctrl->ctrl.device, in nvme_fc_init_ctrl()
3556 ctrl->cnum); in nvme_fc_init_ctrl()
3560 flush_delayed_work(&ctrl->connect_work); in nvme_fc_init_ctrl()
3562 dev_info(ctrl->ctrl.device, in nvme_fc_init_ctrl()
3564 ctrl->cnum, ctrl->ctrl.opts->subsysnqn); in nvme_fc_init_ctrl()
3566 return &ctrl->ctrl; in nvme_fc_init_ctrl()
3569 nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING); in nvme_fc_init_ctrl()
3570 cancel_work_sync(&ctrl->ioerr_work); in nvme_fc_init_ctrl()
3571 cancel_work_sync(&ctrl->ctrl.reset_work); in nvme_fc_init_ctrl()
3572 cancel_delayed_work_sync(&ctrl->connect_work); in nvme_fc_init_ctrl()
3574 ctrl->ctrl.opts = NULL; in nvme_fc_init_ctrl()
3577 nvme_uninit_ctrl(&ctrl->ctrl); in nvme_fc_init_ctrl()
3580 nvme_put_ctrl(&ctrl->ctrl); in nvme_fc_init_ctrl()
3594 blk_cleanup_queue(ctrl->ctrl.admin_q); in nvme_fc_init_ctrl()
3596 blk_cleanup_queue(ctrl->ctrl.fabrics_q); in nvme_fc_init_ctrl()
3598 blk_mq_free_tag_set(&ctrl->admin_tag_set); in nvme_fc_init_ctrl()
3600 kfree(ctrl->queues); in nvme_fc_init_ctrl()
3602 put_device(ctrl->dev); in nvme_fc_init_ctrl()
3603 ida_simple_remove(&nvme_fc_ctrl_cnt, ctrl->cnum); in nvme_fc_init_ctrl()
3605 kfree(ctrl); in nvme_fc_init_ctrl()
3682 struct nvme_ctrl *ctrl; in nvme_fc_create_ctrl() local
3716 ctrl = nvme_fc_init_ctrl(dev, opts, lport, rport); in nvme_fc_create_ctrl()
3717 if (IS_ERR(ctrl)) in nvme_fc_create_ctrl()
3719 return ctrl; in nvme_fc_create_ctrl()
3880 struct nvme_fc_ctrl *ctrl; in nvme_fc_delete_controllers() local
3883 list_for_each_entry(ctrl, &rport->ctrl_list, ctrl_list) { in nvme_fc_delete_controllers()
3884 dev_warn(ctrl->ctrl.device, in nvme_fc_delete_controllers()
3886 ctrl->cnum); in nvme_fc_delete_controllers()
3887 nvme_delete_ctrl(&ctrl->ctrl); in nvme_fc_delete_controllers()