
Lines matching references to ctrl (NVMe target core, nvmet)

62 static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)  in nvmet_async_events_free()  argument
67 mutex_lock(&ctrl->lock); in nvmet_async_events_free()
68 if (!ctrl->nr_async_event_cmds) { in nvmet_async_events_free()
69 mutex_unlock(&ctrl->lock); in nvmet_async_events_free()
73 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; in nvmet_async_events_free()
74 mutex_unlock(&ctrl->lock); in nvmet_async_events_free()
81 struct nvmet_ctrl *ctrl = in nvmet_async_event_work() local
87 mutex_lock(&ctrl->lock); in nvmet_async_event_work()
88 aen = list_first_entry_or_null(&ctrl->async_events, in nvmet_async_event_work()
90 if (!aen || !ctrl->nr_async_event_cmds) { in nvmet_async_event_work()
91 mutex_unlock(&ctrl->lock); in nvmet_async_event_work()
95 req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds]; in nvmet_async_event_work()
101 mutex_unlock(&ctrl->lock); in nvmet_async_event_work()
106 static void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type, in nvmet_add_async_event() argument
119 mutex_lock(&ctrl->lock); in nvmet_add_async_event()
120 list_add_tail(&aen->entry, &ctrl->async_events); in nvmet_add_async_event()
121 mutex_unlock(&ctrl->lock); in nvmet_add_async_event()
123 schedule_work(&ctrl->async_event_work); in nvmet_add_async_event()
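
The three groups of matches above (nvmet_async_events_free, nvmet_async_event_work, nvmet_add_async_event) follow one pattern: producers append an event to a mutex-protected list and schedule a work item, and the worker pairs the oldest queued event with a previously posted AER command slot taken from ctrl->async_event_cmds. Below is a minimal userspace sketch of that pattern, not the kernel code; the names (aen_ctrl, aen_post, aen_drain) are invented for illustration and pthreads stand in for the kernel mutex and workqueue.

/* Simplified userspace illustration of the AEN pattern above:
 * producers queue events under a lock; a drain step pairs the oldest
 * event with a reserved command slot, if one is available.
 * All names here are invented for illustration. Build with -pthread.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct aen_event {
    int type;
    struct aen_event *next;
};

struct aen_ctrl {
    pthread_mutex_t lock;
    struct aen_event *events;       /* FIFO of pending events */
    int nr_async_event_cmds;        /* reserved AER "command slots" */
};

/* Producer side: queue an event; the kernel would schedule_work() here. */
static void aen_post(struct aen_ctrl *ctrl, int type)
{
    struct aen_event *ev = calloc(1, sizeof(*ev));
    if (!ev)
        return;
    ev->type = type;

    pthread_mutex_lock(&ctrl->lock);
    struct aen_event **p = &ctrl->events;
    while (*p)                      /* append to tail to keep FIFO order */
        p = &(*p)->next;
    *p = ev;
    pthread_mutex_unlock(&ctrl->lock);
}

/* Worker side: complete one event per reserved slot, oldest first. */
static void aen_drain(struct aen_ctrl *ctrl)
{
    for (;;) {
        pthread_mutex_lock(&ctrl->lock);
        struct aen_event *ev = ctrl->events;
        if (!ev || !ctrl->nr_async_event_cmds) {
            pthread_mutex_unlock(&ctrl->lock);
            return;
        }
        ctrl->nr_async_event_cmds--;
        ctrl->events = ev->next;
        pthread_mutex_unlock(&ctrl->lock);

        printf("completed AEN of type %d\n", ev->type);
        free(ev);
    }
}

int main(void)
{
    struct aen_ctrl ctrl = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .nr_async_event_cmds = 2,   /* pretend two AER commands are queued */
    };
    aen_post(&ctrl, 2);             /* e.g. a "notice" type event */
    aen_post(&ctrl, 2);
    aen_post(&ctrl, 2);             /* third event stays queued: no slot left */
    aen_drain(&ctrl);
    return 0;
}
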
197 struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work), in nvmet_keep_alive_timer() local
201 ctrl->cntlid, ctrl->kato); in nvmet_keep_alive_timer()
203 ctrl->ops->delete_ctrl(ctrl); in nvmet_keep_alive_timer()
206 static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl) in nvmet_start_keep_alive_timer() argument
209 ctrl->cntlid, ctrl->kato); in nvmet_start_keep_alive_timer()
211 INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer); in nvmet_start_keep_alive_timer()
212 schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ); in nvmet_start_keep_alive_timer()
215 static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl) in nvmet_stop_keep_alive_timer() argument
217 pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid); in nvmet_stop_keep_alive_timer()
219 cancel_delayed_work_sync(&ctrl->ka_work); in nvmet_stop_keep_alive_timer()
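
The keep-alive matches show the timeout arithmetic: ctrl->kato holds the keep-alive timeout in seconds (the allocation path at line 794 rounds the host-supplied millisecond value up with DIV_ROUND_UP(kato, 1000)), the delayed work is armed ctrl->kato * HZ jiffies ahead, and if it ever fires the controller is torn down through ctrl->ops->delete_ctrl(). A rough userspace sketch of that deadline logic, with invented names and wall-clock seconds standing in for jiffies:

/* Minimal illustration of the keep-alive deadline logic above: KATO is
 * in seconds, the work item is armed KATO seconds ahead, and each
 * keep-alive command re-arms it. Invented names; not the kernel code.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct ka_ctrl {
    unsigned kato;          /* keep-alive timeout, seconds */
    time_t deadline;        /* when the controller is considered dead */
};

static void ka_start(struct ka_ctrl *ctrl)
{
    ctrl->deadline = time(NULL) + ctrl->kato;   /* ~ schedule_delayed_work(kato * HZ) */
}

static void ka_received(struct ka_ctrl *ctrl)
{
    ka_start(ctrl);                             /* a keep-alive command re-arms the timer */
}

static bool ka_expired(const struct ka_ctrl *ctrl)
{
    return time(NULL) > ctrl->deadline;         /* expiry would trigger delete_ctrl() */
}

int main(void)
{
    struct ka_ctrl ctrl = { .kato = 5 };
    ka_start(&ctrl);
    ka_received(&ctrl);
    printf("expired: %d\n", ka_expired(&ctrl));
    return 0;
}
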
222 static struct nvmet_ns *__nvmet_find_namespace(struct nvmet_ctrl *ctrl, in __nvmet_find_namespace() argument
227 list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) { in __nvmet_find_namespace()
235 struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid) in nvmet_find_namespace() argument
240 ns = __nvmet_find_namespace(ctrl, nsid); in nvmet_find_namespace()
263 struct nvmet_ctrl *ctrl; in nvmet_ns_enable() local
309 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) in nvmet_ns_enable()
310 nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0); in nvmet_ns_enable()
326 struct nvmet_ctrl *ctrl; in nvmet_ns_disable() local
350 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) in nvmet_ns_disable()
351 nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE, 0, 0); in nvmet_ns_disable()
407 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq, in nvmet_cq_setup() argument
413 ctrl->cqs[qid] = cq; in nvmet_cq_setup()
416 void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq, in nvmet_sq_setup() argument
422 ctrl->sqs[qid] = sq; in nvmet_sq_setup()
438 if (sq->ctrl && sq->ctrl->sqs && sq->ctrl->sqs[0] == sq) in nvmet_sq_destroy()
439 nvmet_async_events_free(sq->ctrl); in nvmet_sq_destroy()
445 if (sq->ctrl) { in nvmet_sq_destroy()
446 nvmet_ctrl_put(sq->ctrl); in nvmet_sq_destroy()
447 sq->ctrl = NULL; /* allows reusing the queue later */ in nvmet_sq_destroy()
501 if (unlikely(!req->sq->ctrl)) in nvmet_req_init()
508 else if (req->sq->ctrl->subsys->type == NVME_NQN_DISC) in nvmet_req_init()
564 static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl) in nvmet_start_ctrl() argument
566 lockdep_assert_held(&ctrl->lock); in nvmet_start_ctrl()
568 if (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES || in nvmet_start_ctrl()
569 nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES || in nvmet_start_ctrl()
570 nvmet_cc_mps(ctrl->cc) != 0 || in nvmet_start_ctrl()
571 nvmet_cc_ams(ctrl->cc) != 0 || in nvmet_start_ctrl()
572 nvmet_cc_css(ctrl->cc) != 0) { in nvmet_start_ctrl()
573 ctrl->csts = NVME_CSTS_CFS; in nvmet_start_ctrl()
577 ctrl->csts = NVME_CSTS_RDY; in nvmet_start_ctrl()
580 static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl) in nvmet_clear_ctrl() argument
582 lockdep_assert_held(&ctrl->lock); in nvmet_clear_ctrl()
585 ctrl->csts &= ~NVME_CSTS_RDY; in nvmet_clear_ctrl()
586 ctrl->cc = 0; in nvmet_clear_ctrl()
589 void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new) in nvmet_update_cc() argument
593 mutex_lock(&ctrl->lock); in nvmet_update_cc()
594 old = ctrl->cc; in nvmet_update_cc()
595 ctrl->cc = new; in nvmet_update_cc()
598 nvmet_start_ctrl(ctrl); in nvmet_update_cc()
600 nvmet_clear_ctrl(ctrl); in nvmet_update_cc()
602 nvmet_clear_ctrl(ctrl); in nvmet_update_cc()
603 ctrl->csts |= NVME_CSTS_SHST_CMPLT; in nvmet_update_cc()
606 ctrl->csts &= ~NVME_CSTS_SHST_CMPLT; in nvmet_update_cc()
607 mutex_unlock(&ctrl->lock); in nvmet_update_cc()
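
nvmet_start_ctrl() refuses to set CSTS.RDY unless the CC value the host wrote selects exactly what the target supports: 64-byte SQ entries (IOSQES = 6), 16-byte CQ entries (IOCQES = 4), MPS = 0, round-robin arbitration, and the NVM command set; anything else latches CSTS.CFS. A standalone sketch of those checks, with field extractors written from the standard NVMe CC register layout rather than the kernel's own macros:

/* Illustration of the CC checks in nvmet_start_ctrl() above, assuming
 * the standard NVMe CC layout (EN bit 0, CSS 6:4, MPS 10:7, AMS 13:11,
 * IOSQES 19:16, IOCQES 23:20). Not the kernel macros, same arithmetic.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CC_EN(cc)      ((cc) & 0x1)
#define CC_CSS(cc)     (((cc) >> 4) & 0x7)
#define CC_MPS(cc)     (((cc) >> 7) & 0xf)
#define CC_AMS(cc)     (((cc) >> 11) & 0x7)
#define CC_IOSQES(cc)  (((cc) >> 16) & 0xf)
#define CC_IOCQES(cc)  (((cc) >> 20) & 0xf)

#define NVM_IOSQES 6   /* 2^6 = 64-byte submission queue entries */
#define NVM_IOCQES 4   /* 2^4 = 16-byte completion queue entries */

static bool cc_valid_for_enable(uint32_t cc)
{
    return CC_IOSQES(cc) == NVM_IOSQES &&
           CC_IOCQES(cc) == NVM_IOCQES &&
           CC_MPS(cc) == 0 &&          /* 4 KiB memory pages only */
           CC_AMS(cc) == 0 &&          /* round-robin arbitration only */
           CC_CSS(cc) == 0;            /* NVM command set */
}

int main(void)
{
    /* a typical CC value a fabrics host would write: EN=1, IOSQES=6, IOCQES=4 */
    uint32_t cc = 1u | (NVM_IOSQES << 16) | (NVM_IOCQES << 20);
    printf("EN=%u valid=%d\n", CC_EN(cc), cc_valid_for_enable(cc));
    return 0;
}
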
610 static void nvmet_init_cap(struct nvmet_ctrl *ctrl) in nvmet_init_cap() argument
613 ctrl->cap = (1ULL << 37); in nvmet_init_cap()
615 ctrl->cap |= (15ULL << 24); in nvmet_init_cap()
617 ctrl->cap |= NVMET_QUEUE_SIZE - 1; in nvmet_init_cap()
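
nvmet_init_cap() builds the CAP register from three pieces: bit 37 advertises the NVM command set (CAP.CSS), 15 in the TO field (bits 31:24, 500 ms units) gives a 7.5-second worst-case enable time, and NVMET_QUEUE_SIZE - 1 fills MQES, which is zero-based. A small decoder of that value, assuming the standard CAP layout and using 1024 purely as a stand-in for NVMET_QUEUE_SIZE:

/* Decodes the three CAP fields set in nvmet_init_cap() above, assuming
 * the standard NVMe CAP layout (MQES 15:0, TO 31:24 in 500 ms units,
 * CSS 44:37 with bit 37 = NVM command set). QUEUE_SIZE is illustrative.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define QUEUE_SIZE 1024ULL   /* stand-in for NVMET_QUEUE_SIZE */

int main(void)
{
    uint64_t cap = 0;

    cap |= (1ULL << 37);        /* CAP.CSS: NVM command set supported */
    cap |= (15ULL << 24);       /* CAP.TO: 15 * 500 ms = 7.5 s enable timeout */
    cap |= QUEUE_SIZE - 1;      /* CAP.MQES is zero-based */

    printf("MQES = %" PRIu64 " (max %llu entries)\n",
           cap & 0xffff, (unsigned long long)((cap & 0xffff) + 1));
    printf("TO   = %" PRIu64 " (%.1f s)\n",
           (cap >> 24) & 0xff, ((cap >> 24) & 0xff) * 0.5);
    printf("CSS  = 0x%" PRIx64 " (bit 0 = NVM command set)\n",
           (cap >> 37) & 0xff);
    return 0;
}
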
624 struct nvmet_ctrl *ctrl; in nvmet_ctrl_find_get() local
636 list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) { in nvmet_ctrl_find_get()
637 if (ctrl->cntlid == cntlid) { in nvmet_ctrl_find_get()
638 if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) { in nvmet_ctrl_find_get()
642 if (!kref_get_unless_zero(&ctrl->ref)) in nvmet_ctrl_find_get()
645 *ret = ctrl; in nvmet_ctrl_find_get()
705 struct nvmet_ctrl *ctrl; in nvmet_alloc_ctrl() local
730 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); in nvmet_alloc_ctrl()
731 if (!ctrl) in nvmet_alloc_ctrl()
733 mutex_init(&ctrl->lock); in nvmet_alloc_ctrl()
735 nvmet_init_cap(ctrl); in nvmet_alloc_ctrl()
737 INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work); in nvmet_alloc_ctrl()
738 INIT_LIST_HEAD(&ctrl->async_events); in nvmet_alloc_ctrl()
740 memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE); in nvmet_alloc_ctrl()
741 memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE); in nvmet_alloc_ctrl()
744 get_random_bytes(&ctrl->serial, sizeof(ctrl->serial)); in nvmet_alloc_ctrl()
746 kref_init(&ctrl->ref); in nvmet_alloc_ctrl()
747 ctrl->subsys = subsys; in nvmet_alloc_ctrl()
749 ctrl->cqs = kcalloc(subsys->max_qid + 1, in nvmet_alloc_ctrl()
752 if (!ctrl->cqs) in nvmet_alloc_ctrl()
755 ctrl->sqs = kcalloc(subsys->max_qid + 1, in nvmet_alloc_ctrl()
758 if (!ctrl->sqs) in nvmet_alloc_ctrl()
768 ctrl->cntlid = ret; in nvmet_alloc_ctrl()
770 ctrl->ops = req->ops; in nvmet_alloc_ctrl()
771 if (ctrl->subsys->type == NVME_NQN_DISC) { in nvmet_alloc_ctrl()
791 ctrl->kato = NVMET_DISC_KATO; in nvmet_alloc_ctrl()
794 ctrl->kato = DIV_ROUND_UP(kato, 1000); in nvmet_alloc_ctrl()
796 nvmet_start_keep_alive_timer(ctrl); in nvmet_alloc_ctrl()
799 list_add_tail(&ctrl->subsys_entry, &subsys->ctrls); in nvmet_alloc_ctrl()
802 *ctrlp = ctrl; in nvmet_alloc_ctrl()
806 kfree(ctrl->sqs); in nvmet_alloc_ctrl()
808 kfree(ctrl->cqs); in nvmet_alloc_ctrl()
810 kfree(ctrl); in nvmet_alloc_ctrl()
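
The tail of nvmet_alloc_ctrl() (the kfree() calls at lines 806-810) is the usual goto-unwind error path: each allocation gets a matching cleanup label, and a failure jumps to the label that frees everything allocated so far, in reverse order. A self-contained sketch of that pattern with invented names:

/* Standalone sketch of the goto-unwind pattern visible at the end of
 * nvmet_alloc_ctrl() above. Names are invented for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_ctrl {
    void **cqs;
    void **sqs;
};

static struct fake_ctrl *ctrl_alloc(unsigned max_qid)
{
    struct fake_ctrl *ctrl = calloc(1, sizeof(*ctrl));
    if (!ctrl)
        goto out;

    ctrl->cqs = calloc(max_qid + 1, sizeof(*ctrl->cqs));
    if (!ctrl->cqs)
        goto out_free_ctrl;

    ctrl->sqs = calloc(max_qid + 1, sizeof(*ctrl->sqs));
    if (!ctrl->sqs)
        goto out_free_cqs;

    return ctrl;

out_free_cqs:
    free(ctrl->cqs);
out_free_ctrl:
    free(ctrl);
out:
    return NULL;
}

int main(void)
{
    struct fake_ctrl *ctrl = ctrl_alloc(16);
    printf("alloc %s\n", ctrl ? "ok" : "failed");
    if (ctrl) {
        free(ctrl->sqs);
        free(ctrl->cqs);
        free(ctrl);
    }
    return 0;
}
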
819 struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref); in nvmet_ctrl_free() local
820 struct nvmet_subsys *subsys = ctrl->subsys; in nvmet_ctrl_free()
822 nvmet_stop_keep_alive_timer(ctrl); in nvmet_ctrl_free()
825 list_del(&ctrl->subsys_entry); in nvmet_ctrl_free()
828 flush_work(&ctrl->async_event_work); in nvmet_ctrl_free()
829 cancel_work_sync(&ctrl->fatal_err_work); in nvmet_ctrl_free()
831 ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid); in nvmet_ctrl_free()
834 kfree(ctrl->sqs); in nvmet_ctrl_free()
835 kfree(ctrl->cqs); in nvmet_ctrl_free()
836 kfree(ctrl); in nvmet_ctrl_free()
839 void nvmet_ctrl_put(struct nvmet_ctrl *ctrl) in nvmet_ctrl_put() argument
841 kref_put(&ctrl->ref, nvmet_ctrl_free); in nvmet_ctrl_put()
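
Controller lifetime is reference-counted: kref_init() in nvmet_alloc_ctrl(), kref_get_unless_zero() in the lookup path of nvmet_ctrl_find_get(), and kref_put() in nvmet_ctrl_put(), whose release callback nvmet_ctrl_free() tears everything down. A userspace sketch of the same get-unless-zero / put-with-release idea, using C11 atomics instead of struct kref:

/* Userspace sketch of the kref pattern used for the controller above.
 * Invented names; not the kernel's kref implementation.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
    atomic_int ref;
};

static void obj_release(struct obj *o)
{
    printf("releasing object\n");   /* ~ nvmet_ctrl_free() */
    free(o);
}

/* Take a reference only if the object is still alive (ref > 0). */
static bool obj_get_unless_zero(struct obj *o)
{
    int old = atomic_load(&o->ref);
    while (old > 0) {
        if (atomic_compare_exchange_weak(&o->ref, &old, old + 1))
            return true;
    }
    return false;
}

/* Drop a reference; the last put runs the release callback. */
static void obj_put(struct obj *o)
{
    if (atomic_fetch_sub(&o->ref, 1) == 1)
        obj_release(o);
}

int main(void)
{
    struct obj *o = calloc(1, sizeof(*o));
    atomic_init(&o->ref, 1);            /* kref_init() */

    if (obj_get_unless_zero(o))         /* lookup path takes a reference */
        obj_put(o);

    obj_put(o);                         /* final put frees the object */
    return 0;
}
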
846 struct nvmet_ctrl *ctrl = in nvmet_fatal_error_handler() local
849 pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid); in nvmet_fatal_error_handler()
850 ctrl->ops->delete_ctrl(ctrl); in nvmet_fatal_error_handler()
853 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl) in nvmet_ctrl_fatal_error() argument
855 mutex_lock(&ctrl->lock); in nvmet_ctrl_fatal_error()
856 if (!(ctrl->csts & NVME_CSTS_CFS)) { in nvmet_ctrl_fatal_error()
857 ctrl->csts |= NVME_CSTS_CFS; in nvmet_ctrl_fatal_error()
858 INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler); in nvmet_ctrl_fatal_error()
859 schedule_work(&ctrl->fatal_err_work); in nvmet_ctrl_fatal_error()
861 mutex_unlock(&ctrl->lock); in nvmet_ctrl_fatal_error()
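
nvmet_ctrl_fatal_error() latches CSTS.CFS under ctrl->lock so the fatal-error work is scheduled at most once, no matter how many paths report a fatal error; the handler then deletes the controller. A small sketch of that latch-once pattern, with a pthread mutex standing in for ctrl->lock and a direct call standing in for schedule_work():

/* Sketch of the "latch once under a lock" pattern in
 * nvmet_ctrl_fatal_error() above. Invented names; build with -pthread.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define CSTS_CFS (1u << 1)   /* Controller Fatal Status bit in CSTS */

struct fatal_ctrl {
    pthread_mutex_t lock;
    unsigned csts;
};

static void fatal_error_handler(struct fatal_ctrl *ctrl)
{
    (void)ctrl;   /* the kernel handler logs ctrl->cntlid and calls delete_ctrl() */
    printf("fatal error: tearing down controller\n");
}

static void ctrl_fatal_error(struct fatal_ctrl *ctrl)
{
    bool first = false;

    pthread_mutex_lock(&ctrl->lock);
    if (!(ctrl->csts & CSTS_CFS)) {
        ctrl->csts |= CSTS_CFS;   /* latch: later callers see the bit set */
        first = true;
    }
    pthread_mutex_unlock(&ctrl->lock);

    if (first)
        fatal_error_handler(ctrl);   /* the kernel would schedule_work() here */
}

int main(void)
{
    struct fatal_ctrl ctrl = { .lock = PTHREAD_MUTEX_INITIALIZER };
    ctrl_fatal_error(&ctrl);   /* triggers teardown */
    ctrl_fatal_error(&ctrl);   /* no-op: CFS already latched */
    return 0;
}
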