Lines Matching +full:0 +full:ns
35 return 0; in nvme_set_iopolicy()
85 struct nvme_ns *ns = req->q->queuedata; in nvme_failover_req() local
86 u16 status = nvme_req(req)->status & 0x7ff; in nvme_failover_req()
90 nvme_mpath_clear_current_path(ns); in nvme_failover_req()
97 if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) { in nvme_failover_req()
98 set_bit(NVME_NS_ANA_PENDING, &ns->flags); in nvme_failover_req()
99 queue_work(nvme_wq, &ns->ctrl->ana_work); in nvme_failover_req()
102 spin_lock_irqsave(&ns->head->requeue_lock, flags); in nvme_failover_req()
104 bio_set_dev(bio, ns->head->disk->part0); in nvme_failover_req()
118 blk_steal_bios(&ns->head->requeue_list, req); in nvme_failover_req()
119 spin_unlock_irqrestore(&ns->head->requeue_lock, flags); in nvme_failover_req()
121 blk_mq_end_request(req, 0); in nvme_failover_req()
122 kblockd_schedule_work(&ns->head->requeue_work); in nvme_failover_req()
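
Taken together, the hits above trace the failover of a request that failed on one path. The condensed sketch below is illustrative only (the _sketch name is not in the driver, and the path-error classification done by the completion code before failover is omitted): stop using the failed path, kick the ANA worker for ANA errors, then re-target the bios at the multipath node and requeue them.

        static void nvme_failover_req_sketch(struct request *req)
        {
                struct nvme_ns *ns = req->q->queuedata;
                u16 status = nvme_req(req)->status & 0x7ff;
                unsigned long flags;
                struct bio *bio;

                /* Stop routing new I/O through the path that just failed. */
                nvme_mpath_clear_current_path(ns);

                /* ANA transition errors trigger a re-read of the ANA log. */
                if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) {
                        set_bit(NVME_NS_ANA_PENDING, &ns->flags);
                        queue_work(nvme_wq, &ns->ctrl->ana_work);
                }

                /* Re-target the bios at the multipath node and park them on
                 * the head's requeue list for a later retry. */
                spin_lock_irqsave(&ns->head->requeue_lock, flags);
                for (bio = req->bio; bio; bio = bio->bi_next)
                        bio_set_dev(bio, ns->head->disk->part0);
                blk_steal_bios(&ns->head->requeue_list, req);
                spin_unlock_irqrestore(&ns->head->requeue_lock, flags);

                blk_mq_end_request(req, 0);
                kblockd_schedule_work(&ns->head->requeue_work);
        }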
127 struct nvme_ns *ns = rq->q->queuedata; in nvme_mpath_start_request() local
128 struct gendisk *disk = ns->head->disk; in nvme_mpath_start_request()
141 struct nvme_ns *ns = rq->q->queuedata; in nvme_mpath_end_request() local
145 bdev_end_io_acct(ns->head->disk->part0, req_op(rq), in nvme_mpath_end_request()
152 struct nvme_ns *ns; in nvme_kick_requeue_lists() local
155 list_for_each_entry(ns, &ctrl->namespaces, list) { in nvme_kick_requeue_lists()
156 if (!ns->head->disk) in nvme_kick_requeue_lists()
158 kblockd_schedule_work(&ns->head->requeue_work); in nvme_kick_requeue_lists()
160 disk_uevent(ns->head->disk, KOBJ_CHANGE); in nvme_kick_requeue_lists()
166 [0] = "invalid state",
174 bool nvme_mpath_clear_current_path(struct nvme_ns *ns) in nvme_mpath_clear_current_path() argument
176 struct nvme_ns_head *head = ns->head; in nvme_mpath_clear_current_path()
184 if (ns == rcu_access_pointer(head->current_path[node])) { in nvme_mpath_clear_current_path()
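
The hit above is the core of the per-node path cache invalidation. A minimal sketch of the whole helper (the _sketch name is illustrative) makes the RCU bookkeeping explicit:

        static bool clear_current_path_sketch(struct nvme_ns *ns)
        {
                struct nvme_ns_head *head = ns->head;
                bool changed = false;
                int node;

                if (!head)
                        return changed;

                /* Drop "ns" as the cached path for every NUMA node. */
                for_each_node(node) {
                        if (ns == rcu_access_pointer(head->current_path[node])) {
                                rcu_assign_pointer(head->current_path[node], NULL);
                                changed = true;
                        }
                }
                return changed;
        }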
195 struct nvme_ns *ns; in nvme_mpath_clear_ctrl_paths() local
198 list_for_each_entry(ns, &ctrl->namespaces, list) { in nvme_mpath_clear_ctrl_paths()
199 nvme_mpath_clear_current_path(ns); in nvme_mpath_clear_ctrl_paths()
200 kblockd_schedule_work(&ns->head->requeue_work); in nvme_mpath_clear_ctrl_paths()
205 void nvme_mpath_revalidate_paths(struct nvme_ns *ns) in nvme_mpath_revalidate_paths() argument
207 struct nvme_ns_head *head = ns->head; in nvme_mpath_revalidate_paths()
213 list_for_each_entry_rcu(ns, &head->list, siblings) { in nvme_mpath_revalidate_paths()
214 if (capacity != get_capacity(ns->disk)) in nvme_mpath_revalidate_paths()
215 clear_bit(NVME_NS_READY, &ns->flags); in nvme_mpath_revalidate_paths()
224 static bool nvme_path_is_disabled(struct nvme_ns *ns) in nvme_path_is_disabled() argument
231 if (ns->ctrl->state != NVME_CTRL_LIVE && in nvme_path_is_disabled()
232 ns->ctrl->state != NVME_CTRL_DELETING) in nvme_path_is_disabled()
234 if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) || in nvme_path_is_disabled()
235 !test_bit(NVME_NS_READY, &ns->flags)) in nvme_path_is_disabled()
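
For reference, the complete predicate these conditions belong to reads roughly as below; the return statements are not part of the hit list and the _sketch name is illustrative:

        static bool path_is_disabled_sketch(struct nvme_ns *ns)
        {
                /* Only live (or deleting-but-still-connected) controllers
                 * are allowed to carry I/O. */
                if (ns->ctrl->state != NVME_CTRL_LIVE &&
                    ns->ctrl->state != NVME_CTRL_DELETING)
                        return true;
                /* Paths waiting on an ANA update or not marked ready are
                 * skipped by the selectors. */
                if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) ||
                    !test_bit(NVME_NS_READY, &ns->flags))
                        return true;
                return false;
        }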
243 struct nvme_ns *found = NULL, *fallback = NULL, *ns; in __nvme_find_path() local
245 list_for_each_entry_rcu(ns, &head->list, siblings) { in __nvme_find_path()
246 if (nvme_path_is_disabled(ns)) in __nvme_find_path()
250 distance = node_distance(node, ns->ctrl->numa_node); in __nvme_find_path()
254 switch (ns->ana_state) { in __nvme_find_path()
258 found = ns; in __nvme_find_path()
264 fallback = ns; in __nvme_find_path()
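
These hits come from the NUMA-aware selector. A simplified sketch of its selection loop follows; the found_distance/fallback_distance bookkeeping names are illustrative, and the driver's iopolicy handling and caching of the result in head->current_path[] are left out:

        static struct nvme_ns *numa_find_path_sketch(struct nvme_ns_head *head,
                        int node)
        {
                int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
                struct nvme_ns *found = NULL, *fallback = NULL, *ns;

                list_for_each_entry_rcu(ns, &head->list, siblings) {
                        if (nvme_path_is_disabled(ns))
                                continue;
                        distance = node_distance(node, ns->ctrl->numa_node);

                        switch (ns->ana_state) {
                        case NVME_ANA_OPTIMIZED:
                                /* Closest optimized path wins outright. */
                                if (distance < found_distance) {
                                        found_distance = distance;
                                        found = ns;
                                }
                                break;
                        case NVME_ANA_NONOPTIMIZED:
                                /* Closest non-optimized path is kept as a fallback. */
                                if (distance < fallback_distance) {
                                        fallback_distance = distance;
                                        fallback = ns;
                                }
                                break;
                        default:
                                break;
                        }
                }
                return found ? found : fallback;
        }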
280 struct nvme_ns *ns) in nvme_next_ns() argument
282 ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns, in nvme_next_ns()
284 if (ns) in nvme_next_ns()
285 return ns; in nvme_next_ns()
292 struct nvme_ns *ns, *found = NULL; in nvme_round_robin_path() local
300 for (ns = nvme_next_ns(head, old); in nvme_round_robin_path()
301 ns && ns != old; in nvme_round_robin_path()
302 ns = nvme_next_ns(head, ns)) { in nvme_round_robin_path()
303 if (nvme_path_is_disabled(ns)) in nvme_round_robin_path()
306 if (ns->ana_state == NVME_ANA_OPTIMIZED) { in nvme_round_robin_path()
307 found = ns; in nvme_round_robin_path()
310 if (ns->ana_state == NVME_ANA_NONOPTIMIZED) in nvme_round_robin_path()
311 found = ns; in nvme_round_robin_path()
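
A sketch of the round-robin walk these hits belong to, with the wrap-around via nvme_next_ns(); edge cases around a disabled or NULL previous path are simplified, and the _sketch name is not in the driver:

        static struct nvme_ns *round_robin_sketch(struct nvme_ns_head *head,
                        int node, struct nvme_ns *old)
        {
                struct nvme_ns *ns, *found = NULL;

                /* Start just after the previously used path and walk the
                 * sibling list, wrapping around, until we are back at it. */
                for (ns = nvme_next_ns(head, old);
                     ns && ns != old;
                     ns = nvme_next_ns(head, ns)) {
                        if (nvme_path_is_disabled(ns))
                                continue;
                        if (ns->ana_state == NVME_ANA_OPTIMIZED) {
                                found = ns;
                                break;
                        }
                        /* Remember a non-optimized path but keep looking. */
                        if (ns->ana_state == NVME_ANA_NONOPTIMIZED)
                                found = ns;
                }
                if (found)
                        rcu_assign_pointer(head->current_path[node], found);
                return found;
        }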
332 static inline bool nvme_path_is_optimized(struct nvme_ns *ns) in nvme_path_is_optimized() argument
334 return ns->ctrl->state == NVME_CTRL_LIVE && in nvme_path_is_optimized()
335 ns->ana_state == NVME_ANA_OPTIMIZED; in nvme_path_is_optimized()
341 struct nvme_ns *ns; in nvme_find_path() local
343 ns = srcu_dereference(head->current_path[node], &head->srcu); in nvme_find_path()
344 if (unlikely(!ns)) in nvme_find_path()
348 return nvme_round_robin_path(head, node, ns); in nvme_find_path()
349 if (unlikely(!nvme_path_is_optimized(ns))) in nvme_find_path()
351 return ns; in nvme_find_path()
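
Putting the nvme_find_path() hits back together: the fast path reuses the cached per-node pointer and only recomputes when it is missing or no longer optimized. A sketch, with the round-robin branch paraphrased from the subsystem iopolicy attribute:

        static struct nvme_ns *find_path_fastpath_sketch(struct nvme_ns_head *head)
        {
                int node = numa_node_id();
                struct nvme_ns *ns;

                ns = srcu_dereference(head->current_path[node], &head->srcu);
                if (unlikely(!ns))
                        return __nvme_find_path(head, node);

                /* Round-robin subsystems rotate on every submission. */
                if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR)
                        return nvme_round_robin_path(head, node, ns);
                /* Cached path demoted or disabled: pick again. */
                if (unlikely(!nvme_path_is_optimized(ns)))
                        return __nvme_find_path(head, node);
                return ns;
        }

Reads of current_path[] are protected by head->srcu, which is why the submit path brackets the lookup with srcu_read_lock()/srcu_read_unlock().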
356 struct nvme_ns *ns; in nvme_available_path() local
358 list_for_each_entry_rcu(ns, &head->list, siblings) { in nvme_available_path()
359 if (test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ns->ctrl->flags)) in nvme_available_path()
361 switch (ns->ctrl->state) { in nvme_available_path()
378 struct nvme_ns *ns; in nvme_ns_head_submit_bio() local
391 ns = nvme_find_path(head); in nvme_ns_head_submit_bio()
392 if (likely(ns)) { in nvme_ns_head_submit_bio()
393 bio_set_dev(bio, ns->disk->part0); in nvme_ns_head_submit_bio()
395 trace_block_bio_remap(bio, disk_devt(ns->head->disk), in nvme_ns_head_submit_bio()
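
The submit path steers each bio according to what nvme_find_path() returns. A condensed sketch of that branch (I/O statistics, tracing and the rest of the function omitted):

        struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data;
        struct nvme_ns *ns;
        int srcu_idx;

        srcu_idx = srcu_read_lock(&head->srcu);
        ns = nvme_find_path(head);
        if (likely(ns)) {
                /* Route the bio to the selected path's namespace device. */
                bio_set_dev(bio, ns->disk->part0);
                submit_bio_noacct(bio);
        } else if (nvme_available_path(head)) {
                /* Paths exist but none is usable right now: park the bio. */
                spin_lock_irq(&head->requeue_lock);
                bio_list_add(&head->requeue_list, bio);
                spin_unlock_irq(&head->requeue_lock);
        } else {
                /* The whole head has become unusable: fail the bio. */
                bio_io_error(bio);
        }
        srcu_read_unlock(&head->srcu, srcu_idx);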
417 return 0; in nvme_ns_head_open()
430 struct nvme_ns *ns; in nvme_ns_head_report_zones() local
434 ns = nvme_find_path(head); in nvme_ns_head_report_zones()
435 if (ns) in nvme_ns_head_report_zones()
436 ret = nvme_ns_report_zones(ns, sector, nr_zones, cb, data); in nvme_ns_head_report_zones()
465 return 0; in nvme_ns_head_chr_open()
471 return 0; in nvme_ns_head_chr_release()
532 return 0; in nvme_mpath_alloc_disk()
564 return 0; in nvme_mpath_alloc_disk()
567 static void nvme_mpath_set_live(struct nvme_ns *ns) in nvme_mpath_set_live() argument
569 struct nvme_ns_head *head = ns->head; in nvme_mpath_set_live()
584 clear_bit(NVME_NSHEAD_DISK_LIVE, &head->flags); in nvme_mpath_set_live()
591 if (nvme_path_is_optimized(ns)) { in nvme_mpath_set_live()
615 for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) { in nvme_parse_ana_log()
626 if (WARN_ON_ONCE(desc->grpid == 0)) in nvme_parse_ana_log()
630 if (WARN_ON_ONCE(desc->state == 0)) in nvme_parse_ana_log()
646 return 0; in nvme_parse_ana_log()
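
The hits above are the sanity checks inside the ANA log walk. A sketch of the walk as a whole: the response header is followed by one variable-length descriptor per group, each carrying nnsids namespace IDs. Bounds checks against ana_log_size are omitted here for brevity, and the _sketch name is illustrative:

        static int parse_ana_log_sketch(struct nvme_ctrl *ctrl, void *data,
                        int (*cb)(struct nvme_ctrl *, struct nvme_ana_group_desc *,
                                  void *))
        {
                void *base = ctrl->ana_log_buf;
                size_t offset = sizeof(struct nvme_ana_rsp_hdr);
                int error, i;

                for (i = 0; i < le16_to_cpu(ctrl->ana_log_buf->ngrps); i++) {
                        struct nvme_ana_group_desc *desc = base + offset;

                        /* A zero group ID or zero state means the log is corrupt. */
                        if (WARN_ON_ONCE(desc->grpid == 0))
                                return -EINVAL;
                        if (WARN_ON_ONCE(desc->state == 0))
                                return -EINVAL;

                        error = cb(ctrl, desc, data);
                        if (error)
                                return error;

                        /* Skip the descriptor header and its namespace ID list. */
                        offset += struct_size(desc, nsids,
                                              le32_to_cpu(desc->nnsids));
                }
                return 0;
        }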
655 struct nvme_ns *ns) in nvme_update_ns_ana_state() argument
657 ns->ana_grpid = le32_to_cpu(desc->grpid); in nvme_update_ns_ana_state()
658 ns->ana_state = desc->state; in nvme_update_ns_ana_state()
659 clear_bit(NVME_NS_ANA_PENDING, &ns->flags); in nvme_update_ns_ana_state()
669 if (nvme_state_is_live(ns->ana_state) && in nvme_update_ns_ana_state()
670 ns->ctrl->state == NVME_CTRL_LIVE) in nvme_update_ns_ana_state()
671 nvme_mpath_set_live(ns); in nvme_update_ns_ana_state()
677 u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0; in nvme_update_ana_state()
679 struct nvme_ns *ns; in nvme_update_ana_state() local
689 return 0; in nvme_update_ana_state()
692 list_for_each_entry(ns, &ctrl->namespaces, list) { in nvme_update_ana_state()
696 if (ns->head->ns_id < nsid) in nvme_update_ana_state()
698 if (ns->head->ns_id == nsid) in nvme_update_ana_state()
699 nvme_update_ns_ana_state(desc, ns); in nvme_update_ana_state()
702 if (ns->head->ns_id > nsid) in nvme_update_ana_state()
706 return 0; in nvme_update_ana_state()
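
The hits above are the heart of applying one ANA group descriptor to the controller's namespaces. Both desc->nsids[] and ctrl->namespaces are kept sorted by NSID, so a single merge pass matches them up. The sketch below is written as a straightforward merge rather than the driver's goto-based loop, and locking of the namespace list is omitted:

        static void apply_ana_desc_sketch(struct nvme_ctrl *ctrl,
                        struct nvme_ana_group_desc *desc)
        {
                u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
                struct nvme_ns *ns;

                list_for_each_entry(ns, &ctrl->namespaces, list) {
                        /* Skip descriptor NSIDs with no attached namespace. */
                        while (n < nr_nsids &&
                               le32_to_cpu(desc->nsids[n]) < ns->head->ns_id)
                                n++;
                        if (n == nr_nsids)
                                break;
                        if (le32_to_cpu(desc->nsids[n]) == ns->head->ns_id) {
                                nvme_update_ns_ana_state(desc, ns);
                                n++;
                        }
                        /* Otherwise this namespace is in a different ANA group. */
                }
        }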
711 u32 nr_change_groups = 0; in nvme_read_ana_log()
715 error = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_ANA, 0, NVME_CSI_NVM, in nvme_read_ana_log()
716 ctrl->ana_log_buf, ctrl->ana_log_size, 0); in nvme_read_ana_log()
759 u32 nr_change_groups = 0; in nvme_mpath_update()
806 for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) { in nvme_subsys_iopolicy_store()
828 struct nvme_ns *ns = nvme_get_ns_from_dev(dev); in ana_state_show() local
830 return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]); in ana_state_show()
840 return 0; in nvme_lookup_ana_group_desc()
846 void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid) in nvme_mpath_add_disk() argument
848 if (nvme_ctrl_use_ana(ns->ctrl)) { in nvme_mpath_add_disk()
851 .state = 0, in nvme_mpath_add_disk()
854 mutex_lock(&ns->ctrl->ana_lock); in nvme_mpath_add_disk()
855 ns->ana_grpid = le32_to_cpu(anagrpid); in nvme_mpath_add_disk()
856 nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc); in nvme_mpath_add_disk()
857 mutex_unlock(&ns->ctrl->ana_lock); in nvme_mpath_add_disk()
860 nvme_update_ns_ana_state(&desc, ns); in nvme_mpath_add_disk()
863 set_bit(NVME_NS_ANA_PENDING, &ns->flags); in nvme_mpath_add_disk()
864 queue_work(nvme_wq, &ns->ctrl->ana_work); in nvme_mpath_add_disk()
867 ns->ana_state = NVME_ANA_OPTIMIZED; in nvme_mpath_add_disk()
868 nvme_mpath_set_live(ns); in nvme_mpath_add_disk()
871 if (blk_queue_stable_writes(ns->queue) && ns->head->disk) in nvme_mpath_add_disk()
873 ns->head->disk->queue); in nvme_mpath_add_disk()
875 if (blk_queue_is_zoned(ns->queue) && ns->head->disk) in nvme_mpath_add_disk()
876 ns->head->disk->nr_zones = ns->disk->nr_zones; in nvme_mpath_add_disk()
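
Restoring the control flow around these hits: when the controller reports ANA, the new path's group state is looked up in the cached log under ana_lock and applied directly, falling back to the ana_work worker when the group is not in the log yet; without ANA the path is simply treated as optimized. A sketch, where desc.state == 0 doubles as "not found":

        if (nvme_ctrl_use_ana(ns->ctrl)) {
                struct nvme_ana_group_desc desc = {
                        .grpid = anagrpid,
                        .state = 0,
                };

                mutex_lock(&ns->ctrl->ana_lock);
                ns->ana_grpid = le32_to_cpu(anagrpid);
                nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc);
                mutex_unlock(&ns->ctrl->ana_lock);
                if (desc.state) {
                        /* Found in the cached log: apply it synchronously. */
                        nvme_update_ns_ana_state(&desc, ns);
                } else {
                        /* Not in the log yet: let ana_work pick it up. */
                        set_bit(NVME_NS_ANA_PENDING, &ns->flags);
                        queue_work(nvme_wq, &ns->ctrl->ana_work);
                }
        } else {
                ns->ana_state = NVME_ANA_OPTIMIZED;
                nvme_mpath_set_live(ns);
        }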
904 timer_setup(&ctrl->anatt_timer, nvme_anatt_timeout, 0); in nvme_mpath_init_ctrl()
912 int error = 0; in nvme_mpath_init_identify()
917 return 0; in nvme_mpath_init_identify()
952 return 0; in nvme_mpath_init_identify()
963 ctrl->ana_log_size = 0; in nvme_mpath_uninit()