Lines matching the identifier "ns" (search query: full:ns) in drivers/nvme/host/multipath.c:
53 void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, in nvme_set_disk_name() argument
57 sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance); in nvme_set_disk_name()
58 } else if (ns->head->disk) { in nvme_set_disk_name()
60 ctrl->instance, ns->head->instance); in nvme_set_disk_name()
64 ns->head->instance); in nvme_set_disk_name()
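The three sprintf() branches above encode the multipath naming scheme: without multipath a namespace is plain nvme<ctrl>n<ns>; with multipath each per-path device gets the hidden nvme<subsys>c<ctrl>n<ns> name while the shared head disk keeps nvme<subsys>n<ns>. A minimal userspace sketch of that decision follows; the struct fields and the multipath flag are simplified stand-ins for the kernel's types, not the real ones.

    #include <stdio.h>

    /* Simplified stand-ins for the kernel structures. */
    struct ctrl { int instance, subsys_instance; };
    struct ns   { int head_instance; int head_has_disk; };

    static void set_disk_name(char *name, const struct ns *ns,
                              const struct ctrl *ctrl, int multipath)
    {
        if (!multipath)              /* single path: nvme<ctrl>n<ns> */
            sprintf(name, "nvme%dn%d", ctrl->instance, ns->head_instance);
        else if (ns->head_has_disk)  /* hidden per-path node: nvme<subsys>c<ctrl>n<ns> */
            sprintf(name, "nvme%dc%dn%d", ctrl->subsys_instance,
                    ctrl->instance, ns->head_instance);
        else                         /* shared head disk: nvme<subsys>n<ns> */
            sprintf(name, "nvme%dn%d", ctrl->subsys_instance,
                    ns->head_instance);
    }

    int main(void)
    {
        char name[32];
        struct ctrl c = { .instance = 1, .subsys_instance = 0 };
        struct ns n = { .head_instance = 2, .head_has_disk = 1 };

        set_disk_name(name, &n, &c, 1);
        printf("%s\n", name);        /* prints nvme0c1n2 */
        return 0;
    }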
70 struct nvme_ns *ns = req->q->queuedata; in nvme_failover_req() local
74 nvme_mpath_clear_current_path(ns); in nvme_failover_req()
81 if (nvme_is_ana_error(status) && ns->ctrl->ana_log_buf) { in nvme_failover_req()
82 set_bit(NVME_NS_ANA_PENDING, &ns->flags); in nvme_failover_req()
83 queue_work(nvme_wq, &ns->ctrl->ana_work); in nvme_failover_req()
86 spin_lock_irqsave(&ns->head->requeue_lock, flags); in nvme_failover_req()
87 blk_steal_bios(&ns->head->requeue_list, req); in nvme_failover_req()
88 spin_unlock_irqrestore(&ns->head->requeue_lock, flags); in nvme_failover_req()
91 kblockd_schedule_work(&ns->head->requeue_work); in nvme_failover_req()
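nvme_failover_req() clears the failed path from the per-node path cache, schedules ANA re-evaluation if the status was an ANA error, then moves the request's bio chain onto the shared head's requeue list under requeue_lock and kicks the requeue worker. A single-threaded sketch of the bio-stealing step, with toy list types standing in for struct bio and blk_steal_bios():

    #include <stdio.h>
    #include <stddef.h>

    /* Toy stand-ins: bios form a singly linked chain, as in the kernel. */
    struct bio { int id; struct bio *next; };
    struct bio_list { struct bio *head, *tail; };

    /* Move a failed request's whole bio chain to the tail of the requeue
     * list; the kernel's blk_steal_bios() does this splice in O(1), and
     * nvme_failover_req() calls it under head->requeue_lock. */
    static void steal_bios(struct bio_list *list, struct bio **req_bios)
    {
        if (!*req_bios)
            return;
        if (list->tail)
            list->tail->next = *req_bios;
        else
            list->head = *req_bios;
        /* find the new tail (the kernel keeps rq->biotail and skips this walk) */
        struct bio *b = *req_bios;
        while (b->next)
            b = b->next;
        list->tail = b;
        *req_bios = NULL;            /* the request no longer owns its bios */
    }

    int main(void)
    {
        struct bio b2 = { 2, NULL }, b1 = { 1, &b2 };
        struct bio *req = &b1;
        struct bio_list requeue = { NULL, NULL };

        steal_bios(&requeue, &req);
        for (struct bio *b = requeue.head; b; b = b->next)
            printf("requeued bio %d\n", b->id);
        return 0;
    }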
96 struct nvme_ns *ns; in nvme_kick_requeue_lists() local
99 list_for_each_entry(ns, &ctrl->namespaces, list) { in nvme_kick_requeue_lists()
100 if (ns->head->disk) in nvme_kick_requeue_lists()
101 kblockd_schedule_work(&ns->head->requeue_work); in nvme_kick_requeue_lists()
115 bool nvme_mpath_clear_current_path(struct nvme_ns *ns) in nvme_mpath_clear_current_path() argument
117 struct nvme_ns_head *head = ns->head; in nvme_mpath_clear_current_path()
125 if (ns == rcu_access_pointer(head->current_path[node])) { in nvme_mpath_clear_current_path()
136 struct nvme_ns *ns; in nvme_mpath_clear_ctrl_paths() local
139 list_for_each_entry(ns, &ctrl->namespaces, list) { in nvme_mpath_clear_ctrl_paths()
140 nvme_mpath_clear_current_path(ns); in nvme_mpath_clear_ctrl_paths()
141 kblockd_schedule_work(&ns->head->requeue_work); in nvme_mpath_clear_ctrl_paths()
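nvme_mpath_clear_current_path() invalidates every per-NUMA-node current_path slot that still points at the departing namespace, and nvme_mpath_clear_ctrl_paths() repeats that for each namespace on a controller before kicking the requeue workers so parked bios pick a fresh path. A sketch of the cache invalidation, using a plain array where the kernel uses RCU-protected pointers:

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_NODES 4               /* stand-in for nr_node_ids */

    struct ns { int id; };
    struct ns_head { struct ns *current_path[NR_NODES]; };

    /* Drop every per-node cached reference to the departing path and
     * report whether anything changed (the kernel reads each slot with
     * rcu_access_pointer() before clearing it). */
    static bool clear_current_path(struct ns_head *head, struct ns *ns)
    {
        bool changed = false;

        for (int node = 0; node < NR_NODES; node++) {
            if (head->current_path[node] == ns) {
                head->current_path[node] = NULL;
                changed = true;
            }
        }
        return changed;
    }

    int main(void)
    {
        struct ns a = { 1 }, b = { 2 };
        struct ns_head head = { .current_path = { &a, &b, &a, NULL } };

        printf("cleared: %s\n", clear_current_path(&head, &a) ? "yes" : "no");
        return 0;
    }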
146 static bool nvme_path_is_disabled(struct nvme_ns *ns) in nvme_path_is_disabled() argument
153 if (ns->ctrl->state != NVME_CTRL_LIVE && in nvme_path_is_disabled()
154 ns->ctrl->state != NVME_CTRL_DELETING) in nvme_path_is_disabled()
156 if (test_bit(NVME_NS_ANA_PENDING, &ns->flags) || in nvme_path_is_disabled()
157 test_bit(NVME_NS_REMOVING, &ns->flags)) in nvme_path_is_disabled()
165 struct nvme_ns *found = NULL, *fallback = NULL, *ns; in __nvme_find_path() local
167 list_for_each_entry_rcu(ns, &head->list, siblings) { in __nvme_find_path()
168 if (nvme_path_is_disabled(ns)) in __nvme_find_path()
172 distance = node_distance(node, ns->ctrl->numa_node); in __nvme_find_path()
176 switch (ns->ana_state) { in __nvme_find_path()
180 found = ns; in __nvme_find_path()
186 fallback = ns; in __nvme_find_path()
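__nvme_find_path() scans the sibling paths, skips disabled ones, and keeps two candidates: the ANA-optimized path with the smallest NUMA distance, plus the nearest non-optimized path as a fallback if no optimized path survives. A compact model of that two-tier selection; the distance field and state enum are stand-ins for node_distance() and ns->ana_state:

    #include <limits.h>
    #include <stdio.h>

    enum ana_state { OPTIMIZED, NONOPTIMIZED, INACCESSIBLE };

    struct path {
        enum ana_state state;
        int distance;                /* stand-in for node_distance() */
        int disabled;                /* stand-in for nvme_path_is_disabled() */
    };

    /* Pick the nearest optimized path; fall back to the nearest
     * non-optimized one when no optimized path is usable. */
    static int find_path(const struct path *p, int n)
    {
        int found = -1, fallback = -1;
        int best_found = INT_MAX, best_fallback = INT_MAX;

        for (int i = 0; i < n; i++) {
            if (p[i].disabled)
                continue;
            if (p[i].state == OPTIMIZED && p[i].distance < best_found) {
                best_found = p[i].distance;
                found = i;
            } else if (p[i].state == NONOPTIMIZED &&
                       p[i].distance < best_fallback) {
                best_fallback = p[i].distance;
                fallback = i;
            }
        }
        return found >= 0 ? found : fallback;
    }

    int main(void)
    {
        struct path paths[] = {
            { NONOPTIMIZED, 10, 0 },
            { OPTIMIZED,    20, 0 },  /* chosen: optimized beats distance */
            { OPTIMIZED,    10, 1 },  /* disabled, skipped */
        };
        printf("selected path %d\n", find_path(paths, 3));
        return 0;
    }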
202 struct nvme_ns *ns) in nvme_next_ns() argument
204 ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns, in nvme_next_ns()
206 if (ns) in nvme_next_ns()
207 return ns; in nvme_next_ns()
214 struct nvme_ns *ns, *found = NULL; in nvme_round_robin_path() local
222 for (ns = nvme_next_ns(head, old); in nvme_round_robin_path()
223 ns && ns != old; in nvme_round_robin_path()
224 ns = nvme_next_ns(head, ns)) { in nvme_round_robin_path()
225 if (nvme_path_is_disabled(ns)) in nvme_round_robin_path()
228 if (ns->ana_state == NVME_ANA_OPTIMIZED) { in nvme_round_robin_path()
229 found = ns; in nvme_round_robin_path()
232 if (ns->ana_state == NVME_ANA_NONOPTIMIZED) in nvme_round_robin_path()
233 found = ns; in nvme_round_robin_path()
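nvme_next_ns() returns the next sibling or wraps back to the front of the head's list, and nvme_round_robin_path() then walks at most one full lap starting after the previously used path, returning the first optimized path it meets while remembering a usable non-optimized one as a fallback. The same wraparound walk, modeled on an array treated as a ring (indices replace the kernel's list cursors):

    #include <stdio.h>

    enum ana_state { OPTIMIZED, NONOPTIMIZED, INACCESSIBLE };

    struct path { enum ana_state state; int disabled; };

    /* Walk one full cycle starting after 'old'; the first optimized path
     * wins, the last usable non-optimized path is kept as a fallback. */
    static int round_robin(const struct path *p, int n, int old)
    {
        int found = -1;

        for (int i = (old + 1) % n; i != old; i = (i + 1) % n) {
            if (p[i].disabled)
                continue;
            if (p[i].state == OPTIMIZED)
                return i;            /* best case: stop immediately */
            if (p[i].state == NONOPTIMIZED)
                found = i;
        }
        return found;
    }

    int main(void)
    {
        struct path paths[] = {
            { OPTIMIZED, 0 }, { NONOPTIMIZED, 0 }, { OPTIMIZED, 0 },
        };
        /* the previous I/O went to path 0, so path 2 is picked next */
        printf("next path: %d\n", round_robin(paths, 3, 0));
        return 0;
    }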
254 static inline bool nvme_path_is_optimized(struct nvme_ns *ns) in nvme_path_is_optimized() argument
256 return ns->ctrl->state == NVME_CTRL_LIVE && in nvme_path_is_optimized()
257 ns->ana_state == NVME_ANA_OPTIMIZED; in nvme_path_is_optimized()
263 struct nvme_ns *ns; in nvme_find_path() local
265 ns = srcu_dereference(head->current_path[node], &head->srcu); in nvme_find_path()
266 if (unlikely(!ns)) in nvme_find_path()
270 return nvme_round_robin_path(head, node, ns); in nvme_find_path()
271 if (unlikely(!nvme_path_is_optimized(ns))) in nvme_find_path()
273 return ns; in nvme_find_path()
278 struct nvme_ns *ns; in nvme_available_path() local
280 list_for_each_entry_rcu(ns, &head->list, siblings) { in nvme_available_path()
281 switch (ns->ctrl->state) { in nvme_available_path()
298 struct nvme_ns *ns; in nvme_ns_head_submit_bio() local
310 ns = nvme_find_path(head); in nvme_ns_head_submit_bio()
311 if (likely(ns)) { in nvme_ns_head_submit_bio()
312 bio->bi_disk = ns->disk; in nvme_ns_head_submit_bio()
315 disk_devt(ns->head->disk), in nvme_ns_head_submit_bio()
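nvme_ns_head_submit_bio() boils down to a three-way decision: dispatch the bio to the chosen path's disk, park it on the requeue list while nvme_available_path() (above) says some controller may still come back, or fail it when no path is even trying. A toy dispatcher showing just that control flow; the enum and flags are invented for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    enum disposition { DISPATCH, REQUEUE, FAIL };

    /* The two flags stand in for nvme_find_path() and
     * nvme_available_path() respectively. */
    static enum disposition route_bio(bool have_usable_path,
                                      bool a_path_may_recover)
    {
        if (have_usable_path)
            return DISPATCH;         /* bio->bi_disk = ns->disk; submit it */
        if (a_path_may_recover)
            return REQUEUE;          /* park the bio on head->requeue_list */
        return FAIL;                 /* no live or resetting controller left */
    }

    int main(void)
    {
        /* no usable path right now, but one may reconnect: requeue */
        printf("%d\n", route_bio(false, true));   /* prints 1 (REQUEUE) */
        return 0;
    }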
406 static void nvme_mpath_set_live(struct nvme_ns *ns) in nvme_mpath_set_live() argument
408 struct nvme_ns_head *head = ns->head; in nvme_mpath_set_live()
418 if (nvme_path_is_optimized(ns)) { in nvme_mpath_set_live()
482 struct nvme_ns *ns) in nvme_update_ns_ana_state() argument
484 ns->ana_grpid = le32_to_cpu(desc->grpid); in nvme_update_ns_ana_state()
485 ns->ana_state = desc->state; in nvme_update_ns_ana_state()
486 clear_bit(NVME_NS_ANA_PENDING, &ns->flags); in nvme_update_ns_ana_state()
496 if (nvme_state_is_live(ns->ana_state) && in nvme_update_ns_ana_state()
497 ns->ctrl->state == NVME_CTRL_LIVE) in nvme_update_ns_ana_state()
498 nvme_mpath_set_live(ns); in nvme_update_ns_ana_state()
506 struct nvme_ns *ns; in nvme_update_ana_state() local
519 list_for_each_entry(ns, &ctrl->namespaces, list) { in nvme_update_ana_state()
523 if (ns->head->ns_id < nsid) in nvme_update_ana_state()
525 if (ns->head->ns_id == nsid) in nvme_update_ana_state()
526 nvme_update_ns_ana_state(desc, ns); in nvme_update_ana_state()
529 if (ns->head->ns_id > nsid) in nvme_update_ana_state()
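nvme_update_ana_state() exploits the fact that both ctrl->namespaces and each ANA group descriptor's NSID array are sorted: one pass over the namespace list skips entries below the descriptor's current NSID, applies the state on a match, and re-checks the same namespace against the next NSID when it overshoots (the kernel's "goto again"). The same sorted merge in miniature, with arrays in place of the kernel's lists:

    #include <stdio.h>

    /* One ANA group descriptor: a sorted array of NSIDs sharing a state. */
    struct ana_desc { int state; int nr_nsids; const int *nsids; };

    /* Apply one descriptor to a sorted namespace table in a single pass,
     * mirroring the <, ==, > ladder shown in the fragment above. */
    static void update_ana_state(const int *ns_ids, int *ns_states,
                                 int nr_ns, const struct ana_desc *desc)
    {
        int n = 0;

        for (int i = 0; i < nr_ns && n < desc->nr_nsids; i++) {
            /* skip descriptor NSIDs below this namespace's ID
             * (the kernel re-checks via its "goto again") */
            while (n < desc->nr_nsids && ns_ids[i] > desc->nsids[n])
                n++;
            if (n == desc->nr_nsids)
                break;
            if (ns_ids[i] == desc->nsids[n]) {
                ns_states[i] = desc->state;
                n++;                 /* advance to the group's next NSID */
            }
            /* ns_ids[i] < desc->nsids[n]: keep walking the namespaces */
        }
    }

    int main(void)
    {
        int ids[]    = { 1, 2, 5, 9 };
        int states[] = { 0, 0, 0, 0 };
        const int group_nsids[] = { 2, 9 };
        struct ana_desc d = { .state = 7, .nr_nsids = 2,
                              .nsids = group_nsids };

        update_ana_state(ids, states, 4, &d);
        for (int i = 0; i < 4; i++)
            printf("nsid %d -> state %d\n", ids[i], states[i]);
        return 0;
    }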
660 struct nvme_ns *ns = nvme_get_ns_from_dev(dev); in ana_state_show() local
662 return sysfs_emit(buf, "%s\n", nvme_ana_state_names[ns->ana_state]); in ana_state_show()
678 void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id) in nvme_mpath_add_disk() argument
680 if (nvme_ctrl_use_ana(ns->ctrl)) { in nvme_mpath_add_disk()
686 mutex_lock(&ns->ctrl->ana_lock); in nvme_mpath_add_disk()
687 ns->ana_grpid = le32_to_cpu(id->anagrpid); in nvme_mpath_add_disk()
688 nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc); in nvme_mpath_add_disk()
689 mutex_unlock(&ns->ctrl->ana_lock); in nvme_mpath_add_disk()
692 nvme_update_ns_ana_state(&desc, ns); in nvme_mpath_add_disk()
695 set_bit(NVME_NS_ANA_PENDING, &ns->flags); in nvme_mpath_add_disk()
696 queue_work(nvme_wq, &ns->ctrl->ana_work); in nvme_mpath_add_disk()
699 ns->ana_state = NVME_ANA_OPTIMIZED; in nvme_mpath_add_disk()
700 nvme_mpath_set_live(ns); in nvme_mpath_add_disk()
703 if (blk_queue_stable_writes(ns->queue) && ns->head->disk) in nvme_mpath_add_disk()
705 ns->head->disk->queue); in nvme_mpath_add_disk()
707 if (blk_queue_is_zoned(ns->queue) && ns->head->disk) in nvme_mpath_add_disk()
708 ns->head->disk->queue->nr_zones = ns->queue->nr_zones; in nvme_mpath_add_disk()