Lines matching "ns" (full-word match) in the NVMe host multipath driver, drivers/nvme/host/multipath.c. Each hit shows the source line number, the matched line, and the enclosing function.
61 void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns, in nvme_set_disk_name() argument
65 sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance); in nvme_set_disk_name()
66 } else if (ns->head->disk) { in nvme_set_disk_name()
68 ctrl->cntlid, ns->head->instance); in nvme_set_disk_name()
72 ns->head->instance); in nvme_set_disk_name()
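
The matcher elides the branch structure around these hits. Below is a sketch of the full naming logic reconstructed from the matched lines; the multipath module-parameter test, the fourth parameter, and the GENHD_FL_HIDDEN handling are assumptions based on the upstream driver:

        void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
                        struct nvme_ctrl *ctrl, int *flags)
        {
                if (!multipath) {
                        /* Multipathing disabled: name after the controller instance. */
                        sprintf(disk_name, "nvme%dn%d", ctrl->instance,
                                        ns->head->instance);
                } else if (ns->head->disk) {
                        /* Per-path device behind a multipath head node: hide it. */
                        sprintf(disk_name, "nvme%dc%dn%d", ctrl->subsys->instance,
                                        ctrl->cntlid, ns->head->instance);
                        *flags = GENHD_FL_HIDDEN;
                } else {
                        /* No head disk: name the device after the subsystem. */
                        sprintf(disk_name, "nvme%dn%d", ctrl->subsys->instance,
                                        ns->head->instance);
                }
        }
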
78 struct nvme_ns *ns = req->q->queuedata; in nvme_failover_req() local
95 nvme_mpath_clear_current_path(ns); in nvme_failover_req()
96 if (ns->ctrl->ana_log_buf) { in nvme_failover_req()
97 set_bit(NVME_NS_ANA_PENDING, &ns->flags); in nvme_failover_req()
98 queue_work(nvme_wq, &ns->ctrl->ana_work); in nvme_failover_req()
106 nvme_mpath_clear_current_path(ns); in nvme_failover_req()
113 spin_lock_irqsave(&ns->head->requeue_lock, flags); in nvme_failover_req()
114 blk_steal_bios(&ns->head->requeue_list, req); in nvme_failover_req()
115 spin_unlock_irqrestore(&ns->head->requeue_lock, flags); in nvme_failover_req()
118 kblockd_schedule_work(&ns->head->requeue_work); in nvme_failover_req()
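
Read top to bottom, the hits trace the failover sequence: classify the completion status, invalidate or refresh path state, then move the bios onto the shared head's requeue list and kick the requeue worker. A hedged reconstruction; the specific NVME_SC_* case labels, the nvme_reset_ctrl() fallback, and the blk_mq_end_request() call are assumptions from the upstream driver:

        void nvme_failover_req(struct request *req)
        {
                struct nvme_ns *ns = req->q->queuedata;
                u16 status = nvme_req(req)->status;
                unsigned long flags;

                switch (status & 0x7ff) {
                case NVME_SC_ANA_TRANSITION:
                case NVME_SC_ANA_INACCESSIBLE:
                case NVME_SC_ANA_PERSISTENT_LOSS:
                        /*
                         * ANA error: the controller is alive but cannot serve
                         * this namespace on this path.  Drop the cached path
                         * and re-read the ANA log rather than racing admin
                         * vs. I/O queue state updates.
                         */
                        nvme_mpath_clear_current_path(ns);
                        if (ns->ctrl->ana_log_buf) {
                                set_bit(NVME_NS_ANA_PENDING, &ns->flags);
                                queue_work(nvme_wq, &ns->ctrl->ana_work);
                        }
                        break;
                case NVME_SC_HOST_PATH_ERROR:
                        /* Transport hiccup: just retry on another path. */
                        nvme_mpath_clear_current_path(ns);
                        break;
                default:
                        /* Unknown error: reset the controller. */
                        nvme_reset_ctrl(ns->ctrl);
                        break;
                }

                /* Move the bios to the shared head and complete this request. */
                spin_lock_irqsave(&ns->head->requeue_lock, flags);
                blk_steal_bios(&ns->head->requeue_list, req);
                spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
                blk_mq_end_request(req, 0);

                kblockd_schedule_work(&ns->head->requeue_work);
        }
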
124 struct nvme_ns *ns; in nvme_kick_requeue_lists() local
127 list_for_each_entry(ns, &ctrl->namespaces, list) { in nvme_kick_requeue_lists()
128 if (ns->head->disk) in nvme_kick_requeue_lists()
129 kblockd_schedule_work(&ns->head->requeue_work); in nvme_kick_requeue_lists()
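
This helper reruns every shared namespace's requeue worker after a controller state change. A sketch with the elided iteration context filled in; the namespaces_rwsem read lock is an assumption:

        void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
        {
                struct nvme_ns *ns;

                down_read(&ctrl->namespaces_rwsem);     /* locking assumed */
                list_for_each_entry(ns, &ctrl->namespaces, list) {
                        /* Only shared (multipath) namespaces have a head disk. */
                        if (ns->head->disk)
                                kblockd_schedule_work(&ns->head->requeue_work);
                }
                up_read(&ctrl->namespaces_rwsem);
        }
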
145 struct nvme_ns *ns, *fallback = NULL; in __nvme_find_path() local
147 list_for_each_entry_rcu(ns, &head->list, siblings) { in __nvme_find_path()
148 if (ns->ctrl->state != NVME_CTRL_LIVE || in __nvme_find_path()
149 test_bit(NVME_NS_ANA_PENDING, &ns->flags)) in __nvme_find_path()
151 switch (ns->ana_state) { in __nvme_find_path()
153 rcu_assign_pointer(head->current_path, ns); in __nvme_find_path()
154 return ns; in __nvme_find_path()
156 fallback = ns; in __nvme_find_path()
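
The path selector scans all sibling paths under RCU, returning the first ANA-optimized one immediately and keeping a non-optimized path as a fallback. A reconstruction; the NVME_ANA_* case labels and the final fallback caching are assumptions from the upstream driver:

        static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head)
        {
                struct nvme_ns *ns, *fallback = NULL;

                list_for_each_entry_rcu(ns, &head->list, siblings) {
                        /* Skip dead controllers and paths awaiting an ANA update. */
                        if (ns->ctrl->state != NVME_CTRL_LIVE ||
                            test_bit(NVME_NS_ANA_PENDING, &ns->flags))
                                continue;
                        switch (ns->ana_state) {
                        case NVME_ANA_OPTIMIZED:
                                /* Best case: cache it and return immediately. */
                                rcu_assign_pointer(head->current_path, ns);
                                return ns;
                        case NVME_ANA_NONOPTIMIZED:
                                fallback = ns;
                                break;
                        default:
                                break;
                        }
                }

                if (fallback)
                        rcu_assign_pointer(head->current_path, fallback);
                return fallback;
        }
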
168 static inline bool nvme_path_is_optimized(struct nvme_ns *ns) in nvme_path_is_optimized() argument
170 return ns->ctrl->state == NVME_CTRL_LIVE && in nvme_path_is_optimized()
171 ns->ana_state == NVME_ANA_OPTIMIZED; in nvme_path_is_optimized()
176 struct nvme_ns *ns = srcu_dereference(head->current_path, &head->srcu); in nvme_find_path() local
178 if (unlikely(!ns || !nvme_path_is_optimized(ns))) in nvme_find_path()
179 ns = __nvme_find_path(head); in nvme_find_path()
180 return ns; in nvme_find_path()
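
nvme_find_path() is the hot-path entry point: it reuses the SRCU-protected cached path while that path remains live and optimized, and only rescans via __nvme_find_path() otherwise. Callers must hold the head's SRCU read lock across both the lookup and the subsequent submission; a minimal usage sketch (variable names illustrative):

        int srcu_idx = srcu_read_lock(&head->srcu);
        struct nvme_ns *ns = nvme_find_path(head);

        if (ns) {
                /* ... submit the bio to the selected path, e.g. ns->queue ... */
        }
        srcu_read_unlock(&head->srcu, srcu_idx);
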
188 struct nvme_ns *ns; in nvme_ns_head_make_request() local
193 ns = nvme_find_path(head); in nvme_ns_head_make_request()
194 if (likely(ns)) { in nvme_ns_head_make_request()
195 bio->bi_disk = ns->disk; in nvme_ns_head_make_request()
198 disk_devt(ns->head->disk), in nvme_ns_head_make_request()
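
The head's make_request handler redirects each incoming bio to whatever path nvme_find_path() picks. A hedged reconstruction; the REQ_NVME_MPATH flag, the remap tracepoint arguments, and the no-path requeue branch are assumptions from the upstream driver:

        static blk_qc_t nvme_ns_head_make_request(struct request_queue *q,
                        struct bio *bio)
        {
                struct nvme_ns_head *head = q->queuedata;
                blk_qc_t ret = BLK_QC_T_NONE;
                struct nvme_ns *ns;
                int srcu_idx;

                srcu_idx = srcu_read_lock(&head->srcu);
                ns = nvme_find_path(head);
                if (likely(ns)) {
                        /* Retarget the bio at the chosen path's gendisk. */
                        bio->bi_disk = ns->disk;
                        bio->bi_opf |= REQ_NVME_MPATH;
                        trace_block_bio_remap(bio->bi_disk->queue, bio,
                                        disk_devt(ns->head->disk),
                                        bio->bi_iter.bi_sector);
                        ret = direct_make_request(bio);
                } else {
                        /* No usable path right now: park the bio for requeue. */
                        spin_lock_irq(&head->requeue_lock);
                        bio_list_add(&head->requeue_list, bio);
                        spin_unlock_irq(&head->requeue_lock);
                }
                srcu_read_unlock(&head->srcu, srcu_idx);
                return ret;
        }
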
221 struct nvme_ns *ns; in nvme_ns_head_poll() local
226 ns = srcu_dereference(head->current_path, &head->srcu); in nvme_ns_head_poll()
227 if (likely(ns && nvme_path_is_optimized(ns))) in nvme_ns_head_poll()
228 found = ns->queue->poll_fn(q, qc); in nvme_ns_head_poll()
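
Polling only consults the cached current path: a poll cookie is tied to the hardware queue that issued the I/O, so falling back to a different path would be meaningless. A sketch of the full handler; the found/srcu_idx locals are assumptions:

        static int nvme_ns_head_poll(struct request_queue *q, blk_qc_t qc)
        {
                struct nvme_ns_head *head = q->queuedata;
                struct nvme_ns *ns;
                int found = 0;
                int srcu_idx;

                srcu_idx = srcu_read_lock(&head->srcu);
                ns = srcu_dereference(head->current_path, &head->srcu);
                if (likely(ns && nvme_path_is_optimized(ns)))
                        found = ns->queue->poll_fn(q, qc);
                srcu_read_unlock(&head->srcu, srcu_idx);
                return found;
        }
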
307 static void nvme_mpath_set_live(struct nvme_ns *ns) in nvme_mpath_set_live() argument
309 struct nvme_ns_head *head = ns->head; in nvme_mpath_set_live()
311 lockdep_assert_held(&ns->head->lock); in nvme_mpath_set_live()
324 synchronize_srcu(&ns->head->srcu); in nvme_mpath_set_live()
325 kblockd_schedule_work(&ns->head->requeue_work); in nvme_mpath_set_live()
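
nvme_mpath_set_live() runs with the head lock held once a path becomes usable; the synchronize_srcu() guarantees that in-flight nvme_find_path() readers observe the new state before the requeued bios are resubmitted. A sketch; the first-path disk registration is an assumption, and its exact signature varies across kernel versions:

        static void nvme_mpath_set_live(struct nvme_ns *ns)
        {
                struct nvme_ns_head *head = ns->head;

                lockdep_assert_held(&ns->head->lock);

                if (!head->disk)
                        return;

                /* First live path: register the multipath gendisk (assumed). */
                if (!(head->disk->flags & GENHD_FL_UP))
                        device_add_disk(&head->subsys->dev, head->disk);

                /*
                 * Let concurrent readers see the new path state before the
                 * parked I/O is resubmitted.
                 */
                synchronize_srcu(&ns->head->srcu);
                kblockd_schedule_work(&ns->head->requeue_work);
        }
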
374 struct nvme_ns *ns) in nvme_update_ns_ana_state() argument
376 mutex_lock(&ns->head->lock); in nvme_update_ns_ana_state()
377 ns->ana_grpid = le32_to_cpu(desc->grpid); in nvme_update_ns_ana_state()
378 ns->ana_state = desc->state; in nvme_update_ns_ana_state()
379 clear_bit(NVME_NS_ANA_PENDING, &ns->flags); in nvme_update_ns_ana_state()
381 if (nvme_state_is_live(ns->ana_state)) in nvme_update_ns_ana_state()
382 nvme_mpath_set_live(ns); in nvme_update_ns_ana_state()
383 mutex_unlock(&ns->head->lock); in nvme_update_ns_ana_state()
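
The function is almost fully visible above: under the head lock it records the group ID and state, clears the pending flag, and promotes the path if the new state is live. For reference, the descriptor it consumes is an ANA log page group entry; the layout below is quoted from include/linux/nvme.h from memory, so treat it as a sketch:

        struct nvme_ana_group_desc {
                __le32  grpid;          /* ANA group identifier */
                __le32  nnsids;         /* number of NSIDs in this group */
                __le64  chgcnt;         /* change count */
                __u8    state;          /* optimized/non-optimized/... */
                __u8    rsvd17[15];
                __le32  nsids[];        /* sorted namespace IDs */
        };
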
391 struct nvme_ns *ns; in nvme_update_ana_state() local
404 list_for_each_entry(ns, &ctrl->namespaces, list) { in nvme_update_ana_state()
407 if (ns->head->ns_id < nsid) in nvme_update_ana_state()
409 if (ns->head->ns_id == nsid) in nvme_update_ana_state()
410 nvme_update_ns_ana_state(desc, ns); in nvme_update_ana_state()
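
The NSID comparison works because both the controller's namespace list and the descriptor's nsids[] array are sorted, so the walk is a linear merge over the two sequences. A hedged reconstruction of the loop; the locking and the loop bookkeeping are assumptions from the upstream driver:

        static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
                        struct nvme_ana_group_desc *desc, void *data)
        {
                u32 nr_nsids = le32_to_cpu(desc->nnsids), n = 0;
                struct nvme_ns *ns;

                if (!nr_nsids)
                        return 0;

                down_write(&ctrl->namespaces_rwsem);    /* locking assumed */
                list_for_each_entry(ns, &ctrl->namespaces, list) {
                        unsigned int nsid = le32_to_cpu(desc->nsids[n]);

                        /* Both sequences are sorted: skip namespaces below nsid. */
                        if (ns->head->ns_id < nsid)
                                continue;
                        if (ns->head->ns_id == nsid)
                                nvme_update_ns_ana_state(desc, ns);
                        if (++n == nr_nsids)
                                break;
                }
                up_write(&ctrl->namespaces_rwsem);
                return 0;
        }
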
490 struct nvme_ns *ns = nvme_get_ns_from_dev(dev); in ana_state_show() local
492 return sprintf(buf, "%s\n", nvme_ana_state_names[ns->ana_state]); in ana_state_show()
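
The attribute exposes each path's ANA state through sysfs. A sketch of the complete read handler, including the (assumed) read-only attribute declaration:

        static ssize_t ana_state_show(struct device *dev,
                        struct device_attribute *attr, char *buf)
        {
                struct nvme_ns *ns = nvme_get_ns_from_dev(dev);

                return sprintf(buf, "%s\n", nvme_ana_state_names[ns->ana_state]);
        }
        DEVICE_ATTR_RO(ana_state);
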
508 void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id) in nvme_mpath_add_disk() argument
510 if (nvme_ctrl_use_ana(ns->ctrl)) { in nvme_mpath_add_disk()
516 mutex_lock(&ns->ctrl->ana_lock); in nvme_mpath_add_disk()
517 ns->ana_grpid = le32_to_cpu(id->anagrpid); in nvme_mpath_add_disk()
518 nvme_parse_ana_log(ns->ctrl, &desc, nvme_lookup_ana_group_desc); in nvme_mpath_add_disk()
519 mutex_unlock(&ns->ctrl->ana_lock); in nvme_mpath_add_disk()
522 nvme_update_ns_ana_state(&desc, ns); in nvme_mpath_add_disk()
525 mutex_lock(&ns->head->lock); in nvme_mpath_add_disk()
526 ns->ana_state = NVME_ANA_OPTIMIZED; in nvme_mpath_add_disk()
527 nvme_mpath_set_live(ns); in nvme_mpath_add_disk()
528 mutex_unlock(&ns->head->lock); in nvme_mpath_add_disk()
531 if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) { in nvme_mpath_add_disk()
532 struct gendisk *disk = ns->head->disk; in nvme_mpath_add_disk()
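
Taken together, the tail of the listing shows namespace bring-up: with ANA, look up the namespace's group descriptor in the controller's log and apply it; without ANA, mark the path optimized and live directly; finally, propagate the stable-pages requirement to the multipath disk. A hedged reconstruction; the stack-local descriptor lookup and the BDI capability flag are assumptions from the upstream driver:

        void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id)
        {
                if (nvme_ctrl_use_ana(ns->ctrl)) {
                        struct nvme_ana_group_desc desc = {
                                .grpid = id->anagrpid,
                                .state = 0,
                        };

                        /* Find this namespace's group in the cached ANA log. */
                        mutex_lock(&ns->ctrl->ana_lock);
                        ns->ana_grpid = le32_to_cpu(id->anagrpid);
                        nvme_parse_ana_log(ns->ctrl, &desc,
                                        nvme_lookup_ana_group_desc);
                        mutex_unlock(&ns->ctrl->ana_lock);
                        if (desc.state)
                                nvme_update_ns_ana_state(&desc, ns);
                } else {
                        /* No ANA: every path is treated as optimized. */
                        mutex_lock(&ns->head->lock);
                        ns->ana_state = NVME_ANA_OPTIMIZED;
                        nvme_mpath_set_live(ns);
                        mutex_unlock(&ns->head->lock);
                }

                /* Propagate stable-writes from the path to the head disk. */
                if (bdi_cap_stable_pages_required(ns->queue->backing_dev_info)) {
                        struct gendisk *disk = ns->head->disk;

                        if (disk)
                                disk->queue->backing_dev_info->capabilities |=
                                                BDI_CAP_STABLE_WRITES;
                }
        }
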