Lines matching "head" (whole-word identifier search) in the NVMe multipath code, drivers/nvme/host/multipath.c
57 sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance); in nvme_set_disk_name()
58 } else if (ns->head->disk) { in nvme_set_disk_name()
60 ctrl->instance, ns->head->instance); in nvme_set_disk_name()
64 ns->head->instance); in nvme_set_disk_name()
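
These first matches come from nvme_set_disk_name(), which picks the user-visible block device name. Below is a standalone sketch of the three-way decision; the hidden per-controller format ("nvme%dc%dn%d") is filled in from the surrounding kernel code, since that format-string line itself does not match "head":

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Model of nvme_set_disk_name(): without multipath the name is tied
     * to the controller instance; with multipath, per-controller nodes
     * get the nvme<subsys>c<ctrl>n<ns> form (and GENHD_FL_HIDDEN in the
     * real code), while the shared node uses the subsystem instance.
     */
    static void set_disk_name(char *name, size_t len, bool multipath,
                              bool head_has_disk, int ctrl_instance,
                              int subsys_instance, int head_instance)
    {
        if (!multipath)
            snprintf(name, len, "nvme%dn%d", ctrl_instance, head_instance);
        else if (head_has_disk)
            snprintf(name, len, "nvme%dc%dn%d", subsys_instance,
                     ctrl_instance, head_instance);
        else
            snprintf(name, len, "nvme%dn%d", subsys_instance, head_instance);
    }
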
86 spin_lock_irqsave(&ns->head->requeue_lock, flags); in nvme_failover_req()
87 blk_steal_bios(&ns->head->requeue_list, req); in nvme_failover_req()
88 spin_unlock_irqrestore(&ns->head->requeue_lock, flags); in nvme_failover_req()
91 kblockd_schedule_work(&ns->head->requeue_work); in nvme_failover_req()
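
nvme_failover_req() moves every bio of the failed request onto the shared head's requeue list under requeue_lock, then wakes the requeue worker. A minimal user-space model of the steal-and-kick pattern, with a pthread mutex standing in for the irq-safe spinlock (all names here are hypothetical):

    #include <pthread.h>
    #include <stddef.h>

    struct bio { struct bio *bi_next; };
    struct bio_list { struct bio *head, *tail; };

    struct ns_head {
        pthread_mutex_t requeue_lock;   /* ~ spin_lock_irqsave() */
        struct bio_list requeue_list;
    };

    /* Splice @src onto the tail of @dst, emptying @src (~ blk_steal_bios()). */
    static void bio_list_merge(struct bio_list *dst, struct bio_list *src)
    {
        if (!src->head)
            return;
        if (dst->tail)
            dst->tail->bi_next = src->head;
        else
            dst->head = src->head;
        dst->tail = src->tail;
        src->head = src->tail = NULL;
    }

    static void failover_req(struct ns_head *head, struct bio_list *req_bios)
    {
        pthread_mutex_lock(&head->requeue_lock);
        bio_list_merge(&head->requeue_list, req_bios);
        pthread_mutex_unlock(&head->requeue_lock);
        /* the kernel now kicks kblockd: kblockd_schedule_work(&head->requeue_work) */
    }
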
100 if (ns->head->disk) in nvme_kick_requeue_lists()
101 kblockd_schedule_work(&ns->head->requeue_work); in nvme_kick_requeue_lists()
117 struct nvme_ns_head *head = ns->head; in nvme_mpath_clear_current_path() local
121 if (!head) in nvme_mpath_clear_current_path()
125 if (ns == rcu_access_pointer(head->current_path[node])) { in nvme_mpath_clear_current_path()
126 rcu_assign_pointer(head->current_path[node], NULL); in nvme_mpath_clear_current_path()
141 kblockd_schedule_work(&ns->head->requeue_work); in nvme_mpath_clear_ctrl_paths()
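
nvme_mpath_clear_current_path() invalidates the per-NUMA-node cache of the last-used path when a namespace goes away. Reassembling the matched lines into the loop they belong to (for_each_node() is the kernel's standard NUMA node iterator):

    /* Drop every per-node cache entry still pointing at @ns so the next
     * I/O re-runs path selection instead of chasing a stale pointer. */
    int node;

    for_each_node(node) {
        if (ns == rcu_access_pointer(head->current_path[node]))
            rcu_assign_pointer(head->current_path[node], NULL);
    }
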
162 static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node) in __nvme_find_path() argument
167 list_for_each_entry_rcu(ns, &head->list, siblings) { in __nvme_find_path()
171 if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA) in __nvme_find_path()
197 rcu_assign_pointer(head->current_path[node], found); in __nvme_find_path()
201 static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head, in nvme_next_ns() argument
204 ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns, in nvme_next_ns()
208 return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings); in nvme_next_ns()
211 static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head, in nvme_round_robin_path() argument
216 if (list_is_singular(&head->list)) { in nvme_round_robin_path()
222 for (ns = nvme_next_ns(head, old); in nvme_round_robin_path()
224 ns = nvme_next_ns(head, ns)) { in nvme_round_robin_path()
250 rcu_assign_pointer(head->current_path[node], found); in nvme_round_robin_path()
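
nvme_next_ns() returns the next sibling or wraps to the first entry, giving nvme_round_robin_path() a circular scan that starts just after the previously used path (a singular list short-circuits before the scan). A self-contained model of the selection itself, using a ring of paths with ANA-style states; the kernel version additionally skips paths whose controller is not live and publishes the winner with rcu_assign_pointer():

    #include <stddef.h>

    enum ana_state { ANA_OPTIMIZED, ANA_NONOPTIMIZED, ANA_UNUSABLE };

    struct path {
        struct path *next;      /* circular ring ~ nvme_next_ns() wrap */
        enum ana_state state;
    };

    /* Take the first optimized path after @old; remember the first
     * merely usable path as a fallback in case none is optimized. */
    static struct path *round_robin(struct path *old)
    {
        struct path *fallback = NULL;
        struct path *p = old->next;

        do {
            if (p->state == ANA_OPTIMIZED)
                return p;
            if (!fallback && p->state == ANA_NONOPTIMIZED)
                fallback = p;
            p = p->next;
        } while (p != old->next);

        return fallback;        /* NULL: no usable path at all */
    }
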
260 inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head) in nvme_find_path() argument
265 ns = srcu_dereference(head->current_path[node], &head->srcu); in nvme_find_path()
267 return __nvme_find_path(head, node); in nvme_find_path()
269 if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR) in nvme_find_path()
270 return nvme_round_robin_path(head, node, ns); in nvme_find_path()
272 return __nvme_find_path(head, node); in nvme_find_path()
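
nvme_find_path() is the hot-path lookup: an SRCU-protected read of the per-node cache, a fallback to the full scan in __nvme_find_path() when the cache is empty, and a detour through the round-robin rotation when that iopolicy is set. A compact user-space model of the cache-or-scan structure, with C11 atomics standing in for srcu_dereference()/rcu_assign_pointer() and a 'live' flag standing in for the controller-state and ANA checks:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stddef.h>

    #define MAX_NODES 4
    #define MAX_PATHS 4

    struct path { bool live; };

    struct head {
        struct path *paths[MAX_PATHS];
        _Atomic(struct path *) current[MAX_NODES]; /* per-node cache */
    };

    /* ~ __nvme_find_path(): scan all siblings, publish the winner. */
    static struct path *scan_and_cache(struct head *h, int node)
    {
        for (int i = 0; i < MAX_PATHS; i++) {
            struct path *p = h->paths[i];
            if (p && p->live) {
                atomic_store(&h->current[node], p);
                return p;
            }
        }
        return NULL;
    }

    /* ~ nvme_find_path(): cached pointer first, full scan on a miss. */
    static struct path *find_path(struct head *h, int node)
    {
        struct path *p = atomic_load(&h->current[node]);

        if (p && p->live)
            return p;
        return scan_and_cache(h, node);
    }
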
276 static bool nvme_available_path(struct nvme_ns_head *head) in nvme_available_path() argument
280 list_for_each_entry_rcu(ns, &head->list, siblings) { in nvme_available_path()
296 struct nvme_ns_head *head = bio->bi_disk->private_data; in nvme_ns_head_submit_bio() local
297 struct device *dev = disk_to_dev(head->disk); in nvme_ns_head_submit_bio()
309 srcu_idx = srcu_read_lock(&head->srcu); in nvme_ns_head_submit_bio()
310 ns = nvme_find_path(head); in nvme_ns_head_submit_bio()
315 disk_devt(ns->head->disk), in nvme_ns_head_submit_bio()
318 } else if (nvme_available_path(head)) { in nvme_ns_head_submit_bio()
321 spin_lock_irq(&head->requeue_lock); in nvme_ns_head_submit_bio()
322 bio_list_add(&head->requeue_list, bio); in nvme_ns_head_submit_bio()
323 spin_unlock_irq(&head->requeue_lock); in nvme_ns_head_submit_bio()
331 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_ns_head_submit_bio()
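
nvme_ns_head_submit_bio() resolves a path inside the SRCU read section and takes one of three exits: submit down the live path, park the bio when no path is usable right now but one may return, or fail the bio outright. A condensed reconstruction of that dispatch (declarations, the ratelimited warnings, and the remap tracepoint are trimmed; bio_io_error() stands in for the BLK_STS_IOERR completion in the original, and submit_bio_noacct() is the block-layer entry point of the bi_disk era this listing comes from):

    srcu_idx = srcu_read_lock(&head->srcu);
    ns = nvme_find_path(head);
    if (ns) {
        bio->bi_disk = ns->disk;        /* retarget at the chosen path */
        ret = submit_bio_noacct(bio);
    } else if (nvme_available_path(head)) {
        /* no usable path now, but one may come back: park the bio */
        spin_lock_irq(&head->requeue_lock);
        bio_list_add(&head->requeue_list, bio);
        spin_unlock_irq(&head->requeue_lock);
    } else {
        bio_io_error(bio);              /* nothing will ever come back */
    }
    srcu_read_unlock(&head->srcu, srcu_idx);
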
337 struct nvme_ns_head *head = in nvme_requeue_work() local
341 spin_lock_irq(&head->requeue_lock); in nvme_requeue_work()
342 next = bio_list_get(&head->requeue_list); in nvme_requeue_work()
343 spin_unlock_irq(&head->requeue_lock); in nvme_requeue_work()
353 bio->bi_disk = head->disk; in nvme_requeue_work()
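
nvme_requeue_work() detaches the entire pending list in one step under the lock, then resubmits each bio through the multipath disk so that path selection runs afresh. Reconstruction of the drain loop (bio_list_get() empties the list and returns its head):

    spin_lock_irq(&head->requeue_lock);
    next = bio_list_get(&head->requeue_list);
    spin_unlock_irq(&head->requeue_lock);

    while ((bio = next) != NULL) {
        next = bio->bi_next;
        bio->bi_next = NULL;
        /* reset to the mpath node so submission picks a new path */
        bio->bi_disk = head->disk;
        submit_bio_noacct(bio);
    }
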
358 int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) in nvme_mpath_alloc_disk() argument
363 mutex_init(&head->lock); in nvme_mpath_alloc_disk()
364 bio_list_init(&head->requeue_list); in nvme_mpath_alloc_disk()
365 spin_lock_init(&head->requeue_lock); in nvme_mpath_alloc_disk()
366 INIT_WORK(&head->requeue_work, nvme_requeue_work); in nvme_mpath_alloc_disk()
389 head->disk = alloc_disk(0); in nvme_mpath_alloc_disk()
390 if (!head->disk) in nvme_mpath_alloc_disk()
392 head->disk->fops = &nvme_ns_head_ops; in nvme_mpath_alloc_disk()
393 head->disk->private_data = head; in nvme_mpath_alloc_disk()
394 head->disk->queue = q; in nvme_mpath_alloc_disk()
395 head->disk->flags = GENHD_FL_EXT_DEVT; in nvme_mpath_alloc_disk()
396 sprintf(head->disk->disk_name, "nvme%dn%d", in nvme_mpath_alloc_disk()
397 ctrl->subsys->instance, head->instance); in nvme_mpath_alloc_disk()
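
nvme_mpath_alloc_disk() builds the shared gendisk for the head: zero minors plus GENHD_FL_EXT_DEVT means the node gets a dynamically allocated extended dev_t, and the name uses the subsystem instance, matching the naming sketch above. The matched lines reassembled, with the error label and queue variable taken from the surrounding code (they are not part of the matches):

    head->disk = alloc_disk(0);             /* minors come via ext devt */
    if (!head->disk)
        goto out_cleanup_queue;             /* label from surrounding code */
    head->disk->fops = &nvme_ns_head_ops;
    head->disk->private_data = head;
    head->disk->queue = q;                  /* q allocated earlier */
    head->disk->flags = GENHD_FL_EXT_DEVT;
    sprintf(head->disk->disk_name, "nvme%dn%d",
            ctrl->subsys->instance, head->instance);
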
408 struct nvme_ns_head *head = ns->head; in nvme_mpath_set_live() local
410 if (!head->disk) in nvme_mpath_set_live()
413 if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) in nvme_mpath_set_live()
414 device_add_disk(&head->subsys->dev, head->disk, in nvme_mpath_set_live()
417 mutex_lock(&head->lock); in nvme_mpath_set_live()
421 srcu_idx = srcu_read_lock(&head->srcu); in nvme_mpath_set_live()
423 __nvme_find_path(head, node); in nvme_mpath_set_live()
424 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_mpath_set_live()
426 mutex_unlock(&head->lock); in nvme_mpath_set_live()
428 synchronize_srcu(&head->srcu); in nvme_mpath_set_live()
429 kblockd_schedule_work(&head->requeue_work); in nvme_mpath_set_live()
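
nvme_mpath_set_live() makes registration idempotent: test_and_set_bit() on NVME_NSHEAD_DISK_LIVE lets only the first live path call device_add_disk(); every caller then primes the per-node caches under the SRCU read lock and kicks the requeue worker so parked bios can flow again. A tiny self-contained model of the once-only registration:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag disk_live = ATOMIC_FLAG_INIT;

    static void register_disk(void)     /* stand-in for device_add_disk() */
    {
        puts("disk registered");
    }

    /* Whoever wins the test-and-set registers; everyone else skips. */
    static void set_live(void)
    {
        if (!atomic_flag_test_and_set(&disk_live))
            register_disk();
    }
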
514 if (ns->head->ns_id < nsid) in nvme_update_ana_state()
516 if (ns->head->ns_id == nsid) in nvme_update_ana_state()
520 if (ns->head->ns_id > nsid) in nvme_update_ana_state()
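
These three comparisons implement a merge-style scan in nvme_update_ana_state(): the controller's namespace list and the ANA group descriptor's nsid array are both sorted, so one forward pass pairs them up. A hedged standalone model of that pass (arrays stand in for the kernel's list walk; apply_state() is hypothetical):

    #include <stdio.h>

    static void apply_state(unsigned nsid) { printf("ns %u updated\n", nsid); }

    static void update_ana_state(const unsigned *ns_ids, int nr_ns,
                                 const unsigned *desc_nsids, int nr_nsids)
    {
        int i = 0;

        for (int d = 0; d < nr_nsids; d++) {
            while (i < nr_ns && ns_ids[i] < desc_nsids[d])
                i++;                        /* namespace below this nsid */
            if (i < nr_ns && ns_ids[i] == desc_nsids[d])
                apply_state(ns_ids[i++]);   /* match: update ANA state */
            /* ns_ids[i] > nsid: no local namespace for this entry */
        }
    }
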
682 if (blk_queue_stable_writes(ns->queue) && ns->head->disk) in nvme_mpath_add_disk()
684 ns->head->disk->queue); in nvme_mpath_add_disk()
686 if (blk_queue_is_zoned(ns->queue) && ns->head->disk) in nvme_mpath_add_disk()
687 ns->head->disk->queue->nr_zones = ns->queue->nr_zones; in nvme_mpath_add_disk()
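
nvme_mpath_add_disk() mirrors path-queue properties that users of the shared node rely on onto the head disk's queue. The matched lines with the one non-matching call, blk_queue_flag_set() from the surrounding code, restored:

    if (blk_queue_stable_writes(ns->queue) && ns->head->disk)
        blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES,
                           ns->head->disk->queue);
    /* zoned devices: the shared node must report the same zone count */
    if (blk_queue_is_zoned(ns->queue) && ns->head->disk)
        ns->head->disk->queue->nr_zones = ns->queue->nr_zones;
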
691 void nvme_mpath_remove_disk(struct nvme_ns_head *head) in nvme_mpath_remove_disk() argument
693 if (!head->disk) in nvme_mpath_remove_disk()
695 if (head->disk->flags & GENHD_FL_UP) in nvme_mpath_remove_disk()
696 del_gendisk(head->disk); in nvme_mpath_remove_disk()
697 blk_set_queue_dying(head->disk->queue); in nvme_mpath_remove_disk()
699 kblockd_schedule_work(&head->requeue_work); in nvme_mpath_remove_disk()
700 flush_work(&head->requeue_work); in nvme_mpath_remove_disk()
701 blk_cleanup_queue(head->disk->queue); in nvme_mpath_remove_disk()
702 if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { in nvme_mpath_remove_disk()
708 head->disk->queue = NULL; in nvme_mpath_remove_disk()
710 put_disk(head->disk); in nvme_mpath_remove_disk()
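
The teardown order in nvme_mpath_remove_disk() matters: unregister the user-visible node, mark the queue dying so nothing new enters, kick and flush the requeue worker so parked bios drain (and now fail), then release the queue and the disk. The matched lines with the reasoning as comments:

    if (!head->disk)
        return;
    if (head->disk->flags & GENHD_FL_UP)
        del_gendisk(head->disk);            /* remove the visible node */
    blk_set_queue_dying(head->disk->queue); /* fail new submissions */
    /* flush out any bios still parked on the requeue list */
    kblockd_schedule_work(&head->requeue_work);
    flush_work(&head->requeue_work);
    blk_cleanup_queue(head->disk->queue);
    if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
        head->disk->queue = NULL;           /* disk was never added:
                                               avoid a double release */
    put_disk(head->disk);
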