Lines matching refs:head
57 if (!ns->head->disk) { in nvme_mpath_set_disk_name()
59 ns->head->instance); in nvme_mpath_set_disk_name()
63 ns->ctrl->instance, ns->head->instance); in nvme_mpath_set_disk_name()
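These nvme_mpath_set_disk_name() fragments are the naming policy: with no multipath node the namespace keeps the plain nvme%dn%d name, while a path under a multipath node gets the per-controller nvme%dc%dn%d name instead. A sketch reassembled from the fragments above; the exact signature, the *flags out-parameter, and the GENHD_FL_HIDDEN marking are assumptions on my part:

    /* Sketch only: signature and GENHD_FL_HIDDEN handling are assumed. */
    static void set_disk_name_sketch(struct nvme_ns *ns, char *disk_name,
                    int *flags)
    {
            if (!ns->head->disk) {
                    /* no multipath node: plain per-controller name */
                    sprintf(disk_name, "nvme%dn%d", ns->ctrl->instance,
                            ns->head->instance);
                    return;
            }
            /* per-path node under the shared nvme%dn%d multipath disk */
            sprintf(disk_name, "nvme%dc%dn%d", ns->ctrl->subsys->instance,
                    ns->ctrl->instance, ns->head->instance);
            *flags = GENHD_FL_HIDDEN;       /* assumed: keep path nodes hidden */
    }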
87 spin_lock_irqsave(&ns->head->requeue_lock, flags); in nvme_failover_req()
89 bio_set_dev(bio, ns->head->disk->part0); in nvme_failover_req()
90 blk_steal_bios(&ns->head->requeue_list, req); in nvme_failover_req()
91 spin_unlock_irqrestore(&ns->head->requeue_lock, flags); in nvme_failover_req()
94 kblockd_schedule_work(&ns->head->requeue_work); in nvme_failover_req()
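Read together, the nvme_failover_req() fragments are the failover hand-off: under requeue_lock every bio of the failed request is re-pointed at the multipath disk, the bios are stolen onto head->requeue_list, and the requeue worker is kicked on kblockd. A sketch of that pattern, assuming the usual req->bio / bi_next walk; status bookkeeping and request completion are elided:

    static void failover_sketch(struct nvme_ns *ns, struct request *req)
    {
            struct bio *bio;
            unsigned long flags;

            spin_lock_irqsave(&ns->head->requeue_lock, flags);
            for (bio = req->bio; bio; bio = bio->bi_next)
                    bio_set_dev(bio, ns->head->disk->part0); /* retarget */
            blk_steal_bios(&ns->head->requeue_list, req);    /* req now bio-less */
            spin_unlock_irqrestore(&ns->head->requeue_lock, flags);

            kblockd_schedule_work(&ns->head->requeue_work);  /* resubmit later */
    }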
103 if (ns->head->disk) in nvme_kick_requeue_lists()
104 kblockd_schedule_work(&ns->head->requeue_work); in nvme_kick_requeue_lists()
120 struct nvme_ns_head *head = ns->head; in nvme_mpath_clear_current_path() local
124 if (!head) in nvme_mpath_clear_current_path()
128 if (ns == rcu_access_pointer(head->current_path[node])) { in nvme_mpath_clear_current_path()
129 rcu_assign_pointer(head->current_path[node], NULL); in nvme_mpath_clear_current_path()
144 kblockd_schedule_work(&ns->head->requeue_work); in nvme_mpath_clear_ctrl_paths()
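nvme_mpath_clear_current_path() drops the per-NUMA-node cached path whenever it points at the namespace going away; readers that still hold the old pointer stay safe because lookups run under head->srcu. A sketch of the per-node sweep (the for_each_node() loop is implied by the [node] indexing above):

    static void clear_current_path_sketch(struct nvme_ns *ns)
    {
            struct nvme_ns_head *head = ns->head;
            int node;

            if (!head)
                    return;
            for_each_node(node) {
                    /* next I/O on this node re-runs path selection */
                    if (ns == rcu_access_pointer(head->current_path[node]))
                            rcu_assign_pointer(head->current_path[node], NULL);
            }
    }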
151 struct nvme_ns_head *head = ns->head; in nvme_mpath_revalidate_paths() local
152 sector_t capacity = get_capacity(head->disk); in nvme_mpath_revalidate_paths()
156 srcu_idx = srcu_read_lock(&head->srcu); in nvme_mpath_revalidate_paths()
157 list_for_each_entry_rcu(ns, &head->list, siblings) { in nvme_mpath_revalidate_paths()
161 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_mpath_revalidate_paths()
164 rcu_assign_pointer(head->current_path[node], NULL); in nvme_mpath_revalidate_paths()
165 kblockd_schedule_work(&head->requeue_work); in nvme_mpath_revalidate_paths()
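nvme_mpath_revalidate_paths() walks the sibling paths inside an srcu_read_lock()/srcu_read_unlock() section, comparing each path's capacity against the multipath disk, then invalidates every node's cached path and kicks the requeue worker. A sketch; the NVME_NS_READY bit used to fence off mismatched paths is my assumption:

    static void revalidate_paths_sketch(struct nvme_ns *ns)
    {
            struct nvme_ns_head *head = ns->head;
            sector_t capacity = get_capacity(head->disk);
            int node, srcu_idx;

            srcu_idx = srcu_read_lock(&head->srcu);
            list_for_each_entry_rcu(ns, &head->list, siblings) {
                    if (capacity != get_capacity(ns->disk))
                            clear_bit(NVME_NS_READY, &ns->flags); /* assumed flag */
            }
            srcu_read_unlock(&head->srcu, srcu_idx);

            for_each_node(node)
                    rcu_assign_pointer(head->current_path[node], NULL);
            kblockd_schedule_work(&head->requeue_work);
    }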
184 static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node) in __nvme_find_path() argument
189 list_for_each_entry_rcu(ns, &head->list, siblings) { in __nvme_find_path()
193 if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA) in __nvme_find_path()
219 rcu_assign_pointer(head->current_path[node], found); in __nvme_find_path()
223 static struct nvme_ns *nvme_next_ns(struct nvme_ns_head *head, in nvme_next_ns() argument
226 ns = list_next_or_null_rcu(&head->list, &ns->siblings, struct nvme_ns, in nvme_next_ns()
230 return list_first_or_null_rcu(&head->list, struct nvme_ns, siblings); in nvme_next_ns()
233 static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head, in nvme_round_robin_path() argument
238 if (list_is_singular(&head->list)) { in nvme_round_robin_path()
244 for (ns = nvme_next_ns(head, old); in nvme_round_robin_path()
246 ns = nvme_next_ns(head, ns)) { in nvme_round_robin_path()
272 rcu_assign_pointer(head->current_path[node], found); in nvme_round_robin_path()
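The round-robin policy treats the RCU-protected sibling list as a ring: nvme_next_ns() steps forward with list_next_or_null_rcu() and wraps via list_first_or_null_rcu(), and nvme_round_robin_path() walks that ring from the previous choice until it finds a usable path, publishing the winner with rcu_assign_pointer(). The ring step, reassembled from the fragments above:

    static struct nvme_ns *next_ns_sketch(struct nvme_ns_head *head,
                    struct nvme_ns *ns)
    {
            /* step to the next sibling, or wrap to the front of the list */
            ns = list_next_or_null_rcu(&head->list, &ns->siblings,
                            struct nvme_ns, siblings);
            if (ns)
                    return ns;
            return list_first_or_null_rcu(&head->list, struct nvme_ns,
                            siblings);
    }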
282 inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head) in nvme_find_path() argument
287 ns = srcu_dereference(head->current_path[node], &head->srcu); in nvme_find_path()
289 return __nvme_find_path(head, node); in nvme_find_path()
291 if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR) in nvme_find_path()
292 return nvme_round_robin_path(head, node, ns); in nvme_find_path()
294 return __nvme_find_path(head, node); in nvme_find_path()
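nvme_find_path() is the per-I/O fast path and must run inside an SRCU read section on head->srcu: it srcu_dereference()s the cached per-node pointer, samples the iopolicy once with READ_ONCE(), and only falls back to the full __nvme_find_path() scan when the cache is empty or stale. A sketch; nvme_path_is_optimized() stands in for the staleness check and is an assumption here:

    static struct nvme_ns *find_path_sketch(struct nvme_ns_head *head)
    {
            int node = numa_node_id();
            struct nvme_ns *ns;

            /* valid only under srcu_read_lock(&head->srcu) */
            ns = srcu_dereference(head->current_path[node], &head->srcu);
            if (unlikely(!ns))
                    return __nvme_find_path(head, node);

            if (READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_RR)
                    return nvme_round_robin_path(head, node, ns);
            if (unlikely(!nvme_path_is_optimized(ns)))  /* assumed helper */
                    return __nvme_find_path(head, node);
            return ns;
    }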
298 static bool nvme_available_path(struct nvme_ns_head *head) in nvme_available_path() argument
302 list_for_each_entry_rcu(ns, &head->list, siblings) { in nvme_available_path()
320 struct nvme_ns_head *head = bio->bi_bdev->bd_disk->private_data; in nvme_ns_head_submit_bio() local
321 struct device *dev = disk_to_dev(head->disk); in nvme_ns_head_submit_bio()
335 srcu_idx = srcu_read_lock(&head->srcu); in nvme_ns_head_submit_bio()
336 ns = nvme_find_path(head); in nvme_ns_head_submit_bio()
340 trace_block_bio_remap(bio, disk_devt(ns->head->disk), in nvme_ns_head_submit_bio()
343 } else if (nvme_available_path(head)) { in nvme_ns_head_submit_bio()
346 spin_lock_irq(&head->requeue_lock); in nvme_ns_head_submit_bio()
347 bio_list_add(&head->requeue_list, bio); in nvme_ns_head_submit_bio()
348 spin_unlock_irq(&head->requeue_lock); in nvme_ns_head_submit_bio()
356 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_ns_head_submit_bio()
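nvme_ns_head_submit_bio() ties the pieces together: pick a path under SRCU and remap the bio onto it, and if no path is live but one could return, park the bio on head->requeue_list rather than failing it. A sketch of that three-way branch; the resubmission call and the error branch are assumptions, and blk-cgroup/tracing details are elided:

    srcu_idx = srcu_read_lock(&head->srcu);
    ns = nvme_find_path(head);
    if (likely(ns)) {
            bio_set_dev(bio, ns->disk->part0);      /* remap to chosen path */
            submit_bio_noacct(bio);
    } else if (nvme_available_path(head)) {
            /* no usable path now, but a controller may reconnect: park it */
            spin_lock_irq(&head->requeue_lock);
            bio_list_add(&head->requeue_list, bio);
            spin_unlock_irq(&head->requeue_lock);
    } else {
            bio_io_error(bio);                      /* nothing will come back */
    }
    srcu_read_unlock(&head->srcu, srcu_idx);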
376 struct nvme_ns_head *head = disk->private_data; in nvme_ns_head_report_zones() local
380 srcu_idx = srcu_read_lock(&head->srcu); in nvme_ns_head_report_zones()
381 ns = nvme_find_path(head); in nvme_ns_head_report_zones()
384 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_ns_head_report_zones()
429 static int nvme_add_ns_head_cdev(struct nvme_ns_head *head) in nvme_add_ns_head_cdev() argument
433 head->cdev_device.parent = &head->subsys->dev; in nvme_add_ns_head_cdev()
434 ret = dev_set_name(&head->cdev_device, "ng%dn%d", in nvme_add_ns_head_cdev()
435 head->subsys->instance, head->instance); in nvme_add_ns_head_cdev()
438 ret = nvme_cdev_add(&head->cdev, &head->cdev_device, in nvme_add_ns_head_cdev()
445 struct nvme_ns_head *head = in nvme_requeue_work() local
449 spin_lock_irq(&head->requeue_lock); in nvme_requeue_work()
450 next = bio_list_get(&head->requeue_list); in nvme_requeue_work()
451 spin_unlock_irq(&head->requeue_lock); in nvme_requeue_work()
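nvme_requeue_work() is the consumer of that parked-bio list: it detaches the whole list with bio_list_get() under requeue_lock, then resubmits each bio; since failover already re-pointed them at the multipath disk, resubmission re-runs path selection. A sketch:

    static void requeue_work_sketch(struct work_struct *work)
    {
            struct nvme_ns_head *head =
                    container_of(work, struct nvme_ns_head, requeue_work);
            struct bio *bio, *next;

            spin_lock_irq(&head->requeue_lock);
            next = bio_list_get(&head->requeue_list);   /* detach and empty */
            spin_unlock_irq(&head->requeue_lock);

            while ((bio = next) != NULL) {
                    next = bio->bi_next;
                    bio->bi_next = NULL;
                    submit_bio_noacct(bio);             /* re-runs path selection */
            }
    }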
461 int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) in nvme_mpath_alloc_disk() argument
465 mutex_init(&head->lock); in nvme_mpath_alloc_disk()
466 bio_list_init(&head->requeue_list); in nvme_mpath_alloc_disk()
467 spin_lock_init(&head->requeue_lock); in nvme_mpath_alloc_disk()
468 INIT_WORK(&head->requeue_work, nvme_requeue_work); in nvme_mpath_alloc_disk()
476 !nvme_is_unique_nsid(ctrl, head) || !multipath) in nvme_mpath_alloc_disk()
479 head->disk = blk_alloc_disk(ctrl->numa_node); in nvme_mpath_alloc_disk()
480 if (!head->disk) in nvme_mpath_alloc_disk()
482 head->disk->fops = &nvme_ns_head_ops; in nvme_mpath_alloc_disk()
483 head->disk->private_data = head; in nvme_mpath_alloc_disk()
484 sprintf(head->disk->disk_name, "nvme%dn%d", in nvme_mpath_alloc_disk()
485 ctrl->subsys->instance, head->instance); in nvme_mpath_alloc_disk()
487 blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue); in nvme_mpath_alloc_disk()
488 blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue); in nvme_mpath_alloc_disk()
491 blk_queue_logical_block_size(head->disk->queue, 512); in nvme_mpath_alloc_disk()
492 blk_set_stacking_limits(&head->disk->queue->limits); in nvme_mpath_alloc_disk()
497 blk_queue_write_cache(head->disk->queue, vwc, vwc); in nvme_mpath_alloc_disk()
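nvme_mpath_alloc_disk() initializes the requeue machinery, allocates the multipath gendisk on the controller's NUMA node, and deliberately starts the queue limits wide open: 512-byte logical blocks plus blk_set_stacking_limits(), so the real limits can be stacked in from each path as it appears. A sketch of the disk setup, reassembled from the fragments; error unwinding and the vwc derivation are simplified:

    head->disk = blk_alloc_disk(ctrl->numa_node);
    if (!head->disk)
            return -ENOMEM;
    head->disk->fops = &nvme_ns_head_ops;
    head->disk->private_data = head;
    sprintf(head->disk->disk_name, "nvme%dn%d",
            ctrl->subsys->instance, head->instance);

    blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue);
    blk_queue_flag_set(QUEUE_FLAG_NOWAIT, head->disk->queue);

    /* conservative defaults; per-path limits are stacked in later */
    blk_queue_logical_block_size(head->disk->queue, 512);
    blk_set_stacking_limits(&head->disk->queue->limits);

    /* vwc: assumed to mirror whether the controller reports a volatile cache */
    blk_queue_write_cache(head->disk->queue, vwc, vwc);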
503 struct nvme_ns_head *head = ns->head; in nvme_mpath_set_live() local
505 if (!head->disk) in nvme_mpath_set_live()
508 if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { in nvme_mpath_set_live()
509 device_add_disk(&head->subsys->dev, head->disk, in nvme_mpath_set_live()
511 nvme_add_ns_head_cdev(head); in nvme_mpath_set_live()
514 mutex_lock(&head->lock); in nvme_mpath_set_live()
518 srcu_idx = srcu_read_lock(&head->srcu); in nvme_mpath_set_live()
520 __nvme_find_path(head, node); in nvme_mpath_set_live()
521 srcu_read_unlock(&head->srcu, srcu_idx); in nvme_mpath_set_live()
523 mutex_unlock(&head->lock); in nvme_mpath_set_live()
525 synchronize_srcu(&head->srcu); in nvme_mpath_set_live()
526 kblockd_schedule_work(&head->requeue_work); in nvme_mpath_set_live()
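nvme_mpath_set_live() publishes the multipath disk exactly once, using test_and_set_bit(NVME_NSHEAD_DISK_LIVE) as the one-shot guard, pre-warms the per-node path cache under SRCU, and only kicks the requeue worker after synchronize_srcu(), so no reader can still be mid-selection on stale state. A sketch; the sysfs attribute groups passed to device_add_disk() are elided:

    if (!test_and_set_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
            /* first live path: register disk and char device once */
            device_add_disk(&head->subsys->dev, head->disk, NULL);
            nvme_add_ns_head_cdev(head);
    }

    mutex_lock(&head->lock);
    srcu_idx = srcu_read_lock(&head->srcu);
    for_each_node(node)
            __nvme_find_path(head, node);       /* pre-warm per-node cache */
    srcu_read_unlock(&head->srcu, srcu_idx);
    mutex_unlock(&head->lock);

    synchronize_srcu(&head->srcu);              /* all readers see the path */
    kblockd_schedule_work(&head->requeue_work); /* drain parked bios */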
620 if (ns->head->ns_id < nsid) in nvme_update_ana_state()
622 if (ns->head->ns_id == nsid) in nvme_update_ana_state()
626 if (ns->head->ns_id > nsid) in nvme_update_ana_state()
800 if (blk_queue_stable_writes(ns->queue) && ns->head->disk) in nvme_mpath_add_disk()
802 ns->head->disk->queue); in nvme_mpath_add_disk()
804 if (blk_queue_is_zoned(ns->queue) && ns->head->disk) in nvme_mpath_add_disk()
805 ns->head->disk->queue->nr_zones = ns->queue->nr_zones; in nvme_mpath_add_disk()
809 void nvme_mpath_shutdown_disk(struct nvme_ns_head *head) in nvme_mpath_shutdown_disk() argument
811 if (!head->disk) in nvme_mpath_shutdown_disk()
813 kblockd_schedule_work(&head->requeue_work); in nvme_mpath_shutdown_disk()
814 if (test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) { in nvme_mpath_shutdown_disk()
815 nvme_cdev_del(&head->cdev, &head->cdev_device); in nvme_mpath_shutdown_disk()
816 del_gendisk(head->disk); in nvme_mpath_shutdown_disk()
820 void nvme_mpath_remove_disk(struct nvme_ns_head *head) in nvme_mpath_remove_disk() argument
822 if (!head->disk) in nvme_mpath_remove_disk()
825 kblockd_schedule_work(&head->requeue_work); in nvme_mpath_remove_disk()
826 flush_work(&head->requeue_work); in nvme_mpath_remove_disk()
827 blk_cleanup_disk(head->disk); in nvme_mpath_remove_disk()
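The teardown pair enforces ordering: nvme_mpath_shutdown_disk() kicks the requeue worker and, if the disk was ever published, removes the cdev and gendisk; nvme_mpath_remove_disk() then kicks and flushes the worker one final time, so every still-parked bio is resubmitted (and fails, as no paths remain) before blk_cleanup_disk() frees the queue and disk. A sketch of the final step:

    static void remove_disk_sketch(struct nvme_ns_head *head)
    {
            if (!head->disk)
                    return;
            /* run the requeue worker once more to drain parked bios */
            kblockd_schedule_work(&head->requeue_work);
            flush_work(&head->requeue_work);
            blk_cleanup_disk(head->disk);       /* frees queue and gendisk */
    }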