Lines Matching refs:ctrl

59 	struct nvme_ctrl	ctrl;  member
66 static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl) in to_loop_ctrl() argument
68 return container_of(ctrl, struct nvme_loop_ctrl, ctrl); in to_loop_ctrl()
74 struct nvme_loop_ctrl *ctrl; member
83 static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);
89 return queue - queue->ctrl->queues; in nvme_loop_queue_idx()
129 nvme_complete_async_event(&iod->queue->ctrl->ctrl, cqe); in nvme_loop_queue_response()
153 schedule_work(&iod->queue->ctrl->reset_work); in nvme_loop_timeout()
205 struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg); in nvme_loop_submit_async_event() local
206 struct nvme_loop_queue *queue = &ctrl->queues[0]; in nvme_loop_submit_async_event()
207 struct nvme_loop_iod *iod = &ctrl->async_event_iod; in nvme_loop_submit_async_event()
216 dev_err(ctrl->ctrl.device, "failed async event work\n"); in nvme_loop_submit_async_event()
223 static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl, in nvme_loop_init_iod() argument
228 iod->queue = &ctrl->queues[queue_idx]; in nvme_loop_init_iod()
250 struct nvme_loop_ctrl *ctrl = data; in nvme_loop_init_hctx() local
251 struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_loop_init_hctx()
253 BUG_ON(hctx_idx >= ctrl->queue_count); in nvme_loop_init_hctx()
262 struct nvme_loop_ctrl *ctrl = data; in nvme_loop_init_admin_hctx() local
263 struct nvme_loop_queue *queue = &ctrl->queues[0]; in nvme_loop_init_admin_hctx()
287 static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl) in nvme_loop_destroy_admin_queue() argument
289 nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); in nvme_loop_destroy_admin_queue()
290 blk_cleanup_queue(ctrl->ctrl.admin_q); in nvme_loop_destroy_admin_queue()
291 blk_mq_free_tag_set(&ctrl->admin_tag_set); in nvme_loop_destroy_admin_queue()
296 struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl); in nvme_loop_free_ctrl() local
298 if (list_empty(&ctrl->list)) in nvme_loop_free_ctrl()
302 list_del(&ctrl->list); in nvme_loop_free_ctrl()
306 blk_cleanup_queue(ctrl->ctrl.connect_q); in nvme_loop_free_ctrl()
307 blk_mq_free_tag_set(&ctrl->tag_set); in nvme_loop_free_ctrl()
309 kfree(ctrl->queues); in nvme_loop_free_ctrl()
312 kfree(ctrl); in nvme_loop_free_ctrl()
315 static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl) in nvme_loop_destroy_io_queues() argument
319 for (i = 1; i < ctrl->queue_count; i++) in nvme_loop_destroy_io_queues()
320 nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); in nvme_loop_destroy_io_queues()
323 static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl) in nvme_loop_init_io_queues() argument
325 struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; in nvme_loop_init_io_queues()
330 ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); in nvme_loop_init_io_queues()
334 dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues); in nvme_loop_init_io_queues()
337 ctrl->queues[i].ctrl = ctrl; in nvme_loop_init_io_queues()
338 ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq); in nvme_loop_init_io_queues()
342 ctrl->queue_count++; in nvme_loop_init_io_queues()
348 nvme_loop_destroy_io_queues(ctrl); in nvme_loop_init_io_queues()
352 static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) in nvme_loop_configure_admin_queue() argument
356 memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set)); in nvme_loop_configure_admin_queue()
357 ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops; in nvme_loop_configure_admin_queue()
358 ctrl->admin_tag_set.queue_depth = NVME_LOOP_AQ_BLKMQ_DEPTH; in nvme_loop_configure_admin_queue()
359 ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */ in nvme_loop_configure_admin_queue()
360 ctrl->admin_tag_set.numa_node = NUMA_NO_NODE; in nvme_loop_configure_admin_queue()
361 ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) + in nvme_loop_configure_admin_queue()
363 ctrl->admin_tag_set.driver_data = ctrl; in nvme_loop_configure_admin_queue()
364 ctrl->admin_tag_set.nr_hw_queues = 1; in nvme_loop_configure_admin_queue()
365 ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT; in nvme_loop_configure_admin_queue()
367 ctrl->queues[0].ctrl = ctrl; in nvme_loop_configure_admin_queue()
368 error = nvmet_sq_init(&ctrl->queues[0].nvme_sq); in nvme_loop_configure_admin_queue()
371 ctrl->queue_count = 1; in nvme_loop_configure_admin_queue()
373 error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set); in nvme_loop_configure_admin_queue()
377 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); in nvme_loop_configure_admin_queue()
378 if (IS_ERR(ctrl->ctrl.admin_q)) { in nvme_loop_configure_admin_queue()
379 error = PTR_ERR(ctrl->ctrl.admin_q); in nvme_loop_configure_admin_queue()
383 error = nvmf_connect_admin_queue(&ctrl->ctrl); in nvme_loop_configure_admin_queue()
387 error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap); in nvme_loop_configure_admin_queue()
389 dev_err(ctrl->ctrl.device, in nvme_loop_configure_admin_queue()
394 ctrl->ctrl.sqsize = in nvme_loop_configure_admin_queue()
395 min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize); in nvme_loop_configure_admin_queue()
397 error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); in nvme_loop_configure_admin_queue()
401 ctrl->ctrl.max_hw_sectors = in nvme_loop_configure_admin_queue()
404 error = nvme_init_identify(&ctrl->ctrl); in nvme_loop_configure_admin_queue()
408 nvme_start_keep_alive(&ctrl->ctrl); in nvme_loop_configure_admin_queue()
413 blk_cleanup_queue(ctrl->ctrl.admin_q); in nvme_loop_configure_admin_queue()
415 blk_mq_free_tag_set(&ctrl->admin_tag_set); in nvme_loop_configure_admin_queue()
417 nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); in nvme_loop_configure_admin_queue()
421 static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl) in nvme_loop_shutdown_ctrl() argument
423 nvme_stop_keep_alive(&ctrl->ctrl); in nvme_loop_shutdown_ctrl()
425 if (ctrl->queue_count > 1) { in nvme_loop_shutdown_ctrl()
426 nvme_stop_queues(&ctrl->ctrl); in nvme_loop_shutdown_ctrl()
427 blk_mq_tagset_busy_iter(&ctrl->tag_set, in nvme_loop_shutdown_ctrl()
428 nvme_cancel_request, &ctrl->ctrl); in nvme_loop_shutdown_ctrl()
429 nvme_loop_destroy_io_queues(ctrl); in nvme_loop_shutdown_ctrl()
432 if (ctrl->ctrl.state == NVME_CTRL_LIVE) in nvme_loop_shutdown_ctrl()
433 nvme_shutdown_ctrl(&ctrl->ctrl); in nvme_loop_shutdown_ctrl()
435 blk_mq_stop_hw_queues(ctrl->ctrl.admin_q); in nvme_loop_shutdown_ctrl()
436 blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, in nvme_loop_shutdown_ctrl()
437 nvme_cancel_request, &ctrl->ctrl); in nvme_loop_shutdown_ctrl()
438 nvme_loop_destroy_admin_queue(ctrl); in nvme_loop_shutdown_ctrl()
443 struct nvme_loop_ctrl *ctrl = container_of(work, in nvme_loop_del_ctrl_work() local
446 nvme_uninit_ctrl(&ctrl->ctrl); in nvme_loop_del_ctrl_work()
447 nvme_loop_shutdown_ctrl(ctrl); in nvme_loop_del_ctrl_work()
448 nvme_put_ctrl(&ctrl->ctrl); in nvme_loop_del_ctrl_work()
451 static int __nvme_loop_del_ctrl(struct nvme_loop_ctrl *ctrl) in __nvme_loop_del_ctrl() argument
453 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING)) in __nvme_loop_del_ctrl()
456 if (!schedule_work(&ctrl->delete_work)) in __nvme_loop_del_ctrl()
464 struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl); in nvme_loop_del_ctrl() local
467 ret = __nvme_loop_del_ctrl(ctrl); in nvme_loop_del_ctrl()
471 flush_work(&ctrl->delete_work); in nvme_loop_del_ctrl()
478 struct nvme_loop_ctrl *ctrl; in nvme_loop_delete_ctrl() local
481 list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) { in nvme_loop_delete_ctrl()
482 if (ctrl->ctrl.cntlid == nctrl->cntlid) in nvme_loop_delete_ctrl()
483 __nvme_loop_del_ctrl(ctrl); in nvme_loop_delete_ctrl()
490 struct nvme_loop_ctrl *ctrl = container_of(work, in nvme_loop_reset_ctrl_work() local
495 nvme_loop_shutdown_ctrl(ctrl); in nvme_loop_reset_ctrl_work()
497 ret = nvme_loop_configure_admin_queue(ctrl); in nvme_loop_reset_ctrl_work()
501 ret = nvme_loop_init_io_queues(ctrl); in nvme_loop_reset_ctrl_work()
505 for (i = 1; i < ctrl->queue_count; i++) { in nvme_loop_reset_ctrl_work()
506 ret = nvmf_connect_io_queue(&ctrl->ctrl, i); in nvme_loop_reset_ctrl_work()
511 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); in nvme_loop_reset_ctrl_work()
514 nvme_queue_scan(&ctrl->ctrl); in nvme_loop_reset_ctrl_work()
515 nvme_queue_async_events(&ctrl->ctrl); in nvme_loop_reset_ctrl_work()
517 nvme_start_queues(&ctrl->ctrl); in nvme_loop_reset_ctrl_work()
522 nvme_loop_destroy_io_queues(ctrl); in nvme_loop_reset_ctrl_work()
524 nvme_loop_destroy_admin_queue(ctrl); in nvme_loop_reset_ctrl_work()
526 dev_warn(ctrl->ctrl.device, "Removing after reset failure\n"); in nvme_loop_reset_ctrl_work()
527 nvme_uninit_ctrl(&ctrl->ctrl); in nvme_loop_reset_ctrl_work()
528 nvme_put_ctrl(&ctrl->ctrl); in nvme_loop_reset_ctrl_work()
533 struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl); in nvme_loop_reset_ctrl() local
535 if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING)) in nvme_loop_reset_ctrl()
538 if (!schedule_work(&ctrl->reset_work)) in nvme_loop_reset_ctrl()
541 flush_work(&ctrl->reset_work); in nvme_loop_reset_ctrl()
560 static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) in nvme_loop_create_io_queues() argument
564 ret = nvme_loop_init_io_queues(ctrl); in nvme_loop_create_io_queues()
568 memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set)); in nvme_loop_create_io_queues()
569 ctrl->tag_set.ops = &nvme_loop_mq_ops; in nvme_loop_create_io_queues()
570 ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size; in nvme_loop_create_io_queues()
571 ctrl->tag_set.reserved_tags = 1; /* fabric connect */ in nvme_loop_create_io_queues()
572 ctrl->tag_set.numa_node = NUMA_NO_NODE; in nvme_loop_create_io_queues()
573 ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE; in nvme_loop_create_io_queues()
574 ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) + in nvme_loop_create_io_queues()
576 ctrl->tag_set.driver_data = ctrl; in nvme_loop_create_io_queues()
577 ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1; in nvme_loop_create_io_queues()
578 ctrl->tag_set.timeout = NVME_IO_TIMEOUT; in nvme_loop_create_io_queues()
579 ctrl->ctrl.tagset = &ctrl->tag_set; in nvme_loop_create_io_queues()
581 ret = blk_mq_alloc_tag_set(&ctrl->tag_set); in nvme_loop_create_io_queues()
585 ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set); in nvme_loop_create_io_queues()
586 if (IS_ERR(ctrl->ctrl.connect_q)) { in nvme_loop_create_io_queues()
587 ret = PTR_ERR(ctrl->ctrl.connect_q); in nvme_loop_create_io_queues()
591 for (i = 1; i < ctrl->queue_count; i++) { in nvme_loop_create_io_queues()
592 ret = nvmf_connect_io_queue(&ctrl->ctrl, i); in nvme_loop_create_io_queues()
600 blk_cleanup_queue(ctrl->ctrl.connect_q); in nvme_loop_create_io_queues()
602 blk_mq_free_tag_set(&ctrl->tag_set); in nvme_loop_create_io_queues()
604 nvme_loop_destroy_io_queues(ctrl); in nvme_loop_create_io_queues()
611 struct nvme_loop_ctrl *ctrl; in nvme_loop_create_ctrl() local
615 ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); in nvme_loop_create_ctrl()
616 if (!ctrl) in nvme_loop_create_ctrl()
618 ctrl->ctrl.opts = opts; in nvme_loop_create_ctrl()
619 INIT_LIST_HEAD(&ctrl->list); in nvme_loop_create_ctrl()
621 INIT_WORK(&ctrl->delete_work, nvme_loop_del_ctrl_work); in nvme_loop_create_ctrl()
622 INIT_WORK(&ctrl->reset_work, nvme_loop_reset_ctrl_work); in nvme_loop_create_ctrl()
624 ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops, in nvme_loop_create_ctrl()
629 spin_lock_init(&ctrl->lock); in nvme_loop_create_ctrl()
633 ctrl->ctrl.sqsize = opts->queue_size - 1; in nvme_loop_create_ctrl()
634 ctrl->ctrl.kato = opts->kato; in nvme_loop_create_ctrl()
636 ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues), in nvme_loop_create_ctrl()
638 if (!ctrl->queues) in nvme_loop_create_ctrl()
641 ret = nvme_loop_configure_admin_queue(ctrl); in nvme_loop_create_ctrl()
645 if (opts->queue_size > ctrl->ctrl.maxcmd) { in nvme_loop_create_ctrl()
647 dev_warn(ctrl->ctrl.device, in nvme_loop_create_ctrl()
649 opts->queue_size, ctrl->ctrl.maxcmd); in nvme_loop_create_ctrl()
650 opts->queue_size = ctrl->ctrl.maxcmd; in nvme_loop_create_ctrl()
654 ret = nvme_loop_create_io_queues(ctrl); in nvme_loop_create_ctrl()
659 nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0); in nvme_loop_create_ctrl()
661 dev_info(ctrl->ctrl.device, in nvme_loop_create_ctrl()
662 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn); in nvme_loop_create_ctrl()
664 kref_get(&ctrl->ctrl.kref); in nvme_loop_create_ctrl()
666 changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); in nvme_loop_create_ctrl()
670 list_add_tail(&ctrl->list, &nvme_loop_ctrl_list); in nvme_loop_create_ctrl()
674 nvme_queue_scan(&ctrl->ctrl); in nvme_loop_create_ctrl()
675 nvme_queue_async_events(&ctrl->ctrl); in nvme_loop_create_ctrl()
678 return &ctrl->ctrl; in nvme_loop_create_ctrl()
681 nvme_loop_destroy_admin_queue(ctrl); in nvme_loop_create_ctrl()
683 kfree(ctrl->queues); in nvme_loop_create_ctrl()
685 nvme_uninit_ctrl(&ctrl->ctrl); in nvme_loop_create_ctrl()
687 nvme_put_ctrl(&ctrl->ctrl); in nvme_loop_create_ctrl()
745 struct nvme_loop_ctrl *ctrl, *next; in nvme_loop_cleanup_module() local
751 list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list) in nvme_loop_cleanup_module()
752 __nvme_loop_del_ctrl(ctrl); in nvme_loop_cleanup_module()
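
The to_loop_ctrl() helper listed at line 66 recovers the enclosing nvme_loop_ctrl from its embedded nvme_ctrl member (declared at line 59) via the kernel's container_of() pattern. Below is a minimal, self-contained sketch of that pattern, assuming simplified stand-in structures and a local userspace container_of definition rather than the kernel headers; it is an illustration of the idiom, not the kernel sources themselves.

#include <stddef.h>
#include <stdio.h>

/* Local stand-in for the kernel's container_of() macro (simplified, no type checking). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified stand-ins for the structures in the listing above. */
struct nvme_ctrl {
	int cntlid;
};

struct nvme_loop_ctrl {
	int queue_count;
	struct nvme_ctrl ctrl;		/* embedded member, as at line 59 */
};

/* Mirrors to_loop_ctrl() at line 66: map &loop->ctrl back to the outer structure. */
static struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

int main(void)
{
	struct nvme_loop_ctrl loop = { .queue_count = 4 };
	struct nvme_ctrl *inner = &loop.ctrl;

	/* Recovers the outer nvme_loop_ctrl from the embedded nvme_ctrl pointer. */
	printf("queue_count = %d\n", to_loop_ctrl(inner)->queue_count);
	return 0;
}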