Lines Matching +full:blk +full:- +full:ctrl
1 /* SPDX-License-Identifier: GPL-2.0 */
3 * Copyright (c) 2011-2014, Intel Corporation.
13 #include <linux/blk-mq.h>
15 #include <linux/sed-opal.h>
16 #include <linux/fault-inject.h>
19 #include <linux/t10-pi.h>
127 * Use non-standard 128-byte SQEs.
162 * this structure as the first member of their request-private data.
171 struct nvme_ctrl *ctrl;
191 if (!req->q->queuedata) /* in nvme_req_qid() */
217 * @NVME_CTRL_DEAD: Controller is non-present/unresponsive during
359 static inline enum nvme_ctrl_state nvme_ctrl_state(struct nvme_ctrl *ctrl)
361 return READ_ONCE(ctrl->state);
408 * there is a 1:1 relation to our namespace structures, that is ->list
442 struct nvme_ctrl *ctrl;
476 return ns->pi_type && ns->ms == sizeof(struct t10_pi_tuple); /* in nvme_ns_has_pi() */
486 int (*reg_read32)(struct nvme_ctrl *ctrl, u32 off, u32 *val);
487 int (*reg_write32)(struct nvme_ctrl *ctrl, u32 off, u32 val);
488 int (*reg_read64)(struct nvme_ctrl *ctrl, u32 off, u64 *val);
489 void (*free_ctrl)(struct nvme_ctrl *ctrl);
490 void (*submit_async_event)(struct nvme_ctrl *ctrl);
491 void (*delete_ctrl)(struct nvme_ctrl *ctrl);
492 void (*stop_ctrl)(struct nvme_ctrl *ctrl);
493 int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size);
508 return nvme_cid_install_genctr(nvme_req(rq)->genctr) | rq->tag; /* in nvme_cid() */
524 if (unlikely(nvme_genctr_mask(nvme_req(rq)->genctr) != genctr)) { /* in nvme_find_rq() */
525 dev_err(nvme_req(rq)->ctrl->device,
527 tag, genctr, nvme_genctr_mask(nvme_req(rq)->genctr));
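The two helpers above implement a use-after-free guard for completions: nvme_cid() packs a per-request generation counter into the upper bits of the 16-bit command id, and nvme_find_rq() rejects a completion whose generation no longer matches the one stored in the request. A minimal standalone sketch of the scheme, assuming an illustrative 12-bit tag / 4-bit generation split (the names and widths here are not the kernel's definitions):

#include <assert.h>
#include <stdint.h>

#define TAG_BITS	12
#define TAG_MASK	((1u << TAG_BITS) - 1)
#define GENCTR_MASK	((1u << (16 - TAG_BITS)) - 1)

/* Pack the generation counter above the blk-mq tag, as nvme_cid() does. */
static uint16_t make_cid(uint16_t tag, uint8_t genctr)
{
	return ((genctr & GENCTR_MASK) << TAG_BITS) | (tag & TAG_MASK);
}

int main(void)
{
	uint8_t genctr = 3;			/* current generation */
	uint16_t cid = make_cid(42, genctr);

	/* A completion carrying the current generation passes both checks. */
	assert((cid & TAG_MASK) == 42);
	assert((cid >> TAG_BITS) == (genctr & GENCTR_MASK));

	/* A stale command id from an earlier generation mismatches, which
	 * is what triggers the dev_err() in nvme_find_rq(). */
	assert((make_cid(42, genctr - 1) >> TAG_BITS) != (genctr & GENCTR_MASK));
	return 0;
}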
555 bool nvme_wait_reset(struct nvme_ctrl *ctrl);
556 int nvme_try_sched_reset(struct nvme_ctrl *ctrl);
558 static inline int nvme_reset_subsystem(struct nvme_ctrl *ctrl)
562 if (!ctrl->subsystem)
563 return -ENOTTY;
564 if (!nvme_wait_reset(ctrl))
565 return -EBUSY;
567 ret = ctrl->ops->reg_write32(ctrl, NVME_REG_NSSR, 0x4E564D65);
571 return nvme_try_sched_reset(ctrl);
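The magic value written to NVME_REG_NSSR above is the ASCII string "NVMe": the specification only honors a subsystem reset when exactly this value is written. A quick standalone check of the encoding:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* 'N' 'V' 'M' 'e' assembled most-significant byte first matches
	 * the constant used in nvme_reset_subsystem(). */
	uint32_t nssr_magic = ((uint32_t)'N' << 24) | ('V' << 16) |
			      ('M' << 8) | 'e';

	assert(nssr_magic == 0x4E564D65);
	return 0;
}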
579 return sector >> (ns->lba_shift - SECTOR_SHIFT); /* in nvme_sect_to_lba() */
587 return lba << (ns->lba_shift - SECTOR_SHIFT); /* in nvme_lba_to_sect() */
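These two helpers convert between 512-byte block layer sectors and the namespace's logical blocks with plain shifts by (lba_shift - SECTOR_SHIFT). A standalone worked example, assuming SECTOR_SHIFT == 9 and a namespace formatted with 4096-byte LBAs (lba_shift == 12):

#include <assert.h>

#define SECTOR_SHIFT	9	/* 512-byte sectors, as in the block layer */

int main(void)
{
	unsigned int lba_shift = 12;	/* 4K LBA format */

	/* nvme_sect_to_lba(): 8 x 512-byte sectors == 1 x 4096-byte LBA */
	assert((8ULL >> (lba_shift - SECTOR_SHIFT)) == 1);

	/* nvme_lba_to_sect(): LBA 3 begins at 512-byte sector 24 */
	assert((3ULL << (lba_shift - SECTOR_SHIFT)) == 24);
	return 0;
}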
591 * Convert byte length to NVMe's 0-based number of dwords
595 return (len >> 2) - 1; /* in nvme_bytes_to_numd() */
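NVMe encodes many transfer lengths as a 0-based count of dwords (4-byte units), which is all the helper above does. A standalone check of the arithmetic:

#include <assert.h>

/* Mirror of nvme_bytes_to_numd(): divide by the dword size, then
 * subtract one because the on-the-wire field is 0-based. */
static unsigned int bytes_to_numd(unsigned int len)
{
	return (len >> 2) - 1;
}

int main(void)
{
	assert(bytes_to_numd(4096) == 1023);
	assert(bytes_to_numd(4) == 0);	/* smallest transfer: one dword */
	return 0;
}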
618 * if blk-mq will need to use IPI magic to complete the request, and if yes do
627 rq->status = le16_to_cpu(status) >> 1; /* in nvme_try_complete_req() */
628 rq->result = result;
631 if (unlikely(blk_should_fake_timeout(req->q)))
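nvme_try_complete_req() stores the CQE's status and result in the request, then returns true when blk-mq has to finish the completion itself (a remote/IPI completion, or a faked timeout); only a false return means the transport should complete the request directly. A hedged sketch of the resulting caller pattern, loosely modeled on the PCIe driver's CQE handling (example_handle_cqe is a made-up name, and a real transport performs its own teardown, such as DMA unmapping, before the final completion):

static void example_handle_cqe(struct blk_mq_tags *tags,
			       struct nvme_completion *cqe)
{
	/* Look the request up by command id; nvme_find_rq() has already
	 * screened out stale generation counters. */
	struct request *req = nvme_find_rq(tags, cqe->command_id);

	if (!req)
		return;

	if (!nvme_try_complete_req(req, cqe->status, cqe->result))
		nvme_complete_rq(req);	/* fast path: complete it here */
}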
636 static inline void nvme_get_ctrl(struct nvme_ctrl *ctrl)
638 get_device(ctrl->device);
641 static inline void nvme_put_ctrl(struct nvme_ctrl *ctrl)
643 put_device(ctrl->device);
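nvme_get_ctrl()/nvme_put_ctrl() pin the controller's struct device so the controller cannot be freed while deferred work still dereferences it. A hedged usage sketch (my_work and its handler are hypothetical; the point is that every get is balanced by a put on the asynchronous path):

/* Submission side: pin ctrl before deferring work that uses it. */
nvme_get_ctrl(ctrl);
queue_work(nvme_wq, &my_work);	/* my_work: hypothetical work_struct */

/* At the end of the work handler, once ctrl is no longer touched: */
nvme_put_ctrl(ctrl);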
654 void nvme_cancel_tagset(struct nvme_ctrl *ctrl);
655 void nvme_cancel_admin_tagset(struct nvme_ctrl *ctrl);
656 bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
658 int nvme_disable_ctrl(struct nvme_ctrl *ctrl);
659 int nvme_enable_ctrl(struct nvme_ctrl *ctrl);
660 int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl);
661 int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
663 void nvme_uninit_ctrl(struct nvme_ctrl *ctrl);
664 void nvme_start_ctrl(struct nvme_ctrl *ctrl);
665 void nvme_stop_ctrl(struct nvme_ctrl *ctrl);
666 int nvme_init_identify(struct nvme_ctrl *ctrl);
668 void nvme_remove_namespaces(struct nvme_ctrl *ctrl);
673 void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
676 void nvme_stop_queues(struct nvme_ctrl *ctrl);
677 void nvme_start_queues(struct nvme_ctrl *ctrl);
678 void nvme_stop_admin_queue(struct nvme_ctrl *ctrl);
679 void nvme_start_admin_queue(struct nvme_ctrl *ctrl);
680 void nvme_kill_queues(struct nvme_ctrl *ctrl);
681 void nvme_sync_queues(struct nvme_ctrl *ctrl);
682 void nvme_sync_io_queues(struct nvme_ctrl *ctrl);
683 void nvme_unfreeze(struct nvme_ctrl *ctrl);
684 void nvme_wait_freeze(struct nvme_ctrl *ctrl);
685 int nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout);
686 void nvme_start_freeze(struct nvme_ctrl *ctrl);
688 #define NVME_QID_ANY -1
708 int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count);
709 void nvme_stop_keep_alive(struct nvme_ctrl *ctrl);
710 int nvme_reset_ctrl(struct nvme_ctrl *ctrl);
711 int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl);
712 int nvme_delete_ctrl(struct nvme_ctrl *ctrl);
714 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp, u8 csi,
724 static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
726 return ctrl->ana_log_buf != NULL;
733 struct nvme_ctrl *ctrl, int *flags);
735 void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
736 int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head);
739 int nvme_mpath_init_identify(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id);
740 void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl);
741 void nvme_mpath_update(struct nvme_ctrl *ctrl);
742 void nvme_mpath_uninit(struct nvme_ctrl *ctrl);
743 void nvme_mpath_stop(struct nvme_ctrl *ctrl);
745 void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl);
751 struct nvme_ns_head *head = ns->head; /* in nvme_mpath_check_last_path() */
753 if (head->disk && list_empty(&head->list))
754 kblockd_schedule_work(&head->requeue_work);
760 struct nvme_ns *ns = req->q->queuedata; /* in nvme_trace_bio_complete() */
762 if ((req->cmd_flags & REQ_NVME_MPATH) && req->bio)
763 trace_block_bio_complete(ns->head->disk->queue, req->bio);
771 static inline bool nvme_ctrl_use_ana(struct nvme_ctrl *ctrl)
780 struct nvme_ctrl *ctrl, int *flags) /* in nvme_set_disk_name() */
782 sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
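In the non-multipath configuration the stub above builds the familiar device name straight from the controller instance and the namespace-head instance. A standalone check of the format string:

#include <assert.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	char disk_name[16];

	/* Controller instance 0, namespace-head instance 1 -> "nvme0n1". */
	sprintf(disk_name, "nvme%dn%d", 0, 1);
	assert(strcmp(disk_name, "nvme0n1") == 0);
	return 0;
}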
788 static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
791 static inline int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,
807 static inline void nvme_mpath_clear_ctrl_paths(struct nvme_ctrl *ctrl)
817 static inline void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
820 static inline int nvme_mpath_init_identify(struct nvme_ctrl *ctrl,
823 if (ctrl->subsys->cmic & (1 << 3))
824 dev_warn(ctrl->device,
825 "Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
828 static inline void nvme_mpath_update(struct nvme_ctrl *ctrl)
831 static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
834 static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
869 dev_warn(ns->ctrl->device, /* in nvme_update_zone_info() */
871 return -EPROTONOSUPPORT;
891 return -ENOTTY; /* in nvme_nvm_ioctl() */
897 return dev_to_disk(dev)->private_data; /* in nvme_get_ns_from_dev() */
901 int nvme_hwmon_init(struct nvme_ctrl *ctrl);
902 void nvme_hwmon_exit(struct nvme_ctrl *ctrl);
904 static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl)
909 static inline void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
914 u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
918 struct nvme_ns *nvme_find_get_ns(struct nvme_ctrl *ctrl, unsigned nsid);