Lines matching full:req (hits shown are from the Linux NVMe target's drivers/nvme/target/admin-cmd.c)
27 static inline void nvmet_clear_aen(struct nvmet_req *req, u32 aen_bit) in nvmet_clear_aen() argument
29 int rae = le32_to_cpu(req->cmd->common.cdw10[0]) & 1 << 15; in nvmet_clear_aen()
32 clear_bit(aen_bit, &req->sq->ctrl->aen_masked); in nvmet_clear_aen()
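The three hits above fall in nvmet_clear_aen(). Read together they suggest a helper of roughly the following shape, where the Retain Asynchronous Event (RAE) flag, bit 15 of CDW10, gates clearing of the controller's AEN mask; the if (!rae) guard is inferred from context rather than taken from the matched lines:

    static inline void nvmet_clear_aen(struct nvmet_req *req, u32 aen_bit)
    {
        /* RAE: Retain Asynchronous Event, bit 15 of Get Log Page CDW10. */
        int rae = le32_to_cpu(req->cmd->common.cdw10[0]) & 1 << 15;

        /* Only unmask the event if the host did not ask to retain it. */
        if (!rae)
            clear_bit(aen_bit, &req->sq->ctrl->aen_masked);
    }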
48 static void nvmet_execute_get_log_page_noop(struct nvmet_req *req) in nvmet_execute_get_log_page_noop() argument
50 nvmet_req_complete(req, nvmet_zero_sgl(req, 0, req->data_len)); in nvmet_execute_get_log_page_noop()
53 static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req, in nvmet_get_smart_log_nsid() argument
59 ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid); in nvmet_get_smart_log_nsid()
62 le32_to_cpu(req->cmd->get_log_page.nsid)); in nvmet_get_smart_log_nsid()
87 static u16 nvmet_get_smart_log_all(struct nvmet_req *req, in nvmet_get_smart_log_all() argument
95 ctrl = req->sq->ctrl; in nvmet_get_smart_log_all()
120 static void nvmet_execute_get_log_page_smart(struct nvmet_req *req) in nvmet_execute_get_log_page_smart() argument
125 if (req->data_len != sizeof(*log)) in nvmet_execute_get_log_page_smart()
132 if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL)) in nvmet_execute_get_log_page_smart()
133 status = nvmet_get_smart_log_all(req, log); in nvmet_execute_get_log_page_smart()
135 status = nvmet_get_smart_log_nsid(req, log); in nvmet_execute_get_log_page_smart()
139 status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log)); in nvmet_execute_get_log_page_smart()
143 nvmet_req_complete(req, status); in nvmet_execute_get_log_page_smart()
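The SMART log hits outline the handler's flow: validate the transfer length, build the log either controller-wide (NVME_NSID_ALL) or for a single namespace, copy it to the request's SGL, and complete. A rough sketch under that reading, with the allocation and the error path filled in as assumptions:

    static void nvmet_execute_get_log_page_smart(struct nvmet_req *req)
    {
        struct nvme_smart_log *log;
        u16 status = NVME_SC_INTERNAL;

        /* The host must ask for exactly one SMART log page. */
        if (req->data_len != sizeof(*log))
            goto out;

        log = kzalloc(sizeof(*log), GFP_KERNEL);    /* assumed allocation */
        if (!log)
            goto out;

        if (req->cmd->get_log_page.nsid == cpu_to_le32(NVME_NSID_ALL))
            status = nvmet_get_smart_log_all(req, log);
        else
            status = nvmet_get_smart_log_nsid(req, log);
        if (!status)
            status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log));

        kfree(log);
    out:
        nvmet_req_complete(req, status);
    }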
146 static void nvmet_execute_get_log_cmd_effects_ns(struct nvmet_req *req) in nvmet_execute_get_log_cmd_effects_ns() argument
169 status = nvmet_copy_to_sgl(req, 0, log, sizeof(*log)); in nvmet_execute_get_log_cmd_effects_ns()
173 nvmet_req_complete(req, status); in nvmet_execute_get_log_cmd_effects_ns()
176 static void nvmet_execute_get_log_changed_ns(struct nvmet_req *req) in nvmet_execute_get_log_changed_ns() argument
178 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_get_log_changed_ns()
182 if (req->data_len != NVME_MAX_CHANGED_NAMESPACES * sizeof(__le32)) in nvmet_execute_get_log_changed_ns()
190 status = nvmet_copy_to_sgl(req, 0, ctrl->changed_ns_list, len); in nvmet_execute_get_log_changed_ns()
192 status = nvmet_zero_sgl(req, len, req->data_len - len); in nvmet_execute_get_log_changed_ns()
194 nvmet_clear_aen(req, NVME_AEN_CFG_NS_ATTR); in nvmet_execute_get_log_changed_ns()
197 nvmet_req_complete(req, status); in nvmet_execute_get_log_changed_ns()
200 static u32 nvmet_format_ana_group(struct nvmet_req *req, u32 grpid, in nvmet_format_ana_group() argument
203 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_format_ana_group()
207 if (!(req->cmd->get_log_page.lsp & NVME_ANA_LOG_RGO)) { in nvmet_format_ana_group()
218 desc->state = req->port->ana_state[grpid]; in nvmet_format_ana_group()
223 static void nvmet_execute_get_log_page_ana(struct nvmet_req *req) in nvmet_execute_get_log_page_ana() argument
243 len = nvmet_format_ana_group(req, grpid, desc); in nvmet_execute_get_log_page_ana()
244 status = nvmet_copy_to_sgl(req, offset, desc, len); in nvmet_execute_get_log_page_ana()
257 nvmet_clear_aen(req, NVME_AEN_CFG_ANA_CHANGE); in nvmet_execute_get_log_page_ana()
263 status = nvmet_copy_to_sgl(req, 0, &hdr, sizeof(hdr)); in nvmet_execute_get_log_page_ana()
265 nvmet_req_complete(req, status); in nvmet_execute_get_log_page_ana()
268 static void nvmet_execute_identify_ctrl(struct nvmet_req *req) in nvmet_execute_identify_ctrl() argument
270 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_identify_ctrl()
355 if (req->port->inline_data_size) in nvmet_execute_identify_ctrl()
362 req->port->inline_data_size) / 16); in nvmet_execute_identify_ctrl()
383 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); in nvmet_execute_identify_ctrl()
387 nvmet_req_complete(req, status); in nvmet_execute_identify_ctrl()
390 static void nvmet_execute_identify_ns(struct nvmet_req *req) in nvmet_execute_identify_ns() argument
396 if (le32_to_cpu(req->cmd->identify.nsid) == NVME_NSID_ALL) { in nvmet_execute_identify_ns()
408 ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid); in nvmet_execute_identify_ns()
417 switch (req->port->ana_state[ns->anagrpid]) { in nvmet_execute_identify_ns()
448 status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); in nvmet_execute_identify_ns()
451 nvmet_req_complete(req, status); in nvmet_execute_identify_ns()
454 static void nvmet_execute_identify_nslist(struct nvmet_req *req) in nvmet_execute_identify_nslist() argument
457 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_identify_nslist()
459 u32 min_nsid = le32_to_cpu(req->cmd->identify.nsid); in nvmet_execute_identify_nslist()
480 status = nvmet_copy_to_sgl(req, 0, list, buf_size); in nvmet_execute_identify_nslist()
484 nvmet_req_complete(req, status); in nvmet_execute_identify_nslist()
487 static u16 nvmet_copy_ns_identifier(struct nvmet_req *req, u8 type, u8 len, in nvmet_copy_ns_identifier() argument
496 status = nvmet_copy_to_sgl(req, *off, &desc, sizeof(desc)); in nvmet_copy_ns_identifier()
501 status = nvmet_copy_to_sgl(req, *off, id, len); in nvmet_copy_ns_identifier()
509 static void nvmet_execute_identify_desclist(struct nvmet_req *req) in nvmet_execute_identify_desclist() argument
515 ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->identify.nsid); in nvmet_execute_identify_desclist()
522 status = nvmet_copy_ns_identifier(req, NVME_NIDT_UUID, in nvmet_execute_identify_desclist()
529 status = nvmet_copy_ns_identifier(req, NVME_NIDT_NGUID, in nvmet_execute_identify_desclist()
536 if (sg_zero_buffer(req->sg, req->sg_cnt, NVME_IDENTIFY_DATA_SIZE - off, in nvmet_execute_identify_desclist()
542 nvmet_req_complete(req, status); in nvmet_execute_identify_desclist()
552 static void nvmet_execute_abort(struct nvmet_req *req) in nvmet_execute_abort() argument
554 nvmet_set_result(req, 1); in nvmet_execute_abort()
555 nvmet_req_complete(req, 0); in nvmet_execute_abort()
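The two Abort hits are essentially the whole handler: the target never aborts an in-flight command. It sets the completion result dword to 1 ("command not aborted", which the NVMe specification permits) and completes the Abort itself successfully:

    static void nvmet_execute_abort(struct nvmet_req *req)
    {
        /* Report "command not aborted" and complete with success. */
        nvmet_set_result(req, 1);
        nvmet_req_complete(req, 0);
    }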
558 static u16 nvmet_write_protect_flush_sync(struct nvmet_req *req) in nvmet_write_protect_flush_sync() argument
562 if (req->ns->file) in nvmet_write_protect_flush_sync()
563 status = nvmet_file_flush(req); in nvmet_write_protect_flush_sync()
565 status = nvmet_bdev_flush(req); in nvmet_write_protect_flush_sync()
568 pr_err("write protect flush failed nsid: %u\n", req->ns->nsid); in nvmet_write_protect_flush_sync()
572 static u16 nvmet_set_feat_write_protect(struct nvmet_req *req) in nvmet_set_feat_write_protect() argument
574 u32 write_protect = le32_to_cpu(req->cmd->common.cdw10[1]); in nvmet_set_feat_write_protect()
575 struct nvmet_subsys *subsys = req->sq->ctrl->subsys; in nvmet_set_feat_write_protect()
578 req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid); in nvmet_set_feat_write_protect()
579 if (unlikely(!req->ns)) in nvmet_set_feat_write_protect()
585 req->ns->readonly = true; in nvmet_set_feat_write_protect()
586 status = nvmet_write_protect_flush_sync(req); in nvmet_set_feat_write_protect()
588 req->ns->readonly = false; in nvmet_set_feat_write_protect()
591 req->ns->readonly = false; in nvmet_set_feat_write_protect()
599 nvmet_ns_changed(subsys, req->ns->nsid); in nvmet_set_feat_write_protect()
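The write-protect hits (nvmet_write_protect_flush_sync() and nvmet_set_feat_write_protect()) show the sequence: look up the namespace, flip its readonly flag, flush dirty data through the file or block backend when enabling protection, and signal a namespace-change event. The switch structure, locking, and status constants below are reconstructed around those lines and should be read as an approximation:

    static u16 nvmet_set_feat_write_protect(struct nvmet_req *req)
    {
        u32 write_protect = le32_to_cpu(req->cmd->common.cdw10[1]);
        struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
        u16 status = NVME_SC_FEATURE_NOT_CHANGEABLE;

        req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->rw.nsid);
        if (unlikely(!req->ns))
            return status;

        mutex_lock(&subsys->lock);        /* assumed: serialises ns state */
        switch (write_protect) {
        case NVME_NS_WRITE_PROTECT:
            req->ns->readonly = true;
            /* Flush through the file or bdev backend before claiming success. */
            status = nvmet_write_protect_flush_sync(req);
            if (status)
                req->ns->readonly = false;
            break;
        case NVME_NS_NO_WRITE_PROTECT:
            req->ns->readonly = false;
            status = 0;
            break;
        default:
            break;
        }

        if (!status)
            nvmet_ns_changed(subsys, req->ns->nsid);
        mutex_unlock(&subsys->lock);
        return status;
    }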
604 static void nvmet_execute_set_features(struct nvmet_req *req) in nvmet_execute_set_features() argument
606 struct nvmet_subsys *subsys = req->sq->ctrl->subsys; in nvmet_execute_set_features()
607 u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]); in nvmet_execute_set_features()
613 nvmet_set_result(req, in nvmet_execute_set_features()
617 val32 = le32_to_cpu(req->cmd->common.cdw10[1]); in nvmet_execute_set_features()
618 req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000); in nvmet_execute_set_features()
619 nvmet_set_result(req, req->sq->ctrl->kato); in nvmet_execute_set_features()
622 val32 = le32_to_cpu(req->cmd->common.cdw10[1]); in nvmet_execute_set_features()
628 WRITE_ONCE(req->sq->ctrl->aen_enabled, val32); in nvmet_execute_set_features()
629 nvmet_set_result(req, val32); in nvmet_execute_set_features()
635 status = nvmet_set_feat_write_protect(req); in nvmet_execute_set_features()
642 nvmet_req_complete(req, status); in nvmet_execute_set_features()
645 static u16 nvmet_get_feat_write_protect(struct nvmet_req *req) in nvmet_get_feat_write_protect() argument
647 struct nvmet_subsys *subsys = req->sq->ctrl->subsys; in nvmet_get_feat_write_protect()
650 req->ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->common.nsid); in nvmet_get_feat_write_protect()
651 if (!req->ns) in nvmet_get_feat_write_protect()
655 if (req->ns->readonly == true) in nvmet_get_feat_write_protect()
659 nvmet_set_result(req, result); in nvmet_get_feat_write_protect()
665 static void nvmet_execute_get_features(struct nvmet_req *req) in nvmet_execute_get_features() argument
667 struct nvmet_subsys *subsys = req->sq->ctrl->subsys; in nvmet_execute_get_features()
668 u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]); in nvmet_execute_get_features()
694 nvmet_set_result(req, READ_ONCE(req->sq->ctrl->aen_enabled)); in nvmet_execute_get_features()
697 nvmet_set_result(req, 1); in nvmet_execute_get_features()
700 nvmet_set_result(req, in nvmet_execute_get_features()
704 nvmet_set_result(req, req->sq->ctrl->kato * 1000); in nvmet_execute_get_features()
708 if (!(req->cmd->common.cdw10[1] & cpu_to_le32(1 << 0))) { in nvmet_execute_get_features()
713 status = nvmet_copy_to_sgl(req, 0, &req->sq->ctrl->hostid, in nvmet_execute_get_features()
714 sizeof(req->sq->ctrl->hostid)); in nvmet_execute_get_features()
717 status = nvmet_get_feat_write_protect(req); in nvmet_execute_get_features()
724 nvmet_req_complete(req, status); in nvmet_execute_get_features()
727 static void nvmet_execute_async_event(struct nvmet_req *req) in nvmet_execute_async_event() argument
729 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_async_event()
734 nvmet_req_complete(req, NVME_SC_ASYNC_LIMIT | NVME_SC_DNR); in nvmet_execute_async_event()
737 ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req; in nvmet_execute_async_event()
743 static void nvmet_execute_keep_alive(struct nvmet_req *req) in nvmet_execute_keep_alive() argument
745 struct nvmet_ctrl *ctrl = req->sq->ctrl; in nvmet_execute_keep_alive()
751 nvmet_req_complete(req, 0); in nvmet_execute_keep_alive()
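The Keep Alive hits only show the controller lookup and the completion; the interesting step, rearming the keep-alive timeout from ctrl->kato, sits between them. The mod_delayed_work() call and the ka_work field are filled in here as assumptions, not taken from the matched lines:

    static void nvmet_execute_keep_alive(struct nvmet_req *req)
    {
        struct nvmet_ctrl *ctrl = req->sq->ctrl;

        /* Assumed: push the keep-alive work out by another KATO interval. */
        mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
        nvmet_req_complete(req, 0);
    }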
754 u16 nvmet_parse_admin_cmd(struct nvmet_req *req) in nvmet_parse_admin_cmd() argument
756 struct nvme_command *cmd = req->cmd; in nvmet_parse_admin_cmd()
759 ret = nvmet_check_ctrl_status(req, cmd); in nvmet_parse_admin_cmd()
765 req->data_len = nvmet_get_log_page_len(cmd); in nvmet_parse_admin_cmd()
775 req->execute = nvmet_execute_get_log_page_noop; in nvmet_parse_admin_cmd()
778 req->execute = nvmet_execute_get_log_page_smart; in nvmet_parse_admin_cmd()
787 req->execute = nvmet_execute_get_log_page_noop; in nvmet_parse_admin_cmd()
790 req->execute = nvmet_execute_get_log_changed_ns; in nvmet_parse_admin_cmd()
793 req->execute = nvmet_execute_get_log_cmd_effects_ns; in nvmet_parse_admin_cmd()
796 req->execute = nvmet_execute_get_log_page_ana; in nvmet_parse_admin_cmd()
801 req->data_len = NVME_IDENTIFY_DATA_SIZE; in nvmet_parse_admin_cmd()
804 req->execute = nvmet_execute_identify_ns; in nvmet_parse_admin_cmd()
807 req->execute = nvmet_execute_identify_ctrl; in nvmet_parse_admin_cmd()
810 req->execute = nvmet_execute_identify_nslist; in nvmet_parse_admin_cmd()
813 req->execute = nvmet_execute_identify_desclist; in nvmet_parse_admin_cmd()
818 req->execute = nvmet_execute_abort; in nvmet_parse_admin_cmd()
819 req->data_len = 0; in nvmet_parse_admin_cmd()
822 req->execute = nvmet_execute_set_features; in nvmet_parse_admin_cmd()
823 req->data_len = 0; in nvmet_parse_admin_cmd()
826 req->execute = nvmet_execute_get_features; in nvmet_parse_admin_cmd()
827 req->data_len = 0; in nvmet_parse_admin_cmd()
830 req->execute = nvmet_execute_async_event; in nvmet_parse_admin_cmd()
831 req->data_len = 0; in nvmet_parse_admin_cmd()
834 req->execute = nvmet_execute_keep_alive; in nvmet_parse_admin_cmd()
835 req->data_len = 0; in nvmet_parse_admin_cmd()
840 req->sq->qid); in nvmet_parse_admin_cmd()
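Taken together, the nvmet_parse_admin_cmd() hits describe the dispatch pattern for the whole file: validate controller state, switch on the admin opcode, record the expected transfer length in req->data_len, and point req->execute at one of the handlers above; anything unrecognised is logged with the queue id and rejected. A condensed skeleton with only a few representative cases; the opcode, log page, and CNS constants (nvme_admin_*, NVME_LOG_*, NVME_ID_CNS_*) are the standard ones from include/linux/nvme.h and are not part of the matched lines:

    u16 nvmet_parse_admin_cmd(struct nvmet_req *req)
    {
        struct nvme_command *cmd = req->cmd;
        u16 ret;

        ret = nvmet_check_ctrl_status(req, cmd);
        if (unlikely(ret))
            return ret;

        switch (cmd->common.opcode) {
        case nvme_admin_get_log_page:
            req->data_len = nvmet_get_log_page_len(cmd);
            switch (cmd->get_log_page.lid) {
            case NVME_LOG_SMART:
                req->execute = nvmet_execute_get_log_page_smart;
                return 0;
            case NVME_LOG_CHANGED_NS:
                req->execute = nvmet_execute_get_log_changed_ns;
                return 0;
            /* remaining log page ids elided */
            }
            break;
        case nvme_admin_identify:
            req->data_len = NVME_IDENTIFY_DATA_SIZE;
            switch (cmd->identify.cns) {
            case NVME_ID_CNS_NS:
                req->execute = nvmet_execute_identify_ns;
                return 0;
            case NVME_ID_CNS_CTRL:
                req->execute = nvmet_execute_identify_ctrl;
                return 0;
            /* remaining CNS values elided */
            }
            break;
        case nvme_admin_keep_alive:
            req->execute = nvmet_execute_keep_alive;
            req->data_len = 0;
            return 0;
        /* abort, set/get features, async event follow the same pattern */
        }

        pr_err("unhandled cmd %d on qid %d\n", cmd->common.opcode,
               req->sq->qid);
        return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
    }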