• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe Fabrics command implementation.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/blkdev.h>
#include "nvmet.h"

nvmet_execute_prop_set(struct nvmet_req * req)10 static void nvmet_execute_prop_set(struct nvmet_req *req)
11 {
12 	u64 val = le64_to_cpu(req->cmd->prop_set.value);
13 	u16 status = 0;
14 
15 	if (req->cmd->prop_set.attrib & 1) {
16 		req->error_loc =
17 			offsetof(struct nvmf_property_set_command, attrib);
18 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
19 		goto out;
20 	}
21 
22 	switch (le32_to_cpu(req->cmd->prop_set.offset)) {
23 	case NVME_REG_CC:
24 		nvmet_update_cc(req->sq->ctrl, val);
25 		break;
26 	default:
27 		req->error_loc =
28 			offsetof(struct nvmf_property_set_command, offset);
29 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
30 	}
31 out:
32 	nvmet_req_complete(req, status);
33 }
34 
nvmet_execute_prop_get(struct nvmet_req * req)35 static void nvmet_execute_prop_get(struct nvmet_req *req)
36 {
37 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
38 	u16 status = 0;
39 	u64 val = 0;
40 
41 	if (req->cmd->prop_get.attrib & 1) {
42 		switch (le32_to_cpu(req->cmd->prop_get.offset)) {
43 		case NVME_REG_CAP:
44 			val = ctrl->cap;
45 			break;
46 		default:
47 			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
48 			break;
49 		}
50 	} else {
51 		switch (le32_to_cpu(req->cmd->prop_get.offset)) {
52 		case NVME_REG_VS:
53 			val = ctrl->subsys->ver;
54 			break;
55 		case NVME_REG_CC:
56 			val = ctrl->cc;
57 			break;
58 		case NVME_REG_CSTS:
59 			val = ctrl->csts;
60 			break;
61 		default:
62 			status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
63 			break;
64 		}
65 	}
66 
67 	if (status && req->cmd->prop_get.attrib & 1) {
68 		req->error_loc =
69 			offsetof(struct nvmf_property_get_command, offset);
70 	} else {
71 		req->error_loc =
72 			offsetof(struct nvmf_property_get_command, attrib);
73 	}
74 
75 	req->cqe->result.u64 = cpu_to_le64(val);
76 	nvmet_req_complete(req, status);
77 }
78 
nvmet_parse_fabrics_cmd(struct nvmet_req * req)79 u16 nvmet_parse_fabrics_cmd(struct nvmet_req *req)
80 {
81 	struct nvme_command *cmd = req->cmd;
82 
83 	switch (cmd->fabrics.fctype) {
84 	case nvme_fabrics_type_property_set:
85 		req->data_len = 0;
86 		req->execute = nvmet_execute_prop_set;
87 		break;
88 	case nvme_fabrics_type_property_get:
89 		req->data_len = 0;
90 		req->execute = nvmet_execute_prop_get;
91 		break;
92 	default:
93 		pr_err("received unknown capsule type 0x%x\n",
94 			cmd->fabrics.fctype);
95 		req->error_loc = offsetof(struct nvmf_common_command, fctype);
96 		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
97 	}
98 
99 	return 0;
100 }
101 
nvmet_install_queue(struct nvmet_ctrl * ctrl,struct nvmet_req * req)102 static u16 nvmet_install_queue(struct nvmet_ctrl *ctrl, struct nvmet_req *req)
103 {
104 	struct nvmf_connect_command *c = &req->cmd->connect;
105 	u16 qid = le16_to_cpu(c->qid);
106 	u16 sqsize = le16_to_cpu(c->sqsize);
107 	struct nvmet_ctrl *old;
108 	u16 ret;
109 
110 	old = cmpxchg(&req->sq->ctrl, NULL, ctrl);
111 	if (old) {
112 		pr_warn("queue already connected!\n");
113 		req->error_loc = offsetof(struct nvmf_connect_command, opcode);
114 		return NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
115 	}
116 	if (!sqsize) {
117 		pr_warn("queue size zero!\n");
118 		req->error_loc = offsetof(struct nvmf_connect_command, sqsize);
119 		req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(sqsize);
120 		ret = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
121 		goto err;
122 	}
123 
124 	/* note: convert queue size from 0's-based value to 1's-based value */
125 	nvmet_cq_setup(ctrl, req->cq, qid, sqsize + 1);
126 	nvmet_sq_setup(ctrl, req->sq, qid, sqsize + 1);
127 
128 	if (c->cattr & NVME_CONNECT_DISABLE_SQFLOW) {
129 		req->sq->sqhd_disabled = true;
130 		req->cqe->sq_head = cpu_to_le16(0xffff);
131 	}
132 
133 	if (ctrl->ops->install_queue) {
134 		ret = ctrl->ops->install_queue(req->sq);
135 		if (ret) {
136 			pr_err("failed to install queue %d cntlid %d ret %x\n",
137 				qid, ctrl->cntlid, ret);
138 			goto err;
139 		}
140 	}
141 
142 	return 0;
143 
144 err:
145 	req->sq->ctrl = NULL;
146 	return ret;
147 }
148 
nvmet_execute_admin_connect(struct nvmet_req * req)149 static void nvmet_execute_admin_connect(struct nvmet_req *req)
150 {
151 	struct nvmf_connect_command *c = &req->cmd->connect;
152 	struct nvmf_connect_data *d;
153 	struct nvmet_ctrl *ctrl = NULL;
154 	u16 status = 0;
155 
156 	d = kmalloc(sizeof(*d), GFP_KERNEL);
157 	if (!d) {
158 		status = NVME_SC_INTERNAL;
159 		goto complete;
160 	}
161 
162 	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
163 	if (status)
164 		goto out;
165 
166 	/* zero out initial completion result, assign values as needed */
167 	req->cqe->result.u32 = 0;
168 
169 	if (c->recfmt != 0) {
170 		pr_warn("invalid connect version (%d).\n",
171 			le16_to_cpu(c->recfmt));
172 		req->error_loc = offsetof(struct nvmf_connect_command, recfmt);
173 		status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
174 		goto out;
175 	}
176 
177 	if (unlikely(d->cntlid != cpu_to_le16(0xffff))) {
178 		pr_warn("connect attempt for invalid controller ID %#x\n",
179 			d->cntlid);
180 		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
181 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
182 		goto out;
183 	}
184 
185 	d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
186 	d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
187 	status = nvmet_alloc_ctrl(d->subsysnqn, d->hostnqn, req,
188 				  le32_to_cpu(c->kato), &ctrl);
189 	if (status) {
190 		if (status == (NVME_SC_INVALID_FIELD | NVME_SC_DNR))
191 			req->error_loc =
192 				offsetof(struct nvme_common_command, opcode);
193 		goto out;
194 	}
195 
196 	uuid_copy(&ctrl->hostid, &d->hostid);
197 
198 	status = nvmet_install_queue(ctrl, req);
199 	if (status) {
200 		nvmet_ctrl_put(ctrl);
201 		goto out;
202 	}
203 
204 	pr_info("creating controller %d for subsystem %s for NQN %s.\n",
205 		ctrl->cntlid, ctrl->subsys->subsysnqn, ctrl->hostnqn);
206 	req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
207 
208 out:
209 	kfree(d);
210 complete:
211 	nvmet_req_complete(req, status);
212 }
213 
nvmet_execute_io_connect(struct nvmet_req * req)214 static void nvmet_execute_io_connect(struct nvmet_req *req)
215 {
216 	struct nvmf_connect_command *c = &req->cmd->connect;
217 	struct nvmf_connect_data *d;
218 	struct nvmet_ctrl *ctrl;
219 	u16 qid = le16_to_cpu(c->qid);
220 	u16 status = 0;
221 
222 	d = kmalloc(sizeof(*d), GFP_KERNEL);
223 	if (!d) {
224 		status = NVME_SC_INTERNAL;
225 		goto complete;
226 	}
227 
228 	status = nvmet_copy_from_sgl(req, 0, d, sizeof(*d));
229 	if (status)
230 		goto out;
231 
232 	/* zero out initial completion result, assign values as needed */
233 	req->cqe->result.u32 = 0;
234 
235 	if (c->recfmt != 0) {
236 		pr_warn("invalid connect version (%d).\n",
237 			le16_to_cpu(c->recfmt));
238 		status = NVME_SC_CONNECT_FORMAT | NVME_SC_DNR;
239 		goto out;
240 	}
241 
242 	d->subsysnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
243 	d->hostnqn[NVMF_NQN_FIELD_LEN - 1] = '\0';
244 	ctrl = nvmet_ctrl_find_get(d->subsysnqn, d->hostnqn,
245 				   le16_to_cpu(d->cntlid), req);
246 	if (!ctrl) {
247 		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
248 		goto out;
249 	}
250 
251 	if (unlikely(qid > ctrl->subsys->max_qid)) {
252 		pr_warn("invalid queue id (%d)\n", qid);
253 		status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
254 		req->cqe->result.u32 = IPO_IATTR_CONNECT_SQE(qid);
255 		goto out_ctrl_put;
256 	}
257 
258 	status = nvmet_install_queue(ctrl, req);
259 	if (status)
260 		goto out_ctrl_put;
261 
262 	/* pass back cntlid for successful completion */
263 	req->cqe->result.u16 = cpu_to_le16(ctrl->cntlid);
264 
265 	pr_debug("adding queue %d to ctrl %d.\n", qid, ctrl->cntlid);
266 
267 out:
268 	kfree(d);
269 complete:
270 	nvmet_req_complete(req, status);
271 	return;
272 
273 out_ctrl_put:
274 	nvmet_ctrl_put(ctrl);
275 	goto out;
276 }
277 
nvmet_parse_connect_cmd(struct nvmet_req * req)278 u16 nvmet_parse_connect_cmd(struct nvmet_req *req)
279 {
280 	struct nvme_command *cmd = req->cmd;
281 
282 	if (!nvme_is_fabrics(cmd)) {
283 		pr_err("invalid command 0x%x on unconnected queue.\n",
284 			cmd->fabrics.opcode);
285 		req->error_loc = offsetof(struct nvme_common_command, opcode);
286 		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
287 	}
288 	if (cmd->fabrics.fctype != nvme_fabrics_type_connect) {
289 		pr_err("invalid capsule type 0x%x on unconnected queue.\n",
290 			cmd->fabrics.fctype);
291 		req->error_loc = offsetof(struct nvmf_common_command, fctype);
292 		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
293 	}
294 
295 	req->data_len = sizeof(struct nvmf_connect_data);
296 	if (cmd->connect.qid == 0)
297 		req->execute = nvmet_execute_admin_connect;
298 	else
299 		req->execute = nvmet_execute_io_connect;
300 	return 0;
301 }
302