// SPDX-License-Identifier: GPL-2.0
/*
 * Discovery service for the NVMe over Fabrics target.
 * Copyright (C) 2016 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <generated/utsrelease.h>
#include "nvmet.h"

struct nvmet_subsys *nvmet_disc_subsys;

static u64 nvmet_genctr;
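
/*
 * Queue a Discovery Log Page Changed AEN to @ctrl, provided the
 * controller is connected through @port and has not masked the
 * discovery-change notice in its AEN configuration.
 */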
static void __nvmet_disc_changed(struct nvmet_port *port,
				 struct nvmet_ctrl *ctrl)
{
	if (ctrl->port != port)
		return;

	if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_DISC_CHANGE))
		return;

	nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
			      NVME_AER_NOTICE_DISC_CHANGED, NVME_LOG_DISC);
}
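
/*
 * The discovery log for @port changed; notify all connected discovery
 * controllers.  If @subsys is non-NULL, only hosts allowed to access
 * that subsystem are notified.  Callers must hold nvmet_config_sem.
 */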
void nvmet_port_disc_changed(struct nvmet_port *port,
			     struct nvmet_subsys *subsys)
{
	struct nvmet_ctrl *ctrl;

	lockdep_assert_held(&nvmet_config_sem);
	nvmet_genctr++;

	mutex_lock(&nvmet_disc_subsys->lock);
	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (subsys && !nvmet_host_allowed(subsys, ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
	mutex_unlock(&nvmet_disc_subsys->lock);

	/* If transport can signal change, notify transport */
	if (port->tr_ops && port->tr_ops->discovery_chg)
		port->tr_ops->discovery_chg(port);
}
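
/*
 * Send a discovery-change event for @port to each connected discovery
 * controller, or only to controllers whose hostnqn matches @host when
 * @host is non-NULL.
 */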
static void __nvmet_subsys_disc_changed(struct nvmet_port *port,
					struct nvmet_subsys *subsys,
					struct nvmet_host *host)
{
	struct nvmet_ctrl *ctrl;

	mutex_lock(&nvmet_disc_subsys->lock);
	list_for_each_entry(ctrl, &nvmet_disc_subsys->ctrls, subsys_entry) {
		if (host && strcmp(nvmet_host_name(host), ctrl->hostnqn))
			continue;

		__nvmet_disc_changed(port, ctrl);
	}
	mutex_unlock(&nvmet_disc_subsys->lock);
}
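
/*
 * @subsys changed in a way that affects the discovery log.  Bump the
 * generation counter and notify every port that exposes @subsys,
 * optionally limited to the single host @host.
 */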
void nvmet_subsys_disc_changed(struct nvmet_subsys *subsys,
			       struct nvmet_host *host)
{
	struct nvmet_port *port;
	struct nvmet_subsys_link *s;

	nvmet_genctr++;

	list_for_each_entry(port, nvmet_ports, global_entry)
		list_for_each_entry(s, &port->subsystems, entry) {
			if (s->subsys != subsys)
				continue;
			__nvmet_subsys_disc_changed(port, subsys, host);
		}
}
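
/*
 * Add @port to @parent's referral list and publish the change through a
 * discovery-change event.  Enabling an already-enabled referral is a
 * no-op.
 */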
void nvmet_referral_enable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (list_empty(&port->entry)) {
		list_add_tail(&port->entry, &parent->referrals);
		port->enabled = true;
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}
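
/*
 * Remove @port from @parent's referral list, if present, and publish
 * the change through a discovery-change event.
 */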
void nvmet_referral_disable(struct nvmet_port *parent, struct nvmet_port *port)
{
	down_write(&nvmet_config_sem);
	if (!list_empty(&port->entry)) {
		port->enabled = false;
		list_del_init(&port->entry);
		nvmet_port_disc_changed(parent, NULL);
	}
	up_write(&nvmet_config_sem);
}
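
/*
 * Fill in discovery log page entry number @numrec of @hdr with the
 * transport address information of @port, the given subsystem NQN, and
 * the entry @type (NVM subsystem or referral to another discovery
 * service).
 */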
static void nvmet_format_discovery_entry(struct nvmf_disc_rsp_page_hdr *hdr,
		struct nvmet_port *port, char *subsys_nqn, char *traddr,
		u8 type, u32 numrec)
{
	struct nvmf_disc_rsp_page_entry *e = &hdr->entries[numrec];

	e->trtype = port->disc_addr.trtype;
	e->adrfam = port->disc_addr.adrfam;
	e->treq = port->disc_addr.treq;
	e->portid = port->disc_addr.portid;
	/* we support only dynamic controllers */
	e->cntlid = cpu_to_le16(NVME_CNTLID_DYNAMIC);
	e->asqsz = cpu_to_le16(NVME_AQ_DEPTH);
	e->subtype = type;
	memcpy(e->trsvcid, port->disc_addr.trsvcid, NVMF_TRSVCID_SIZE);
	memcpy(e->traddr, traddr, NVMF_TRADDR_SIZE);
	memcpy(e->tsas.common, port->disc_addr.tsas.common, NVMF_TSAS_SIZE);
	strncpy(e->subnqn, subsys_nqn, NVMF_NQN_SIZE);
}

/*
 * nvmet_set_disc_traddr - set a correct discovery log entry traddr
 *
 * IP based transports (e.g. RDMA) can listen on "any" IPv4/IPv6 address
 * (INADDR_ANY or IN6ADDR_ANY_INIT).  The discovery log page traddr reply
 * must not contain that "any" IP address.  If the transport implements
 * .disc_traddr, use it: that callback derives the discovery traddr from
 * the req->port address in case the port in question listens on an
 * "any" IP address.
 */
static void nvmet_set_disc_traddr(struct nvmet_req *req, struct nvmet_port *port,
		char *traddr)
{
	if (req->ops->disc_traddr)
		req->ops->disc_traddr(req, port, traddr);
	else
		memcpy(traddr, port->disc_addr.traddr, NVMF_TRADDR_SIZE);
}
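
/*
 * Count the discovery log page entries visible to the requesting host:
 * one for each subsystem on the port that allows this hostnqn, plus one
 * for each referral.  The single caller holds nvmet_config_sem so the
 * count stays valid while the log page is built.
 */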
static size_t discovery_log_entries(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	size_t entries = 0;

	list_for_each_entry(p, &req->port->subsystems, entry) {
		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;
		entries++;
	}
	list_for_each_entry(r, &req->port->referrals, entry)
		entries++;
	return entries;
}
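
/*
 * Handler for the Get Log Page command on a discovery controller.  Only
 * the Discovery log (NVME_LOG_DISC) is supported: build the full log
 * page under nvmet_config_sem, then copy the dword-aligned slice the
 * host asked for back into its SGL.
 */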
static void nvmet_execute_disc_get_log_page(struct nvmet_req *req)
{
	const int entry_size = sizeof(struct nvmf_disc_rsp_page_entry);
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvmf_disc_rsp_page_hdr *hdr;
	u64 offset = nvmet_get_log_page_offset(req->cmd);
	size_t data_len = nvmet_get_log_page_len(req->cmd);
	size_t alloc_len;
	struct nvmet_subsys_link *p;
	struct nvmet_port *r;
	u32 numrec = 0;
	u16 status = 0;
	void *buffer;

	if (!nvmet_check_transfer_len(req, data_len))
		return;

	if (req->cmd->get_log_page.lid != NVME_LOG_DISC) {
		req->error_loc =
			offsetof(struct nvme_get_log_page_command, lid);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	/* Spec requires dword aligned offsets */
	if (offset & 0x3) {
		req->error_loc =
			offsetof(struct nvme_get_log_page_command, lpo);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	/*
	 * Make sure we're passing at least a buffer of response header size.
	 * If host provided data len is less than the header size, only the
	 * number of bytes requested by host will be sent to host.
	 */
	down_read(&nvmet_config_sem);
	alloc_len = sizeof(*hdr) + entry_size * discovery_log_entries(req);
	buffer = kzalloc(alloc_len, GFP_KERNEL);
	if (!buffer) {
		up_read(&nvmet_config_sem);
		status = NVME_SC_INTERNAL;
		goto out;
	}

	hdr = buffer;
	list_for_each_entry(p, &req->port->subsystems, entry) {
		char traddr[NVMF_TRADDR_SIZE];

		if (!nvmet_host_allowed(p->subsys, ctrl->hostnqn))
			continue;

		nvmet_set_disc_traddr(req, req->port, traddr);
		nvmet_format_discovery_entry(hdr, req->port,
				p->subsys->subsysnqn, traddr,
				NVME_NQN_NVME, numrec);
		numrec++;
	}

	list_for_each_entry(r, &req->port->referrals, entry) {
		nvmet_format_discovery_entry(hdr, r,
				NVME_DISC_SUBSYS_NAME,
				r->disc_addr.traddr,
				NVME_NQN_DISC, numrec);
		numrec++;
	}

	hdr->genctr = cpu_to_le64(nvmet_genctr);
	hdr->numrec = cpu_to_le64(numrec);
	hdr->recfmt = cpu_to_le16(0);

	nvmet_clear_aen_bit(req, NVME_AEN_BIT_DISC_CHANGE);

	up_read(&nvmet_config_sem);

	status = nvmet_copy_to_sgl(req, 0, buffer + offset, data_len);
	kfree(buffer);
out:
	nvmet_req_complete(req, status);
}
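
/*
 * Handler for the Identify command on a discovery controller.  Only the
 * Identify Controller data structure is supported; it is filled with
 * the synthetic "Linux" model string, the kernel release as firmware
 * revision, and the capabilities this target implements.
 */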
static void nvmet_execute_disc_identify(struct nvmet_req *req)
{
	struct nvmet_ctrl *ctrl = req->sq->ctrl;
	struct nvme_id_ctrl *id;
	const char model[] = "Linux";
	u16 status = 0;

	if (!nvmet_check_transfer_len(req, NVME_IDENTIFY_DATA_SIZE))
		return;

	if (req->cmd->identify.cns != NVME_ID_CNS_CTRL) {
		req->error_loc = offsetof(struct nvme_identify, cns);
		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		goto out;
	}

	id = kzalloc(sizeof(*id), GFP_KERNEL);
	if (!id) {
		status = NVME_SC_INTERNAL;
		goto out;
	}

	memset(id->sn, ' ', sizeof(id->sn));
	bin2hex(id->sn, &ctrl->subsys->serial,
		min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2));
	memset(id->fr, ' ', sizeof(id->fr));
	memcpy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1, ' ');
	memcpy_and_pad(id->fr, sizeof(id->fr),
		       UTS_RELEASE, strlen(UTS_RELEASE), ' ');

	/* no limit on data transfer sizes for now */
	id->mdts = 0;
	id->cntlid = cpu_to_le16(ctrl->cntlid);
	id->ver = cpu_to_le32(ctrl->subsys->ver);
	id->lpa = (1 << 2);

	/* no enforcement soft-limit for maxcmd - pick arbitrary high value */
	id->maxcmd = cpu_to_le16(NVMET_MAX_CMD);

	id->sgls = cpu_to_le32(1 << 0);	/* we always support SGLs */
	if (ctrl->ops->flags & NVMF_KEYED_SGLS)
		id->sgls |= cpu_to_le32(1 << 2);
	if (req->port->inline_data_size)
		id->sgls |= cpu_to_le32(1 << 20);

	id->oaes = cpu_to_le32(NVMET_DISC_AEN_CFG_OPTIONAL);

	strlcpy(id->subnqn, ctrl->subsys->subsysnqn, sizeof(id->subnqn));

	status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id));

	kfree(id);
out:
	nvmet_req_complete(req, status);
}
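
/*
 * Handler for the Set Features command on a discovery controller; only
 * Keep Alive Timer and Asynchronous Event Configuration are accepted.
 */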
static void nvmet_execute_disc_set_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		stat = nvmet_set_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		stat = nvmet_set_feat_async_event(req,
						  NVMET_DISC_AEN_CFG_OPTIONAL);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}
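
/*
 * Handler for the Get Features command on a discovery controller,
 * mirroring the feature set accepted by Set Features above.
 */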
static void nvmet_execute_disc_get_features(struct nvmet_req *req)
{
	u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
	u16 stat = 0;

	if (!nvmet_check_transfer_len(req, 0))
		return;

	switch (cdw10 & 0xff) {
	case NVME_FEAT_KATO:
		nvmet_get_feat_kato(req);
		break;
	case NVME_FEAT_ASYNC_EVENT:
		nvmet_get_feat_async_event(req);
		break;
	default:
		req->error_loc =
			offsetof(struct nvme_common_command, cdw10);
		stat = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
		break;
	}

	nvmet_req_complete(req, stat);
}
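
/*
 * Admin command parser for discovery controllers.  Reject everything
 * until the controller is ready, then dispatch the small admin command
 * set a discovery controller implements by setting req->execute.
 */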
u16 nvmet_parse_discovery_cmd(struct nvmet_req *req)
{
	struct nvme_command *cmd = req->cmd;

	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
		pr_err("got cmd %d while not ready\n",
		       cmd->common.opcode);
		req->error_loc =
			offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}

	switch (cmd->common.opcode) {
	case nvme_admin_set_features:
		req->execute = nvmet_execute_disc_set_features;
		return 0;
	case nvme_admin_get_features:
		req->execute = nvmet_execute_disc_get_features;
		return 0;
	case nvme_admin_async_event:
		req->execute = nvmet_execute_async_event;
		return 0;
	case nvme_admin_keep_alive:
		req->execute = nvmet_execute_keep_alive;
		return 0;
	case nvme_admin_get_log_page:
		req->execute = nvmet_execute_disc_get_log_page;
		return 0;
	case nvme_admin_identify:
		req->execute = nvmet_execute_disc_identify;
		return 0;
	default:
		pr_err("unhandled cmd %d\n", cmd->common.opcode);
		req->error_loc = offsetof(struct nvme_common_command, opcode);
		return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
	}
}
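
/*
 * Allocate the discovery subsystem at module init; all discovery
 * controllers attach to this single subsystem.
 */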
int __init nvmet_init_discovery(void)
{
	nvmet_disc_subsys =
		nvmet_subsys_alloc(NVME_DISC_SUBSYS_NAME, NVME_NQN_DISC);
	return PTR_ERR_OR_ZERO(nvmet_disc_subsys);
}
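
/* Release the discovery subsystem reference on module exit. */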
void nvmet_exit_discovery(void)
{
	nvmet_subsys_put(nvmet_disc_subsys);
}