1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Common code for the NVMe target.
4  * Copyright (c) 2015-2016 HGST, a Western Digital Company.
5  */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7 #include <linux/module.h>
8 #include <linux/random.h>
9 #include <linux/rculist.h>
10 #include <linux/pci-p2pdma.h>
11 #include <linux/scatterlist.h>
12 
13 #define CREATE_TRACE_POINTS
14 #include "trace.h"
15 
16 #include "nvmet.h"
17 
18 struct workqueue_struct *buffered_io_wq;
19 static const struct nvmet_fabrics_ops *nvmet_transports[NVMF_TRTYPE_MAX];
20 static DEFINE_IDA(cntlid_ida);
21 
22 /*
23  * This read/write semaphore is used to synchronize access to configuration
24  * information on a target system that will result in discovery log page
25  * information change for at least one host.
26  * The full list of resources protected by this semaphore is:
27  *
28  *  - subsystems list
29  *  - per-subsystem allowed hosts list
30  *  - allow_any_host subsystem attribute
31  *  - nvmet_genctr
32  *  - the nvmet_transports array
33  *
34  * When updating any of these lists/structures the write lock should be
35  * held, while readers (populating the discovery log page or checking a
36  * host-subsystem link) take the read lock to allow concurrent reads.
37  */
38 DECLARE_RWSEM(nvmet_config_sem);
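/*
 * Typical usage in this file: writers such as nvmet_register_transport()
 * below take down_write(&nvmet_config_sem) before changing any of the
 * resources listed above, while readers such as nvmet_port_send_ana_event()
 * only take down_read(), so several discovery log page builds can run in
 * parallel.
 */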
39 
40 u32 nvmet_ana_group_enabled[NVMET_MAX_ANAGRPS + 1];
41 u64 nvmet_ana_chgcnt;
42 DECLARE_RWSEM(nvmet_ana_sem);
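/*
 * nvmet_ana_sem serializes the ANA bookkeeping above: nvmet_ns_alloc() and
 * nvmet_ns_free() adjust nvmet_ana_group_enabled[] under down_write(), and
 * readers of the ANA log page are expected to take it for reading.
 */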
43 
44 inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
45 {
46 	u16 status;
47 
48 	switch (errno) {
49 	case 0:
50 		status = NVME_SC_SUCCESS;
51 		break;
52 	case -ENOSPC:
53 		req->error_loc = offsetof(struct nvme_rw_command, length);
54 		status = NVME_SC_CAP_EXCEEDED | NVME_SC_DNR;
55 		break;
56 	case -EREMOTEIO:
57 		req->error_loc = offsetof(struct nvme_rw_command, slba);
58 		status = NVME_SC_LBA_RANGE | NVME_SC_DNR;
59 		break;
60 	case -EOPNOTSUPP:
61 		req->error_loc = offsetof(struct nvme_common_command, opcode);
62 		switch (req->cmd->common.opcode) {
63 		case nvme_cmd_dsm:
64 		case nvme_cmd_write_zeroes:
65 			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
66 			break;
67 		default:
68 			status = NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
69 		}
70 		break;
71 	case -ENODATA:
72 		req->error_loc = offsetof(struct nvme_rw_command, nsid);
73 		status = NVME_SC_ACCESS_DENIED;
74 		break;
75 	case -EIO:
76 		fallthrough;
77 	default:
78 		req->error_loc = offsetof(struct nvme_common_command, opcode);
79 		status = NVME_SC_INTERNAL | NVME_SC_DNR;
80 	}
81 
82 	return status;
83 }
84 
85 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
86 		const char *subsysnqn);
87 
88 u16 nvmet_copy_to_sgl(struct nvmet_req *req, off_t off, const void *buf,
89 		size_t len)
90 {
91 	if (sg_pcopy_from_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
92 		req->error_loc = offsetof(struct nvme_common_command, dptr);
93 		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
94 	}
95 	return 0;
96 }
97 
98 u16 nvmet_copy_from_sgl(struct nvmet_req *req, off_t off, void *buf, size_t len)
99 {
100 	if (sg_pcopy_to_buffer(req->sg, req->sg_cnt, buf, len, off) != len) {
101 		req->error_loc = offsetof(struct nvme_common_command, dptr);
102 		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
103 	}
104 	return 0;
105 }
106 
107 u16 nvmet_zero_sgl(struct nvmet_req *req, off_t off, size_t len)
108 {
109 	if (sg_zero_buffer(req->sg, req->sg_cnt, len, off) != len) {
110 		req->error_loc = offsetof(struct nvme_common_command, dptr);
111 		return NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR;
112 	}
113 	return 0;
114 }
115 
116 static unsigned int nvmet_max_nsid(struct nvmet_subsys *subsys)
117 {
118 	unsigned long nsid = 0;
119 	struct nvmet_ns *cur;
120 	unsigned long idx;
121 
122 	xa_for_each(&subsys->namespaces, idx, cur)
123 		nsid = cur->nsid;
124 
125 	return nsid;
126 }
127 
128 static u32 nvmet_async_event_result(struct nvmet_async_event *aen)
129 {
130 	return aen->event_type | (aen->event_info << 8) | (aen->log_page << 16);
131 }
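/*
 * The AEN completion result dword built above places the event type in
 * byte 0, the event information in byte 1 and the associated log page
 * identifier in byte 2.  For example, a namespace-changed notice is reported
 * as NVME_AER_TYPE_NOTICE | (NVME_AER_NOTICE_NS_CHANGED << 8) |
 * (NVME_LOG_CHANGED_NS << 16), matching the arguments used by
 * nvmet_ns_changed() below.
 */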
132 
133 static void nvmet_async_events_failall(struct nvmet_ctrl *ctrl)
134 {
135 	u16 status = NVME_SC_INTERNAL | NVME_SC_DNR;
136 	struct nvmet_req *req;
137 
138 	mutex_lock(&ctrl->lock);
139 	while (ctrl->nr_async_event_cmds) {
140 		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
141 		mutex_unlock(&ctrl->lock);
142 		nvmet_req_complete(req, status);
143 		mutex_lock(&ctrl->lock);
144 	}
145 	mutex_unlock(&ctrl->lock);
146 }
147 
148 static void nvmet_async_events_process(struct nvmet_ctrl *ctrl)
149 {
150 	struct nvmet_async_event *aen;
151 	struct nvmet_req *req;
152 
153 	mutex_lock(&ctrl->lock);
154 	while (ctrl->nr_async_event_cmds && !list_empty(&ctrl->async_events)) {
155 		aen = list_first_entry(&ctrl->async_events,
156 				       struct nvmet_async_event, entry);
157 		req = ctrl->async_event_cmds[--ctrl->nr_async_event_cmds];
158 		nvmet_set_result(req, nvmet_async_event_result(aen));
159 
160 		list_del(&aen->entry);
161 		kfree(aen);
162 
163 		mutex_unlock(&ctrl->lock);
164 		trace_nvmet_async_event(ctrl, req->cqe->result.u32);
165 		nvmet_req_complete(req, 0);
166 		mutex_lock(&ctrl->lock);
167 	}
168 	mutex_unlock(&ctrl->lock);
169 }
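/*
 * ctrl->lock is dropped around each nvmet_req_complete() call above so that
 * the transport's ->queue_response() handler never runs with the controller
 * lock held; the lock is re-taken before looking at the next queued event.
 */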
170 
171 static void nvmet_async_events_free(struct nvmet_ctrl *ctrl)
172 {
173 	struct nvmet_async_event *aen, *tmp;
174 
175 	mutex_lock(&ctrl->lock);
176 	list_for_each_entry_safe(aen, tmp, &ctrl->async_events, entry) {
177 		list_del(&aen->entry);
178 		kfree(aen);
179 	}
180 	mutex_unlock(&ctrl->lock);
181 }
182 
183 static void nvmet_async_event_work(struct work_struct *work)
184 {
185 	struct nvmet_ctrl *ctrl =
186 		container_of(work, struct nvmet_ctrl, async_event_work);
187 
188 	nvmet_async_events_process(ctrl);
189 }
190 
191 void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
192 		u8 event_info, u8 log_page)
193 {
194 	struct nvmet_async_event *aen;
195 
196 	aen = kmalloc(sizeof(*aen), GFP_KERNEL);
197 	if (!aen)
198 		return;
199 
200 	aen->event_type = event_type;
201 	aen->event_info = event_info;
202 	aen->log_page = log_page;
203 
204 	mutex_lock(&ctrl->lock);
205 	list_add_tail(&aen->entry, &ctrl->async_events);
206 	mutex_unlock(&ctrl->lock);
207 
208 	schedule_work(&ctrl->async_event_work);
209 }
210 
211 static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
212 {
213 	u32 i;
214 
215 	mutex_lock(&ctrl->lock);
216 	if (ctrl->nr_changed_ns > NVME_MAX_CHANGED_NAMESPACES)
217 		goto out_unlock;
218 
219 	for (i = 0; i < ctrl->nr_changed_ns; i++) {
220 		if (ctrl->changed_ns_list[i] == nsid)
221 			goto out_unlock;
222 	}
223 
224 	if (ctrl->nr_changed_ns == NVME_MAX_CHANGED_NAMESPACES) {
225 		ctrl->changed_ns_list[0] = cpu_to_le32(0xffffffff);
226 		ctrl->nr_changed_ns = U32_MAX;
227 		goto out_unlock;
228 	}
229 
230 	ctrl->changed_ns_list[ctrl->nr_changed_ns++] = nsid;
231 out_unlock:
232 	mutex_unlock(&ctrl->lock);
233 }
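/*
 * Once more than NVME_MAX_CHANGED_NAMESPACES namespaces have changed, the
 * list above is collapsed into a single 0xffffffff entry and nr_changed_ns
 * is pinned to U32_MAX, telling the host that the changed namespace list has
 * overflowed and that it should rescan all namespaces instead.
 */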
234 
235 void nvmet_ns_changed(struct nvmet_subsys *subsys, u32 nsid)
236 {
237 	struct nvmet_ctrl *ctrl;
238 
239 	lockdep_assert_held(&subsys->lock);
240 
241 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
242 		nvmet_add_to_changed_ns_log(ctrl, cpu_to_le32(nsid));
243 		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_NS_ATTR))
244 			continue;
245 		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
246 				NVME_AER_NOTICE_NS_CHANGED,
247 				NVME_LOG_CHANGED_NS);
248 	}
249 }
250 
251 void nvmet_send_ana_event(struct nvmet_subsys *subsys,
252 		struct nvmet_port *port)
253 {
254 	struct nvmet_ctrl *ctrl;
255 
256 	mutex_lock(&subsys->lock);
257 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
258 		if (port && ctrl->port != port)
259 			continue;
260 		if (nvmet_aen_bit_disabled(ctrl, NVME_AEN_BIT_ANA_CHANGE))
261 			continue;
262 		nvmet_add_async_event(ctrl, NVME_AER_TYPE_NOTICE,
263 				NVME_AER_NOTICE_ANA, NVME_LOG_ANA);
264 	}
265 	mutex_unlock(&subsys->lock);
266 }
267 
268 void nvmet_port_send_ana_event(struct nvmet_port *port)
269 {
270 	struct nvmet_subsys_link *p;
271 
272 	down_read(&nvmet_config_sem);
273 	list_for_each_entry(p, &port->subsystems, entry)
274 		nvmet_send_ana_event(p->subsys, port);
275 	up_read(&nvmet_config_sem);
276 }
277 
278 int nvmet_register_transport(const struct nvmet_fabrics_ops *ops)
279 {
280 	int ret = 0;
281 
282 	down_write(&nvmet_config_sem);
283 	if (nvmet_transports[ops->type])
284 		ret = -EINVAL;
285 	else
286 		nvmet_transports[ops->type] = ops;
287 	up_write(&nvmet_config_sem);
288 
289 	return ret;
290 }
291 EXPORT_SYMBOL_GPL(nvmet_register_transport);
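/*
 * A fabrics transport module is expected to register its nvmet_fabrics_ops
 * once at load time, roughly along these lines (sketch only, the nvmet_foo_*
 * names are illustrative):
 *
 *	static const struct nvmet_fabrics_ops nvmet_foo_ops = {
 *		.owner		= THIS_MODULE,
 *		.type		= NVMF_TRTYPE_TCP,
 *		.add_port	= nvmet_foo_add_port,
 *		.remove_port	= nvmet_foo_remove_port,
 *		.queue_response	= nvmet_foo_queue_response,
 *		.delete_ctrl	= nvmet_foo_delete_ctrl,
 *	};
 *
 *	return nvmet_register_transport(&nvmet_foo_ops);
 */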
292 
293 void nvmet_unregister_transport(const struct nvmet_fabrics_ops *ops)
294 {
295 	down_write(&nvmet_config_sem);
296 	nvmet_transports[ops->type] = NULL;
297 	up_write(&nvmet_config_sem);
298 }
299 EXPORT_SYMBOL_GPL(nvmet_unregister_transport);
300 
301 void nvmet_port_del_ctrls(struct nvmet_port *port, struct nvmet_subsys *subsys)
302 {
303 	struct nvmet_ctrl *ctrl;
304 
305 	mutex_lock(&subsys->lock);
306 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
307 		if (ctrl->port == port)
308 			ctrl->ops->delete_ctrl(ctrl);
309 	}
310 	mutex_unlock(&subsys->lock);
311 }
312 
313 int nvmet_enable_port(struct nvmet_port *port)
314 {
315 	const struct nvmet_fabrics_ops *ops;
316 	int ret;
317 
318 	lockdep_assert_held(&nvmet_config_sem);
319 
320 	ops = nvmet_transports[port->disc_addr.trtype];
321 	if (!ops) {
322 		up_write(&nvmet_config_sem);
323 		request_module("nvmet-transport-%d", port->disc_addr.trtype);
324 		down_write(&nvmet_config_sem);
325 		ops = nvmet_transports[port->disc_addr.trtype];
326 		if (!ops) {
327 			pr_err("transport type %d not supported\n",
328 				port->disc_addr.trtype);
329 			return -EINVAL;
330 		}
331 	}
332 
333 	if (!try_module_get(ops->owner))
334 		return -EINVAL;
335 
336 	/*
337 	 * If the user requested PI support and the transport isn't PI capable,
338 	 * don't enable the port.
339 	 */
340 	if (port->pi_enable && !(ops->flags & NVMF_METADATA_SUPPORTED)) {
341 		pr_err("T10-PI is not supported by transport type %d\n",
342 		       port->disc_addr.trtype);
343 		ret = -EINVAL;
344 		goto out_put;
345 	}
346 
347 	ret = ops->add_port(port);
348 	if (ret)
349 		goto out_put;
350 
351 	/* If the transport didn't set inline_data_size, then disable it. */
352 	if (port->inline_data_size < 0)
353 		port->inline_data_size = 0;
354 
355 	port->enabled = true;
356 	port->tr_ops = ops;
357 	return 0;
358 
359 out_put:
360 	module_put(ops->owner);
361 	return ret;
362 }
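/*
 * nvmet_config_sem is temporarily dropped above so that request_module() can
 * load a missing "nvmet-transport-<trtype>" module; that module's init path
 * calls nvmet_register_transport(), which needs the write lock itself.
 */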
363 
364 void nvmet_disable_port(struct nvmet_port *port)
365 {
366 	const struct nvmet_fabrics_ops *ops;
367 
368 	lockdep_assert_held(&nvmet_config_sem);
369 
370 	port->enabled = false;
371 	port->tr_ops = NULL;
372 
373 	ops = nvmet_transports[port->disc_addr.trtype];
374 	ops->remove_port(port);
375 	module_put(ops->owner);
376 }
377 
378 static void nvmet_keep_alive_timer(struct work_struct *work)
379 {
380 	struct nvmet_ctrl *ctrl = container_of(to_delayed_work(work),
381 			struct nvmet_ctrl, ka_work);
382 	bool reset_tbkas = ctrl->reset_tbkas;
383 
384 	ctrl->reset_tbkas = false;
385 	if (reset_tbkas) {
386 		pr_debug("ctrl %d reschedule traffic based keep-alive timer\n",
387 			ctrl->cntlid);
388 		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
389 		return;
390 	}
391 
392 	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
393 		ctrl->cntlid, ctrl->kato);
394 
395 	nvmet_ctrl_fatal_error(ctrl);
396 }
397 
398 void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
399 {
400 	if (unlikely(ctrl->kato == 0))
401 		return;
402 
403 	pr_debug("ctrl %d start keep-alive timer for %d secs\n",
404 		ctrl->cntlid, ctrl->kato);
405 
406 	INIT_DELAYED_WORK(&ctrl->ka_work, nvmet_keep_alive_timer);
407 	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
408 }
409 
410 void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
411 {
412 	if (unlikely(ctrl->kato == 0))
413 		return;
414 
415 	pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
416 
417 	cancel_delayed_work_sync(&ctrl->ka_work);
418 }
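/*
 * Keep-alive handling is traffic based: nvmet_req_init() sets reset_tbkas on
 * every command it accepts, and nvmet_keep_alive_timer() above simply
 * re-arms itself as long as that flag was set within the last KATO interval,
 * so only a genuinely idle host reaches the fatal error path.
 */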
419 
420 struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid)
421 {
422 	struct nvmet_ns *ns;
423 
424 	ns = xa_load(&ctrl->subsys->namespaces, le32_to_cpu(nsid));
425 	if (ns)
426 		percpu_ref_get(&ns->ref);
427 
428 	return ns;
429 }
430 
431 static void nvmet_destroy_namespace(struct percpu_ref *ref)
432 {
433 	struct nvmet_ns *ns = container_of(ref, struct nvmet_ns, ref);
434 
435 	complete(&ns->disable_done);
436 }
437 
438 void nvmet_put_namespace(struct nvmet_ns *ns)
439 {
440 	percpu_ref_put(&ns->ref);
441 }
442 
443 static void nvmet_ns_dev_disable(struct nvmet_ns *ns)
444 {
445 	nvmet_bdev_ns_disable(ns);
446 	nvmet_file_ns_disable(ns);
447 }
448 
449 static int nvmet_p2pmem_ns_enable(struct nvmet_ns *ns)
450 {
451 	int ret;
452 	struct pci_dev *p2p_dev;
453 
454 	if (!ns->use_p2pmem)
455 		return 0;
456 
457 	if (!ns->bdev) {
458 		pr_err("peer-to-peer DMA is not supported by non-block device namespaces\n");
459 		return -EINVAL;
460 	}
461 
462 	if (!blk_queue_pci_p2pdma(ns->bdev->bd_disk->queue)) {
463 		pr_err("peer-to-peer DMA is not supported by the driver of %s\n",
464 		       ns->device_path);
465 		return -EINVAL;
466 	}
467 
468 	if (ns->p2p_dev) {
469 		ret = pci_p2pdma_distance(ns->p2p_dev, nvmet_ns_dev(ns), true);
470 		if (ret < 0)
471 			return -EINVAL;
472 	} else {
473 		/*
474 		 * Right now we just check that there is p2pmem available so
475 		 * we can report an error to the user right away if there
476 		 * is not. We'll find the actual device to use once we
477 		 * setup the controller when the port's device is available.
478 		 */
479 
480 		p2p_dev = pci_p2pmem_find(nvmet_ns_dev(ns));
481 		if (!p2p_dev) {
482 			pr_err("no peer-to-peer memory is available for %s\n",
483 			       ns->device_path);
484 			return -EINVAL;
485 		}
486 
487 		pci_dev_put(p2p_dev);
488 	}
489 
490 	return 0;
491 }
492 
493 /*
494  * Note: ctrl->subsys->lock should be held when calling this function
495  */
496 static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
497 				    struct nvmet_ns *ns)
498 {
499 	struct device *clients[2];
500 	struct pci_dev *p2p_dev;
501 	int ret;
502 
503 	if (!ctrl->p2p_client || !ns->use_p2pmem)
504 		return;
505 
506 	if (ns->p2p_dev) {
507 		ret = pci_p2pdma_distance(ns->p2p_dev, ctrl->p2p_client, true);
508 		if (ret < 0)
509 			return;
510 
511 		p2p_dev = pci_dev_get(ns->p2p_dev);
512 	} else {
513 		clients[0] = ctrl->p2p_client;
514 		clients[1] = nvmet_ns_dev(ns);
515 
516 		p2p_dev = pci_p2pmem_find_many(clients, ARRAY_SIZE(clients));
517 		if (!p2p_dev) {
518 			pr_err("no peer-to-peer memory is available that's supported by %s and %s\n",
519 			       dev_name(ctrl->p2p_client), ns->device_path);
520 			return;
521 		}
522 	}
523 
524 	ret = radix_tree_insert(&ctrl->p2p_ns_map, ns->nsid, p2p_dev);
525 	if (ret < 0)
526 		pci_dev_put(p2p_dev);
527 
528 	pr_info("using p2pmem on %s for nsid %d\n", pci_name(p2p_dev),
529 		ns->nsid);
530 }
531 
532 void nvmet_ns_revalidate(struct nvmet_ns *ns)
533 {
534 	loff_t oldsize = ns->size;
535 
536 	if (ns->bdev)
537 		nvmet_bdev_ns_revalidate(ns);
538 	else
539 		nvmet_file_ns_revalidate(ns);
540 
541 	if (oldsize != ns->size)
542 		nvmet_ns_changed(ns->subsys, ns->nsid);
543 }
544 
545 int nvmet_ns_enable(struct nvmet_ns *ns)
546 {
547 	struct nvmet_subsys *subsys = ns->subsys;
548 	struct nvmet_ctrl *ctrl;
549 	int ret;
550 
551 	mutex_lock(&subsys->lock);
552 	ret = 0;
553 
554 	if (nvmet_passthru_ctrl(subsys)) {
555 		pr_info("cannot enable both passthru and regular namespaces for a single subsystem\n");
556 		goto out_unlock;
557 	}
558 
559 	if (ns->enabled)
560 		goto out_unlock;
561 
562 	ret = -EMFILE;
563 	if (subsys->nr_namespaces == NVMET_MAX_NAMESPACES)
564 		goto out_unlock;
565 
566 	ret = nvmet_bdev_ns_enable(ns);
567 	if (ret == -ENOTBLK)
568 		ret = nvmet_file_ns_enable(ns);
569 	if (ret)
570 		goto out_unlock;
571 
572 	ret = nvmet_p2pmem_ns_enable(ns);
573 	if (ret)
574 		goto out_dev_disable;
575 
576 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
577 		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
578 
579 	ret = percpu_ref_init(&ns->ref, nvmet_destroy_namespace,
580 				0, GFP_KERNEL);
581 	if (ret)
582 		goto out_dev_put;
583 
584 	if (ns->nsid > subsys->max_nsid)
585 		subsys->max_nsid = ns->nsid;
586 
587 	ret = xa_insert(&subsys->namespaces, ns->nsid, ns, GFP_KERNEL);
588 	if (ret)
589 		goto out_restore_subsys_maxnsid;
590 
591 	subsys->nr_namespaces++;
592 
593 	nvmet_ns_changed(subsys, ns->nsid);
594 	ns->enabled = true;
595 	ret = 0;
596 out_unlock:
597 	mutex_unlock(&subsys->lock);
598 	return ret;
599 
600 out_restore_subsys_maxnsid:
601 	subsys->max_nsid = nvmet_max_nsid(subsys);
602 	percpu_ref_exit(&ns->ref);
603 out_dev_put:
604 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
605 		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
606 out_dev_disable:
607 	nvmet_ns_dev_disable(ns);
608 	goto out_unlock;
609 }
610 
611 void nvmet_ns_disable(struct nvmet_ns *ns)
612 {
613 	struct nvmet_subsys *subsys = ns->subsys;
614 	struct nvmet_ctrl *ctrl;
615 
616 	mutex_lock(&subsys->lock);
617 	if (!ns->enabled)
618 		goto out_unlock;
619 
620 	ns->enabled = false;
621 	xa_erase(&ns->subsys->namespaces, ns->nsid);
622 	if (ns->nsid == subsys->max_nsid)
623 		subsys->max_nsid = nvmet_max_nsid(subsys);
624 
625 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
626 		pci_dev_put(radix_tree_delete(&ctrl->p2p_ns_map, ns->nsid));
627 
628 	mutex_unlock(&subsys->lock);
629 
630 	/*
631 	 * Now that we removed the namespaces from the lookup list, we
632 	 * can kill the per_cpu ref and wait for any remaining references
633  * to be dropped, as well as an RCU grace period for anyone only
634  * using the namespace under rcu_read_lock().  Note that we can't
635 	 * use call_rcu here as we need to ensure the namespaces have
636 	 * been fully destroyed before unloading the module.
637 	 */
638 	percpu_ref_kill(&ns->ref);
639 	synchronize_rcu();
640 	wait_for_completion(&ns->disable_done);
641 	percpu_ref_exit(&ns->ref);
642 
643 	mutex_lock(&subsys->lock);
644 
645 	subsys->nr_namespaces--;
646 	nvmet_ns_changed(subsys, ns->nsid);
647 	nvmet_ns_dev_disable(ns);
648 out_unlock:
649 	mutex_unlock(&subsys->lock);
650 }
651 
652 void nvmet_ns_free(struct nvmet_ns *ns)
653 {
654 	nvmet_ns_disable(ns);
655 
656 	down_write(&nvmet_ana_sem);
657 	nvmet_ana_group_enabled[ns->anagrpid]--;
658 	up_write(&nvmet_ana_sem);
659 
660 	kfree(ns->device_path);
661 	kfree(ns);
662 }
663 
664 struct nvmet_ns *nvmet_ns_alloc(struct nvmet_subsys *subsys, u32 nsid)
665 {
666 	struct nvmet_ns *ns;
667 
668 	ns = kzalloc(sizeof(*ns), GFP_KERNEL);
669 	if (!ns)
670 		return NULL;
671 
672 	init_completion(&ns->disable_done);
673 
674 	ns->nsid = nsid;
675 	ns->subsys = subsys;
676 
677 	down_write(&nvmet_ana_sem);
678 	ns->anagrpid = NVMET_DEFAULT_ANA_GRPID;
679 	nvmet_ana_group_enabled[ns->anagrpid]++;
680 	up_write(&nvmet_ana_sem);
681 
682 	uuid_gen(&ns->uuid);
683 	ns->buffered_io = false;
684 
685 	return ns;
686 }
687 
688 static void nvmet_update_sq_head(struct nvmet_req *req)
689 {
690 	if (req->sq->size) {
691 		u32 old_sqhd, new_sqhd;
692 
693 		do {
694 			old_sqhd = req->sq->sqhd;
695 			new_sqhd = (old_sqhd + 1) % req->sq->size;
696 		} while (cmpxchg(&req->sq->sqhd, old_sqhd, new_sqhd) !=
697 					old_sqhd);
698 	}
699 	req->cqe->sq_head = cpu_to_le16(req->sq->sqhd & 0x0000FFFF);
700 }
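/*
 * The cmpxchg() loop above advances the submission queue head without taking
 * any lock: completions from different contexts may race, and each retries
 * until its increment (modulo the queue size) lands on an unmodified sqhd.
 */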
701 
702 static void nvmet_set_error(struct nvmet_req *req, u16 status)
703 {
704 	struct nvmet_ctrl *ctrl = req->sq->ctrl;
705 	struct nvme_error_slot *new_error_slot;
706 	unsigned long flags;
707 
708 	req->cqe->status = cpu_to_le16(status << 1);
709 
710 	if (!ctrl || req->error_loc == NVMET_NO_ERROR_LOC)
711 		return;
712 
713 	spin_lock_irqsave(&ctrl->error_lock, flags);
714 	ctrl->err_counter++;
715 	new_error_slot =
716 		&ctrl->slots[ctrl->err_counter % NVMET_ERROR_LOG_SLOTS];
717 
718 	new_error_slot->error_count = cpu_to_le64(ctrl->err_counter);
719 	new_error_slot->sqid = cpu_to_le16(req->sq->qid);
720 	new_error_slot->cmdid = cpu_to_le16(req->cmd->common.command_id);
721 	new_error_slot->status_field = cpu_to_le16(status << 1);
722 	new_error_slot->param_error_location = cpu_to_le16(req->error_loc);
723 	new_error_slot->lba = cpu_to_le64(req->error_slba);
724 	new_error_slot->nsid = req->cmd->common.nsid;
725 	spin_unlock_irqrestore(&ctrl->error_lock, flags);
726 
727 	/* set the more bit for this request */
728 	req->cqe->status |= cpu_to_le16(1 << 14);
729 }
730 
731 static void __nvmet_req_complete(struct nvmet_req *req, u16 status)
732 {
733 	struct nvmet_ns *ns = req->ns;
734 
735 	if (!req->sq->sqhd_disabled)
736 		nvmet_update_sq_head(req);
737 	req->cqe->sq_id = cpu_to_le16(req->sq->qid);
738 	req->cqe->command_id = req->cmd->common.command_id;
739 
740 	if (unlikely(status))
741 		nvmet_set_error(req, status);
742 
743 	trace_nvmet_req_complete(req);
744 
745 	req->ops->queue_response(req);
746 	if (ns)
747 		nvmet_put_namespace(ns);
748 }
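/*
 * req->ns is saved in a local variable above because ->queue_response() may
 * free or reuse the request; the namespace reference is only dropped once the
 * transport has been handed the completion.
 */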
749 
750 void nvmet_req_complete(struct nvmet_req *req, u16 status)
751 {
752 	struct nvmet_sq *sq = req->sq;
753 
754 	__nvmet_req_complete(req, status);
755 	percpu_ref_put(&sq->ref);
756 }
757 EXPORT_SYMBOL_GPL(nvmet_req_complete);
758 
759 void nvmet_cq_setup(struct nvmet_ctrl *ctrl, struct nvmet_cq *cq,
760 		u16 qid, u16 size)
761 {
762 	cq->qid = qid;
763 	cq->size = size;
764 }
765 
766 void nvmet_sq_setup(struct nvmet_ctrl *ctrl, struct nvmet_sq *sq,
767 		u16 qid, u16 size)
768 {
769 	sq->sqhd = 0;
770 	sq->qid = qid;
771 	sq->size = size;
772 
773 	ctrl->sqs[qid] = sq;
774 }
775 
776 static void nvmet_confirm_sq(struct percpu_ref *ref)
777 {
778 	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
779 
780 	complete(&sq->confirm_done);
781 }
782 
783 void nvmet_sq_destroy(struct nvmet_sq *sq)
784 {
785 	struct nvmet_ctrl *ctrl = sq->ctrl;
786 
787 	/*
788 	 * If this is the admin queue, complete all AERs so that our
789 	 * queue doesn't have outstanding requests on it.
790 	 */
791 	if (ctrl && ctrl->sqs && ctrl->sqs[0] == sq)
792 		nvmet_async_events_failall(ctrl);
793 	percpu_ref_kill_and_confirm(&sq->ref, nvmet_confirm_sq);
794 	wait_for_completion(&sq->confirm_done);
795 	wait_for_completion(&sq->free_done);
796 	percpu_ref_exit(&sq->ref);
797 
798 	if (ctrl) {
799 		/*
800 		 * The teardown flow may take some time, and the host may not
801 		 * send us keep-alive during this period, hence reset the
802 		 * traffic based keep-alive timer so we don't trigger a
803 		 * controller teardown as a result of a keep-alive expiration.
804 		 */
805 		ctrl->reset_tbkas = true;
806 		nvmet_ctrl_put(ctrl);
807 		sq->ctrl = NULL; /* allows reusing the queue later */
808 	}
809 }
810 EXPORT_SYMBOL_GPL(nvmet_sq_destroy);
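/*
 * Submission queue teardown uses two completions: confirm_done fires from
 * nvmet_confirm_sq() once the percpu ref has been switched to atomic mode,
 * and free_done fires from nvmet_sq_free() when the last reference is
 * dropped, so nvmet_sq_destroy() only returns once no request can still be
 * in flight on the queue.
 */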
811 
812 static void nvmet_sq_free(struct percpu_ref *ref)
813 {
814 	struct nvmet_sq *sq = container_of(ref, struct nvmet_sq, ref);
815 
816 	complete(&sq->free_done);
817 }
818 
819 int nvmet_sq_init(struct nvmet_sq *sq)
820 {
821 	int ret;
822 
823 	ret = percpu_ref_init(&sq->ref, nvmet_sq_free, 0, GFP_KERNEL);
824 	if (ret) {
825 		pr_err("percpu_ref init failed!\n");
826 		return ret;
827 	}
828 	init_completion(&sq->free_done);
829 	init_completion(&sq->confirm_done);
830 
831 	return 0;
832 }
833 EXPORT_SYMBOL_GPL(nvmet_sq_init);
834 
835 static inline u16 nvmet_check_ana_state(struct nvmet_port *port,
836 		struct nvmet_ns *ns)
837 {
838 	enum nvme_ana_state state = port->ana_state[ns->anagrpid];
839 
840 	if (unlikely(state == NVME_ANA_INACCESSIBLE))
841 		return NVME_SC_ANA_INACCESSIBLE;
842 	if (unlikely(state == NVME_ANA_PERSISTENT_LOSS))
843 		return NVME_SC_ANA_PERSISTENT_LOSS;
844 	if (unlikely(state == NVME_ANA_CHANGE))
845 		return NVME_SC_ANA_TRANSITION;
846 	return 0;
847 }
848 
849 static inline u16 nvmet_io_cmd_check_access(struct nvmet_req *req)
850 {
851 	if (unlikely(req->ns->readonly)) {
852 		switch (req->cmd->common.opcode) {
853 		case nvme_cmd_read:
854 		case nvme_cmd_flush:
855 			break;
856 		default:
857 			return NVME_SC_NS_WRITE_PROTECTED;
858 		}
859 	}
860 
861 	return 0;
862 }
863 
864 static u16 nvmet_parse_io_cmd(struct nvmet_req *req)
865 {
866 	struct nvme_command *cmd = req->cmd;
867 	u16 ret;
868 
869 	ret = nvmet_check_ctrl_status(req, cmd);
870 	if (unlikely(ret))
871 		return ret;
872 
873 	if (nvmet_req_passthru_ctrl(req))
874 		return nvmet_parse_passthru_io_cmd(req);
875 
876 	req->ns = nvmet_find_namespace(req->sq->ctrl, cmd->rw.nsid);
877 	if (unlikely(!req->ns)) {
878 		req->error_loc = offsetof(struct nvme_common_command, nsid);
879 		return NVME_SC_INVALID_NS | NVME_SC_DNR;
880 	}
881 	ret = nvmet_check_ana_state(req->port, req->ns);
882 	if (unlikely(ret)) {
883 		req->error_loc = offsetof(struct nvme_common_command, nsid);
884 		return ret;
885 	}
886 	ret = nvmet_io_cmd_check_access(req);
887 	if (unlikely(ret)) {
888 		req->error_loc = offsetof(struct nvme_common_command, nsid);
889 		return ret;
890 	}
891 
892 	if (req->ns->file)
893 		return nvmet_file_parse_io_cmd(req);
894 	else
895 		return nvmet_bdev_parse_io_cmd(req);
896 }
897 
898 bool nvmet_req_init(struct nvmet_req *req, struct nvmet_cq *cq,
899 		struct nvmet_sq *sq, const struct nvmet_fabrics_ops *ops)
900 {
901 	u8 flags = req->cmd->common.flags;
902 	u16 status;
903 
904 	req->cq = cq;
905 	req->sq = sq;
906 	req->ops = ops;
907 	req->sg = NULL;
908 	req->metadata_sg = NULL;
909 	req->sg_cnt = 0;
910 	req->metadata_sg_cnt = 0;
911 	req->transfer_len = 0;
912 	req->metadata_len = 0;
913 	req->cqe->status = 0;
914 	req->cqe->sq_head = 0;
915 	req->ns = NULL;
916 	req->error_loc = NVMET_NO_ERROR_LOC;
917 	req->error_slba = 0;
918 
919 	/* no support for fused commands yet */
920 	if (unlikely(flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND))) {
921 		req->error_loc = offsetof(struct nvme_common_command, flags);
922 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
923 		goto fail;
924 	}
925 
926 	/*
927 	 * For fabrics, PSDT field shall describe metadata pointer (MPTR) that
928 	 * contains an address of a single contiguous physical buffer that is
929 	 * byte aligned.
930 	 */
931 	if (unlikely((flags & NVME_CMD_SGL_ALL) != NVME_CMD_SGL_METABUF)) {
932 		req->error_loc = offsetof(struct nvme_common_command, flags);
933 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
934 		goto fail;
935 	}
936 
937 	if (unlikely(!req->sq->ctrl))
938 		/* will return an error for any non-connect command: */
939 		status = nvmet_parse_connect_cmd(req);
940 	else if (likely(req->sq->qid != 0))
941 		status = nvmet_parse_io_cmd(req);
942 	else
943 		status = nvmet_parse_admin_cmd(req);
944 
945 	if (status)
946 		goto fail;
947 
948 	trace_nvmet_req_init(req, req->cmd);
949 
950 	if (unlikely(!percpu_ref_tryget_live(&sq->ref))) {
951 		status = NVME_SC_INVALID_FIELD | NVME_SC_DNR;
952 		goto fail;
953 	}
954 
955 	if (sq->ctrl)
956 		sq->ctrl->reset_tbkas = true;
957 
958 	return true;
959 
960 fail:
961 	__nvmet_req_complete(req, status);
962 	return false;
963 }
964 EXPORT_SYMBOL_GPL(nvmet_req_init);
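/*
 * Rough calling pattern for a transport receiving a command capsule (sketch,
 * the queue and ops names are illustrative):
 *
 *	if (!nvmet_req_init(&cmd->req, &queue->cq, &queue->sq, &nvmet_foo_ops))
 *		return;
 *
 * A false return means the request has already been completed with an error
 * status, so the transport must not touch it again; on success the transport
 * goes on to map the data and eventually calls nvmet_req_complete() exactly
 * once for the request.
 */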
965 
966 void nvmet_req_uninit(struct nvmet_req *req)
967 {
968 	percpu_ref_put(&req->sq->ref);
969 	if (req->ns)
970 		nvmet_put_namespace(req->ns);
971 }
972 EXPORT_SYMBOL_GPL(nvmet_req_uninit);
973 
974 bool nvmet_check_transfer_len(struct nvmet_req *req, size_t len)
975 {
976 	if (unlikely(len != req->transfer_len)) {
977 		req->error_loc = offsetof(struct nvme_common_command, dptr);
978 		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
979 		return false;
980 	}
981 
982 	return true;
983 }
984 EXPORT_SYMBOL_GPL(nvmet_check_transfer_len);
985 
986 bool nvmet_check_data_len_lte(struct nvmet_req *req, size_t data_len)
987 {
988 	if (unlikely(data_len > req->transfer_len)) {
989 		req->error_loc = offsetof(struct nvme_common_command, dptr);
990 		nvmet_req_complete(req, NVME_SC_SGL_INVALID_DATA | NVME_SC_DNR);
991 		return false;
992 	}
993 
994 	return true;
995 }
996 
997 static unsigned int nvmet_data_transfer_len(struct nvmet_req *req)
998 {
999 	return req->transfer_len - req->metadata_len;
1000 }
1001 
1002 static int nvmet_req_alloc_p2pmem_sgls(struct pci_dev *p2p_dev,
1003 		struct nvmet_req *req)
1004 {
1005 	req->sg = pci_p2pmem_alloc_sgl(p2p_dev, &req->sg_cnt,
1006 			nvmet_data_transfer_len(req));
1007 	if (!req->sg)
1008 		goto out_err;
1009 
1010 	if (req->metadata_len) {
1011 		req->metadata_sg = pci_p2pmem_alloc_sgl(p2p_dev,
1012 				&req->metadata_sg_cnt, req->metadata_len);
1013 		if (!req->metadata_sg)
1014 			goto out_free_sg;
1015 	}
1016 
1017 	req->p2p_dev = p2p_dev;
1018 
1019 	return 0;
1020 out_free_sg:
1021 	pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
1022 out_err:
1023 	return -ENOMEM;
1024 }
1025 
1026 static struct pci_dev *nvmet_req_find_p2p_dev(struct nvmet_req *req)
1027 {
1028 	if (!IS_ENABLED(CONFIG_PCI_P2PDMA) ||
1029 	    !req->sq->ctrl || !req->sq->qid || !req->ns)
1030 		return NULL;
1031 	return radix_tree_lookup(&req->sq->ctrl->p2p_ns_map, req->ns->nsid);
1032 }
1033 
1034 int nvmet_req_alloc_sgls(struct nvmet_req *req)
1035 {
1036 	struct pci_dev *p2p_dev = nvmet_req_find_p2p_dev(req);
1037 
1038 	if (p2p_dev && !nvmet_req_alloc_p2pmem_sgls(p2p_dev, req))
1039 		return 0;
1040 
1041 	req->sg = sgl_alloc(nvmet_data_transfer_len(req), GFP_KERNEL,
1042 			    &req->sg_cnt);
1043 	if (unlikely(!req->sg))
1044 		goto out;
1045 
1046 	if (req->metadata_len) {
1047 		req->metadata_sg = sgl_alloc(req->metadata_len, GFP_KERNEL,
1048 					     &req->metadata_sg_cnt);
1049 		if (unlikely(!req->metadata_sg))
1050 			goto out_free;
1051 	}
1052 
1053 	return 0;
1054 out_free:
1055 	sgl_free(req->sg);
1056 out:
1057 	return -ENOMEM;
1058 }
1059 EXPORT_SYMBOL_GPL(nvmet_req_alloc_sgls);
1060 
1061 void nvmet_req_free_sgls(struct nvmet_req *req)
1062 {
1063 	if (req->p2p_dev) {
1064 		pci_p2pmem_free_sgl(req->p2p_dev, req->sg);
1065 		if (req->metadata_sg)
1066 			pci_p2pmem_free_sgl(req->p2p_dev, req->metadata_sg);
1067 		req->p2p_dev = NULL;
1068 	} else {
1069 		sgl_free(req->sg);
1070 		if (req->metadata_sg)
1071 			sgl_free(req->metadata_sg);
1072 	}
1073 
1074 	req->sg = NULL;
1075 	req->metadata_sg = NULL;
1076 	req->sg_cnt = 0;
1077 	req->metadata_sg_cnt = 0;
1078 }
1079 EXPORT_SYMBOL_GPL(nvmet_req_free_sgls);
1080 
1081 static inline bool nvmet_cc_en(u32 cc)
1082 {
1083 	return (cc >> NVME_CC_EN_SHIFT) & 0x1;
1084 }
1085 
1086 static inline u8 nvmet_cc_css(u32 cc)
1087 {
1088 	return (cc >> NVME_CC_CSS_SHIFT) & 0x7;
1089 }
1090 
1091 static inline u8 nvmet_cc_mps(u32 cc)
1092 {
1093 	return (cc >> NVME_CC_MPS_SHIFT) & 0xf;
1094 }
1095 
1096 static inline u8 nvmet_cc_ams(u32 cc)
1097 {
1098 	return (cc >> NVME_CC_AMS_SHIFT) & 0x7;
1099 }
1100 
1101 static inline u8 nvmet_cc_shn(u32 cc)
1102 {
1103 	return (cc >> NVME_CC_SHN_SHIFT) & 0x3;
1104 }
1105 
1106 static inline u8 nvmet_cc_iosqes(u32 cc)
1107 {
1108 	return (cc >> NVME_CC_IOSQES_SHIFT) & 0xf;
1109 }
1110 
1111 static inline u8 nvmet_cc_iocqes(u32 cc)
1112 {
1113 	return (cc >> NVME_CC_IOCQES_SHIFT) & 0xf;
1114 }
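/*
 * The helpers above pull the individual controller configuration fields out
 * of the 32-bit CC value using the NVME_CC_*_SHIFT constants: EN (1 bit),
 * CSS (3 bits), MPS (4 bits), AMS (3 bits), SHN (2 bits) and the 4-bit I/O
 * queue entry size exponents IOSQES/IOCQES, matching the register layout in
 * the NVMe specification.
 */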
1115 
1116 static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
1117 {
1118 	lockdep_assert_held(&ctrl->lock);
1119 
1120 	/*
1121 	 * Only I/O controllers should verify iosqes,iocqes.
1122 	 * Strictly speaking, the spec says a discovery controller
1123 	 * should verify iosqes,iocqes are zeroed, however that
1124 	 * would break backwards compatibility, so don't enforce it.
1125 	 */
1126 	if (ctrl->subsys->type != NVME_NQN_DISC &&
1127 	    (nvmet_cc_iosqes(ctrl->cc) != NVME_NVM_IOSQES ||
1128 	     nvmet_cc_iocqes(ctrl->cc) != NVME_NVM_IOCQES)) {
1129 		ctrl->csts = NVME_CSTS_CFS;
1130 		return;
1131 	}
1132 
1133 	if (nvmet_cc_mps(ctrl->cc) != 0 ||
1134 	    nvmet_cc_ams(ctrl->cc) != 0 ||
1135 	    nvmet_cc_css(ctrl->cc) != 0) {
1136 		ctrl->csts = NVME_CSTS_CFS;
1137 		return;
1138 	}
1139 
1140 	ctrl->csts = NVME_CSTS_RDY;
1141 
1142 	/*
1143 	 * Controllers that are not yet enabled should not really enforce the
1144 	 * keep alive timeout, but we still want to track a timeout and cleanup
1145 	 * in case a host died before it enabled the controller.  Hence, simply
1146 	 * reset the keep alive timer when the controller is enabled.
1147 	 */
1148 	if (ctrl->kato)
1149 		mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
1150 }
1151 
1152 static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
1153 {
1154 	lockdep_assert_held(&ctrl->lock);
1155 
1156 	/* XXX: tear down queues? */
1157 	ctrl->csts &= ~NVME_CSTS_RDY;
1158 	ctrl->cc = 0;
1159 }
1160 
1161 void nvmet_update_cc(struct nvmet_ctrl *ctrl, u32 new)
1162 {
1163 	u32 old;
1164 
1165 	mutex_lock(&ctrl->lock);
1166 	old = ctrl->cc;
1167 	ctrl->cc = new;
1168 
1169 	if (nvmet_cc_en(new) && !nvmet_cc_en(old))
1170 		nvmet_start_ctrl(ctrl);
1171 	if (!nvmet_cc_en(new) && nvmet_cc_en(old))
1172 		nvmet_clear_ctrl(ctrl);
1173 	if (nvmet_cc_shn(new) && !nvmet_cc_shn(old)) {
1174 		nvmet_clear_ctrl(ctrl);
1175 		ctrl->csts |= NVME_CSTS_SHST_CMPLT;
1176 	}
1177 	if (!nvmet_cc_shn(new) && nvmet_cc_shn(old))
1178 		ctrl->csts &= ~NVME_CSTS_SHST_CMPLT;
1179 	mutex_unlock(&ctrl->lock);
1180 }
1181 
1182 static void nvmet_init_cap(struct nvmet_ctrl *ctrl)
1183 {
1184 	/* command sets supported: NVMe command set: */
1185 	ctrl->cap = (1ULL << 37);
1186 	/* CC.EN timeout in 500msec units: */
1187 	ctrl->cap |= (15ULL << 24);
1188 	/* maximum queue entries supported: */
1189 	ctrl->cap |= NVMET_QUEUE_SIZE - 1;
1190 }
1191 
1192 struct nvmet_ctrl *nvmet_ctrl_find_get(const char *subsysnqn,
1193 				       const char *hostnqn, u16 cntlid,
1194 				       struct nvmet_req *req)
1195 {
1196 	struct nvmet_ctrl *ctrl = NULL;
1197 	struct nvmet_subsys *subsys;
1198 
1199 	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
1200 	if (!subsys) {
1201 		pr_warn("connect request for invalid subsystem %s!\n",
1202 			subsysnqn);
1203 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
1204 		goto out;
1205 	}
1206 
1207 	mutex_lock(&subsys->lock);
1208 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry) {
1209 		if (ctrl->cntlid == cntlid) {
1210 			if (strncmp(hostnqn, ctrl->hostnqn, NVMF_NQN_SIZE)) {
1211 				pr_warn("hostnqn mismatch.\n");
1212 				continue;
1213 			}
1214 			if (!kref_get_unless_zero(&ctrl->ref))
1215 				continue;
1216 
1217 			/* ctrl found */
1218 			goto found;
1219 		}
1220 	}
1221 
1222 	ctrl = NULL; /* ctrl not found */
1223 	pr_warn("could not find controller %d for subsys %s / host %s\n",
1224 		cntlid, subsysnqn, hostnqn);
1225 	req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(cntlid);
1226 
1227 found:
1228 	mutex_unlock(&subsys->lock);
1229 	nvmet_subsys_put(subsys);
1230 out:
1231 	return ctrl;
1232 }
1233 
1234 u16 nvmet_check_ctrl_status(struct nvmet_req *req, struct nvme_command *cmd)
1235 {
1236 	if (unlikely(!(req->sq->ctrl->cc & NVME_CC_ENABLE))) {
1237 		pr_err("got cmd %d while CC.EN == 0 on qid = %d\n",
1238 		       cmd->common.opcode, req->sq->qid);
1239 		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
1240 	}
1241 
1242 	if (unlikely(!(req->sq->ctrl->csts & NVME_CSTS_RDY))) {
1243 		pr_err("got cmd %d while CSTS.RDY == 0 on qid = %d\n",
1244 		       cmd->common.opcode, req->sq->qid);
1245 		return NVME_SC_CMD_SEQ_ERROR | NVME_SC_DNR;
1246 	}
1247 	return 0;
1248 }
1249 
1250 bool nvmet_host_allowed(struct nvmet_subsys *subsys, const char *hostnqn)
1251 {
1252 	struct nvmet_host_link *p;
1253 
1254 	lockdep_assert_held(&nvmet_config_sem);
1255 
1256 	if (subsys->allow_any_host)
1257 		return true;
1258 
1259 	if (subsys->type == NVME_NQN_DISC) /* allow all access to disc subsys */
1260 		return true;
1261 
1262 	list_for_each_entry(p, &subsys->hosts, entry) {
1263 		if (!strcmp(nvmet_host_name(p->host), hostnqn))
1264 			return true;
1265 	}
1266 
1267 	return false;
1268 }
1269 
1270 /*
1271  * Note: ctrl->subsys->lock should be held when calling this function
1272  */
1273 static void nvmet_setup_p2p_ns_map(struct nvmet_ctrl *ctrl,
1274 		struct nvmet_req *req)
1275 {
1276 	struct nvmet_ns *ns;
1277 	unsigned long idx;
1278 
1279 	if (!req->p2p_client)
1280 		return;
1281 
1282 	ctrl->p2p_client = get_device(req->p2p_client);
1283 
1284 	xa_for_each(&ctrl->subsys->namespaces, idx, ns)
1285 		nvmet_p2pmem_ns_add_p2p(ctrl, ns);
1286 }
1287 
1288 /*
1289  * Note: ctrl->subsys->lock should be held when calling this function
1290  */
1291 static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
1292 {
1293 	struct radix_tree_iter iter;
1294 	void __rcu **slot;
1295 
1296 	radix_tree_for_each_slot(slot, &ctrl->p2p_ns_map, &iter, 0)
1297 		pci_dev_put(radix_tree_deref_slot(slot));
1298 
1299 	put_device(ctrl->p2p_client);
1300 }
1301 
1302 static void nvmet_fatal_error_handler(struct work_struct *work)
1303 {
1304 	struct nvmet_ctrl *ctrl =
1305 			container_of(work, struct nvmet_ctrl, fatal_err_work);
1306 
1307 	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
1308 	ctrl->ops->delete_ctrl(ctrl);
1309 }
1310 
1311 u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
1312 		struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
1313 {
1314 	struct nvmet_subsys *subsys;
1315 	struct nvmet_ctrl *ctrl;
1316 	int ret;
1317 	u16 status;
1318 
1319 	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
1320 	subsys = nvmet_find_get_subsys(req->port, subsysnqn);
1321 	if (!subsys) {
1322 		pr_warn("connect request for invalid subsystem %s!\n",
1323 			subsysnqn);
1324 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(subsysnqn);
1325 		goto out;
1326 	}
1327 
1328 	status = NVME_SC_CONNECT_INVALID_PARAM | NVME_SC_DNR;
1329 	down_read(&nvmet_config_sem);
1330 	if (!nvmet_host_allowed(subsys, hostnqn)) {
1331 		pr_info("connect by host %s for subsystem %s not allowed\n",
1332 			hostnqn, subsysnqn);
1333 		req->cqe->result.u32 = IPO_IATTR_CONNECT_DATA(hostnqn);
1334 		up_read(&nvmet_config_sem);
1335 		status = NVME_SC_CONNECT_INVALID_HOST | NVME_SC_DNR;
1336 		goto out_put_subsystem;
1337 	}
1338 	up_read(&nvmet_config_sem);
1339 
1340 	status = NVME_SC_INTERNAL;
1341 	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
1342 	if (!ctrl)
1343 		goto out_put_subsystem;
1344 	mutex_init(&ctrl->lock);
1345 
1346 	nvmet_init_cap(ctrl);
1347 
1348 	ctrl->port = req->port;
1349 
1350 	INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
1351 	INIT_LIST_HEAD(&ctrl->async_events);
1352 	INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
1353 	INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
1354 
1355 	memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
1356 	memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
1357 
1358 	kref_init(&ctrl->ref);
1359 	ctrl->subsys = subsys;
1360 	WRITE_ONCE(ctrl->aen_enabled, NVMET_AEN_CFG_OPTIONAL);
1361 
1362 	ctrl->changed_ns_list = kmalloc_array(NVME_MAX_CHANGED_NAMESPACES,
1363 			sizeof(__le32), GFP_KERNEL);
1364 	if (!ctrl->changed_ns_list)
1365 		goto out_free_ctrl;
1366 
1367 	ctrl->sqs = kcalloc(subsys->max_qid + 1,
1368 			sizeof(struct nvmet_sq *),
1369 			GFP_KERNEL);
1370 	if (!ctrl->sqs)
1371 		goto out_free_changed_ns_list;
1372 
1373 	if (subsys->cntlid_min > subsys->cntlid_max)
1374 		goto out_free_sqs;
1375 
1376 	ret = ida_simple_get(&cntlid_ida,
1377 			     subsys->cntlid_min, subsys->cntlid_max,
1378 			     GFP_KERNEL);
1379 	if (ret < 0) {
1380 		status = NVME_SC_CONNECT_CTRL_BUSY | NVME_SC_DNR;
1381 		goto out_free_sqs;
1382 	}
1383 	ctrl->cntlid = ret;
1384 
1385 	ctrl->ops = req->ops;
1386 
1387 	/*
1388 	 * Discovery controllers may use some arbitrary high value
1389 	 * in order to clean up stale discovery sessions
1390 	 */
1391 	if ((ctrl->subsys->type == NVME_NQN_DISC) && !kato)
1392 		kato = NVMET_DISC_KATO_MS;
1393 
1394 	/* keep-alive timeout in seconds */
1395 	ctrl->kato = DIV_ROUND_UP(kato, 1000);
1396 
1397 	ctrl->err_counter = 0;
1398 	spin_lock_init(&ctrl->error_lock);
1399 
1400 	nvmet_start_keep_alive_timer(ctrl);
1401 
1402 	mutex_lock(&subsys->lock);
1403 	list_add_tail(&ctrl->subsys_entry, &subsys->ctrls);
1404 	nvmet_setup_p2p_ns_map(ctrl, req);
1405 	mutex_unlock(&subsys->lock);
1406 
1407 	*ctrlp = ctrl;
1408 	return 0;
1409 
1410 out_free_sqs:
1411 	kfree(ctrl->sqs);
1412 out_free_changed_ns_list:
1413 	kfree(ctrl->changed_ns_list);
1414 out_free_ctrl:
1415 	kfree(ctrl);
1416 out_put_subsystem:
1417 	nvmet_subsys_put(subsys);
1418 out:
1419 	return status;
1420 }
1421 
1422 static void nvmet_ctrl_free(struct kref *ref)
1423 {
1424 	struct nvmet_ctrl *ctrl = container_of(ref, struct nvmet_ctrl, ref);
1425 	struct nvmet_subsys *subsys = ctrl->subsys;
1426 
1427 	mutex_lock(&subsys->lock);
1428 	nvmet_release_p2p_ns_map(ctrl);
1429 	list_del(&ctrl->subsys_entry);
1430 	mutex_unlock(&subsys->lock);
1431 
1432 	nvmet_stop_keep_alive_timer(ctrl);
1433 
1434 	flush_work(&ctrl->async_event_work);
1435 	cancel_work_sync(&ctrl->fatal_err_work);
1436 
1437 	ida_simple_remove(&cntlid_ida, ctrl->cntlid);
1438 
1439 	nvmet_async_events_free(ctrl);
1440 	kfree(ctrl->sqs);
1441 	kfree(ctrl->changed_ns_list);
1442 	kfree(ctrl);
1443 
1444 	nvmet_subsys_put(subsys);
1445 }
1446 
1447 void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
1448 {
1449 	kref_put(&ctrl->ref, nvmet_ctrl_free);
1450 }
1451 
1452 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
1453 {
1454 	mutex_lock(&ctrl->lock);
1455 	if (!(ctrl->csts & NVME_CSTS_CFS)) {
1456 		ctrl->csts |= NVME_CSTS_CFS;
1457 		schedule_work(&ctrl->fatal_err_work);
1458 	}
1459 	mutex_unlock(&ctrl->lock);
1460 }
1461 EXPORT_SYMBOL_GPL(nvmet_ctrl_fatal_error);
1462 
1463 static struct nvmet_subsys *nvmet_find_get_subsys(struct nvmet_port *port,
1464 		const char *subsysnqn)
1465 {
1466 	struct nvmet_subsys_link *p;
1467 
1468 	if (!port)
1469 		return NULL;
1470 
1471 	if (!strcmp(NVME_DISC_SUBSYS_NAME, subsysnqn)) {
1472 		if (!kref_get_unless_zero(&nvmet_disc_subsys->ref))
1473 			return NULL;
1474 		return nvmet_disc_subsys;
1475 	}
1476 
1477 	down_read(&nvmet_config_sem);
1478 	list_for_each_entry(p, &port->subsystems, entry) {
1479 		if (!strncmp(p->subsys->subsysnqn, subsysnqn,
1480 				NVMF_NQN_SIZE)) {
1481 			if (!kref_get_unless_zero(&p->subsys->ref))
1482 				break;
1483 			up_read(&nvmet_config_sem);
1484 			return p->subsys;
1485 		}
1486 	}
1487 	up_read(&nvmet_config_sem);
1488 	return NULL;
1489 }
1490 
1491 struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
1492 		enum nvme_subsys_type type)
1493 {
1494 	struct nvmet_subsys *subsys;
1495 
1496 	subsys = kzalloc(sizeof(*subsys), GFP_KERNEL);
1497 	if (!subsys)
1498 		return ERR_PTR(-ENOMEM);
1499 
1500 	subsys->ver = NVMET_DEFAULT_VS;
1501 	/* generate a random serial number as our controllers are ephemeral: */
1502 	get_random_bytes(&subsys->serial, sizeof(subsys->serial));
1503 
1504 	switch (type) {
1505 	case NVME_NQN_NVME:
1506 		subsys->max_qid = NVMET_NR_QUEUES;
1507 		break;
1508 	case NVME_NQN_DISC:
1509 		subsys->max_qid = 0;
1510 		break;
1511 	default:
1512 		pr_err("%s: Unknown Subsystem type - %d\n", __func__, type);
1513 		kfree(subsys);
1514 		return ERR_PTR(-EINVAL);
1515 	}
1516 	subsys->type = type;
1517 	subsys->subsysnqn = kstrndup(subsysnqn, NVMF_NQN_SIZE,
1518 			GFP_KERNEL);
1519 	if (!subsys->subsysnqn) {
1520 		kfree(subsys);
1521 		return ERR_PTR(-ENOMEM);
1522 	}
1523 	subsys->cntlid_min = NVME_CNTLID_MIN;
1524 	subsys->cntlid_max = NVME_CNTLID_MAX;
1525 	kref_init(&subsys->ref);
1526 
1527 	mutex_init(&subsys->lock);
1528 	xa_init(&subsys->namespaces);
1529 	INIT_LIST_HEAD(&subsys->ctrls);
1530 	INIT_LIST_HEAD(&subsys->hosts);
1531 
1532 	return subsys;
1533 }
1534 
1535 static void nvmet_subsys_free(struct kref *ref)
1536 {
1537 	struct nvmet_subsys *subsys =
1538 		container_of(ref, struct nvmet_subsys, ref);
1539 
1540 	WARN_ON_ONCE(!xa_empty(&subsys->namespaces));
1541 
1542 	xa_destroy(&subsys->namespaces);
1543 	nvmet_passthru_subsys_free(subsys);
1544 
1545 	kfree(subsys->subsysnqn);
1546 	kfree_rcu(subsys->model, rcuhead);
1547 	kfree(subsys);
1548 }
1549 
1550 void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
1551 {
1552 	struct nvmet_ctrl *ctrl;
1553 
1554 	mutex_lock(&subsys->lock);
1555 	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
1556 		ctrl->ops->delete_ctrl(ctrl);
1557 	mutex_unlock(&subsys->lock);
1558 }
1559 
1560 void nvmet_subsys_put(struct nvmet_subsys *subsys)
1561 {
1562 	kref_put(&subsys->ref, nvmet_subsys_free);
1563 }
1564 
1565 static int __init nvmet_init(void)
1566 {
1567 	int error;
1568 
1569 	nvmet_ana_group_enabled[NVMET_DEFAULT_ANA_GRPID] = 1;
1570 
1571 	buffered_io_wq = alloc_workqueue("nvmet-buffered-io-wq",
1572 			WQ_MEM_RECLAIM, 0);
1573 	if (!buffered_io_wq) {
1574 		error = -ENOMEM;
1575 		goto out;
1576 	}
1577 
1578 	error = nvmet_init_discovery();
1579 	if (error)
1580 		goto out_free_work_queue;
1581 
1582 	error = nvmet_init_configfs();
1583 	if (error)
1584 		goto out_exit_discovery;
1585 	return 0;
1586 
1587 out_exit_discovery:
1588 	nvmet_exit_discovery();
1589 out_free_work_queue:
1590 	destroy_workqueue(buffered_io_wq);
1591 out:
1592 	return error;
1593 }
1594 
1595 static void __exit nvmet_exit(void)
1596 {
1597 	nvmet_exit_configfs();
1598 	nvmet_exit_discovery();
1599 	ida_destroy(&cntlid_ida);
1600 	destroy_workqueue(buffered_io_wq);
1601 
1602 	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_entry) != 1024);
1603 	BUILD_BUG_ON(sizeof(struct nvmf_disc_rsp_page_hdr) != 1024);
1604 }
1605 
1606 module_init(nvmet_init);
1607 module_exit(nvmet_exit);
1608 
1609 MODULE_LICENSE("GPL v2");
1610 MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver);
1611