// SPDX-License-Identifier: GPL-2.0
/*
 * NVMe over Fabrics loopback device.
 * Copyright (c) 2015-2016 HGST, a Western Digital Company.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/scatterlist.h>
#include <linux/blk-mq.h>
#include <linux/nvme.h>
#include <linux/module.h>
#include <linux/parser.h>
#include "nvmet.h"
#include "../host/nvme.h"
#include "../host/fabrics.h"

#define NVME_LOOP_MAX_SEGMENTS		256

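/*
 * Per-request context: the NVMe command/completion pair plus the nvmet
 * request that carries it to the target side, and an inline scatterlist
 * that chains out if the I/O has more segments than fit here.
 */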
struct nvme_loop_iod {
	struct nvme_request	nvme_req;
	struct nvme_command	cmd;
	struct nvme_completion	cqe;
	struct nvmet_req	req;
	struct nvme_loop_queue	*queue;
	struct work_struct	work;
	struct sg_table		sg_table;
	struct scatterlist	first_sgl[];
};

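/*
 * One loopback controller: the host-side nvme_ctrl embedded alongside the
 * tag sets for its admin and I/O queues, plus pointers to the target
 * controller and port it is connected to.
 */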
struct nvme_loop_ctrl {
	struct nvme_loop_queue	*queues;

	struct blk_mq_tag_set	admin_tag_set;

	struct list_head	list;
	struct blk_mq_tag_set	tag_set;
	struct nvme_loop_iod	async_event_iod;
	struct nvme_ctrl	ctrl;

	struct nvmet_ctrl	*target_ctrl;
	struct nvmet_port	*port;
};

static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
{
	return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}

enum nvme_loop_queue_flags {
	NVME_LOOP_Q_LIVE	= 0,
};

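/*
 * A queue pair: the target-side CQ/SQ backing one host-side hardware
 * context.  Queue 0 is the admin queue; the rest are I/O queues.
 */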
struct nvme_loop_queue {
	struct nvmet_cq		nvme_cq;
	struct nvmet_sq		nvme_sq;
	struct nvme_loop_ctrl	*ctrl;
	unsigned long		flags;
};

static LIST_HEAD(nvme_loop_ports);
static DEFINE_MUTEX(nvme_loop_ports_mutex);

static LIST_HEAD(nvme_loop_ctrl_list);
static DEFINE_MUTEX(nvme_loop_ctrl_mutex);

static void nvme_loop_queue_response(struct nvmet_req *nvme_req);
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *ctrl);

static const struct nvmet_fabrics_ops nvme_loop_ops;

static inline int nvme_loop_queue_idx(struct nvme_loop_queue *queue)
{
	return queue - queue->ctrl->queues;
}

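/*
 * Final completion on the host side: undo command setup, release the
 * chained scatterlist and hand the request back to the core.
 */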
static void nvme_loop_complete_rq(struct request *req)
{
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);

	nvme_cleanup_cmd(req);
	sg_free_table_chained(&iod->sg_table, SG_CHUNK_SIZE);
	nvme_complete_rq(req);
}

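/*
 * Map a queue back to the tag set its requests were allocated from:
 * queue 0 uses the admin tag set, all others the (0-based) I/O tag set.
 */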
static struct blk_mq_tags *nvme_loop_tagset(struct nvme_loop_queue *queue)
{
	u32 queue_idx = nvme_loop_queue_idx(queue);

	if (queue_idx == 0)
		return queue->ctrl->admin_tag_set.tags[queue_idx];
	return queue->ctrl->tag_set.tags[queue_idx - 1];
}

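/*
 * Target-side completion hook: called by the nvmet core when a command is
 * done.  Look up the originating host request by command ID and complete it.
 */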
static void nvme_loop_queue_response(struct nvmet_req *req)
{
	struct nvme_loop_queue *queue =
		container_of(req->sq, struct nvme_loop_queue, nvme_sq);
	struct nvme_completion *cqe = req->cqe;

	/*
	 * AEN requests are special as they don't time out and can
	 * survive any kind of queue freeze and often don't respond to
	 * aborts.  We don't even bother to allocate a struct request
	 * for them but rather special case them here.
	 */
	if (unlikely(nvme_loop_queue_idx(queue) == 0 &&
			cqe->command_id >= NVME_AQ_BLK_MQ_DEPTH)) {
		nvme_complete_async_event(&queue->ctrl->ctrl, cqe->status,
				&cqe->result);
	} else {
		struct request *rq;

		rq = blk_mq_tag_to_rq(nvme_loop_tagset(queue), cqe->command_id);
		if (!rq) {
			dev_err(queue->ctrl->ctrl.device,
				"tag 0x%x on queue %d not found\n",
				cqe->command_id, nvme_loop_queue_idx(queue));
			return;
		}

		nvme_end_request(rq, cqe->status, cqe->result);
	}
}

static void nvme_loop_execute_work(struct work_struct *work)
{
	struct nvme_loop_iod *iod =
		container_of(work, struct nvme_loop_iod, work);

	nvmet_req_execute(&iod->req);
}

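/*
 * ->queue_rq: the loopback "wire".  Instead of posting the command to a
 * transport, initialize an nvmet request from it, map the data into a
 * scatterlist shared with the target, and punt execution to a workqueue.
 */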
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct nvme_ns *ns = hctx->queue->queuedata;
	struct nvme_loop_queue *queue = hctx->driver_data;
	struct request *req = bd->rq;
	struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
	bool queue_ready = test_bit(NVME_LOOP_Q_LIVE, &queue->flags);
	blk_status_t ret;

	if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);

	ret = nvme_setup_cmd(ns, req, &iod->cmd);
	if (ret)
		return ret;

	blk_mq_start_request(req);
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
	iod->req.port = queue->ctrl->port;
	if (!nvmet_req_init(&iod->req, &queue->nvme_cq,
			&queue->nvme_sq, &nvme_loop_ops))
		return BLK_STS_OK;

	if (blk_rq_nr_phys_segments(req)) {
		iod->sg_table.sgl = iod->first_sgl;
		if (sg_alloc_table_chained(&iod->sg_table,
				blk_rq_nr_phys_segments(req),
				iod->sg_table.sgl, SG_CHUNK_SIZE)) {
			nvme_cleanup_cmd(req);
			return BLK_STS_RESOURCE;
		}

		iod->req.sg = iod->sg_table.sgl;
		iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
		iod->req.transfer_len = blk_rq_payload_bytes(req);
	}

	schedule_work(&iod->work);
	return BLK_STS_OK;
}

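/*
 * AEN commands use a reserved command ID (NVME_AQ_BLK_MQ_DEPTH) instead of
 * a blk-mq tag; nvme_loop_queue_response() matches on that ID above.
 */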
static void nvme_loop_submit_async_event(struct nvme_ctrl *arg)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(arg);
	struct nvme_loop_queue *queue = &ctrl->queues[0];
	struct nvme_loop_iod *iod = &ctrl->async_event_iod;

	memset(&iod->cmd, 0, sizeof(iod->cmd));
	iod->cmd.common.opcode = nvme_admin_async_event;
	iod->cmd.common.command_id = NVME_AQ_BLK_MQ_DEPTH;
	iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;

	if (!nvmet_req_init(&iod->req, &queue->nvme_cq, &queue->nvme_sq,
			&nvme_loop_ops)) {
		dev_err(ctrl->ctrl.device, "failed async event work\n");
		return;
	}

	schedule_work(&iod->work);
}

static int nvme_loop_init_iod(struct nvme_loop_ctrl *ctrl,
		struct nvme_loop_iod *iod, unsigned int queue_idx)
{
	iod->req.cmd = &iod->cmd;
	iod->req.cqe = &iod->cqe;
	iod->queue = &ctrl->queues[queue_idx];
	INIT_WORK(&iod->work, nvme_loop_execute_work);
	return 0;
}

static int nvme_loop_init_request(struct blk_mq_tag_set *set,
		struct request *req, unsigned int hctx_idx,
		unsigned int numa_node)
{
	struct nvme_loop_ctrl *ctrl = set->driver_data;

	nvme_req(req)->ctrl = &ctrl->ctrl;
	return nvme_loop_init_iod(ctrl, blk_mq_rq_to_pdu(req),
			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
}

static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1];

	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);

	hctx->driver_data = queue;
	return 0;
}

static int nvme_loop_init_admin_hctx(struct blk_mq_hw_ctx *hctx, void *data,
		unsigned int hctx_idx)
{
	struct nvme_loop_ctrl *ctrl = data;
	struct nvme_loop_queue *queue = &ctrl->queues[0];

	BUG_ON(hctx_idx != 0);

	hctx->driver_data = queue;
	return 0;
}

static const struct blk_mq_ops nvme_loop_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_hctx,
};

static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
	.queue_rq	= nvme_loop_queue_rq,
	.complete	= nvme_loop_complete_rq,
	.init_request	= nvme_loop_init_request,
	.init_hctx	= nvme_loop_init_admin_hctx,
};

static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags))
		return;
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
}

static void nvme_loop_free_ctrl(struct nvme_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl = to_loop_ctrl(nctrl);

	if (list_empty(&ctrl->list))
		goto free_ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_del(&ctrl->list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	if (nctrl->tagset) {
		blk_cleanup_queue(ctrl->ctrl.connect_q);
		blk_mq_free_tag_set(&ctrl->tag_set);
	}
	kfree(ctrl->queues);
	nvmf_free_options(nctrl->opts);
free_ctrl:
	kfree(ctrl);
}

static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
		nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
	}
	ctrl->ctrl.queue_count = 1;
}

static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
{
	struct nvmf_ctrl_options *opts = ctrl->ctrl.opts;
	unsigned int nr_io_queues;
	int ret, i;

	nr_io_queues = min(opts->nr_io_queues, num_online_cpus());
	ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues);
	if (ret || !nr_io_queues)
		return ret;

	dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues);

	for (i = 1; i <= nr_io_queues; i++) {
		ctrl->queues[i].ctrl = ctrl;
		ret = nvmet_sq_init(&ctrl->queues[i].nvme_sq);
		if (ret)
			goto out_destroy_queues;

		ctrl->ctrl.queue_count++;
	}

	return 0;

out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int i, ret;

	for (i = 1; i < ctrl->ctrl.queue_count; i++) {
		ret = nvmf_connect_io_queue(&ctrl->ctrl, i, false);
		if (ret)
			return ret;
		set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
	}

	return 0;
}

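/*
 * Bring up the admin queue: allocate its tag set and request queues, send
 * the fabrics Connect command, enable the controller and identify it.
 * The teardown labels unwind in reverse order of setup.
 */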
static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
{
	int error;

	memset(&ctrl->admin_tag_set, 0, sizeof(ctrl->admin_tag_set));
	ctrl->admin_tag_set.ops = &nvme_loop_admin_mq_ops;
	ctrl->admin_tag_set.queue_depth = NVME_AQ_MQ_TAG_DEPTH;
	ctrl->admin_tag_set.reserved_tags = 2; /* connect + keep-alive */
	ctrl->admin_tag_set.numa_node = NUMA_NO_NODE;
	ctrl->admin_tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->admin_tag_set.driver_data = ctrl;
	ctrl->admin_tag_set.nr_hw_queues = 1;
	ctrl->admin_tag_set.timeout = ADMIN_TIMEOUT;
	ctrl->admin_tag_set.flags = BLK_MQ_F_NO_SCHED;

	ctrl->queues[0].ctrl = ctrl;
	error = nvmet_sq_init(&ctrl->queues[0].nvme_sq);
	if (error)
		return error;
	ctrl->ctrl.queue_count = 1;

	error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set);
	if (error)
		goto out_free_sq;
	ctrl->ctrl.admin_tagset = &ctrl->admin_tag_set;

	ctrl->ctrl.fabrics_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.fabrics_q)) {
		error = PTR_ERR(ctrl->ctrl.fabrics_q);
		goto out_free_tagset;
	}

	ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
	if (IS_ERR(ctrl->ctrl.admin_q)) {
		error = PTR_ERR(ctrl->ctrl.admin_q);
		goto out_cleanup_fabrics_q;
	}

	error = nvmf_connect_admin_queue(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);

	error = nvme_enable_ctrl(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	ctrl->ctrl.max_hw_sectors =
		(NVME_LOOP_MAX_SEGMENTS - 1) << (PAGE_SHIFT - 9);

	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);

	error = nvme_init_identify(&ctrl->ctrl);
	if (error)
		goto out_cleanup_queue;

	return 0;

out_cleanup_queue:
	clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
	blk_cleanup_queue(ctrl->ctrl.admin_q);
out_cleanup_fabrics_q:
	blk_cleanup_queue(ctrl->ctrl.fabrics_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->admin_tag_set);
out_free_sq:
	nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
	return error;
}

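/*
 * Quiesce and tear down all queues, cancelling any requests still in
 * flight, and shut the controller down if it is still live.
 */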
static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl)
{
	if (ctrl->ctrl.queue_count > 1) {
		nvme_stop_queues(&ctrl->ctrl);
		blk_mq_tagset_busy_iter(&ctrl->tag_set,
					nvme_cancel_request, &ctrl->ctrl);
		blk_mq_tagset_wait_completed_request(&ctrl->tag_set);
		nvme_loop_destroy_io_queues(ctrl);
	}

	blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
	if (ctrl->ctrl.state == NVME_CTRL_LIVE)
		nvme_shutdown_ctrl(&ctrl->ctrl);

	blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
				nvme_cancel_request, &ctrl->ctrl);
	blk_mq_tagset_wait_completed_request(&ctrl->admin_tag_set);
	nvme_loop_destroy_admin_queue(ctrl);
}

static void nvme_loop_delete_ctrl_host(struct nvme_ctrl *ctrl)
{
	nvme_loop_shutdown_ctrl(to_loop_ctrl(ctrl));
}

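/*
 * Target-side delete: the nvmet core identifies the controller by cntlid,
 * so find the matching host controller and schedule its deletion.
 */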
static void nvme_loop_delete_ctrl(struct nvmet_ctrl *nctrl)
{
	struct nvme_loop_ctrl *ctrl;

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry(ctrl, &nvme_loop_ctrl_list, list) {
		if (ctrl->ctrl.cntlid == nctrl->cntlid)
			nvme_delete_ctrl(&ctrl->ctrl);
	}
	mutex_unlock(&nvme_loop_ctrl_mutex);
}

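/*
 * Controller reset: tear everything down, then reconnect the admin and
 * I/O queues.  If any step fails the controller is removed instead.
 */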
static void nvme_loop_reset_ctrl_work(struct work_struct *work)
{
	struct nvme_loop_ctrl *ctrl =
		container_of(work, struct nvme_loop_ctrl, ctrl.reset_work);
	bool changed;
	int ret;

	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_loop_shutdown_ctrl(ctrl);

	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure should never happen */
		WARN_ON_ONCE(1);
		return;
	}

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_disable;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		goto out_destroy_admin;

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_destroy_io;

	blk_mq_update_nr_hw_queues(&ctrl->tag_set,
			ctrl->ctrl.queue_count - 1);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	nvme_start_ctrl(&ctrl->ctrl);

	return;

out_destroy_io:
	nvme_loop_destroy_io_queues(ctrl);
out_destroy_admin:
	nvme_loop_destroy_admin_queue(ctrl);
out_disable:
	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
}

static const struct nvme_ctrl_ops nvme_loop_ctrl_ops = {
	.name			= "loop",
	.module			= THIS_MODULE,
	.flags			= NVME_F_FABRICS,
	.reg_read32		= nvmf_reg_read32,
	.reg_read64		= nvmf_reg_read64,
	.reg_write32		= nvmf_reg_write32,
	.free_ctrl		= nvme_loop_free_ctrl,
	.submit_async_event	= nvme_loop_submit_async_event,
	.delete_ctrl		= nvme_loop_delete_ctrl_host,
	.get_address		= nvmf_get_address,
};

static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl)
{
	int ret;

	ret = nvme_loop_init_io_queues(ctrl);
	if (ret)
		return ret;

	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
	ctrl->tag_set.ops = &nvme_loop_mq_ops;
	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
	ctrl->tag_set.numa_node = NUMA_NO_NODE;
	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) +
		SG_CHUNK_SIZE * sizeof(struct scatterlist);
	ctrl->tag_set.driver_data = ctrl;
	ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1;
	ctrl->tag_set.timeout = NVME_IO_TIMEOUT;
	ctrl->ctrl.tagset = &ctrl->tag_set;

	ret = blk_mq_alloc_tag_set(&ctrl->tag_set);
	if (ret)
		goto out_destroy_queues;

	ctrl->ctrl.connect_q = blk_mq_init_queue(&ctrl->tag_set);
	if (IS_ERR(ctrl->ctrl.connect_q)) {
		ret = PTR_ERR(ctrl->ctrl.connect_q);
		goto out_free_tagset;
	}

	ret = nvme_loop_connect_io_queues(ctrl);
	if (ret)
		goto out_cleanup_connect_q;

	return 0;

out_cleanup_connect_q:
	blk_cleanup_queue(ctrl->ctrl.connect_q);
out_free_tagset:
	blk_mq_free_tag_set(&ctrl->tag_set);
out_destroy_queues:
	nvme_loop_destroy_io_queues(ctrl);
	return ret;
}

static struct nvmet_port *nvme_loop_find_port(struct nvme_ctrl *ctrl)
{
	struct nvmet_port *p, *found = NULL;

	mutex_lock(&nvme_loop_ports_mutex);
	list_for_each_entry(p, &nvme_loop_ports, entry) {
		/* if no transport address is specified use the first port */
		if ((ctrl->opts->mask & NVMF_OPT_TRADDR) &&
		    strcmp(ctrl->opts->traddr, p->disc_addr.traddr))
			continue;
		found = p;
		break;
	}
	mutex_unlock(&nvme_loop_ports_mutex);
	return found;
}

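/*
 * Create a loopback controller.  Reached through the fabrics ->create_ctrl
 * hook, typically from userspace via nvme-cli, e.g.:
 *
 *	nvme connect -t loop -n <subsysnqn>
 */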
static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
		struct nvmf_ctrl_options *opts)
{
	struct nvme_loop_ctrl *ctrl;
	bool changed;
	int ret;

	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return ERR_PTR(-ENOMEM);
	ctrl->ctrl.opts = opts;
	INIT_LIST_HEAD(&ctrl->list);

	INIT_WORK(&ctrl->ctrl.reset_work, nvme_loop_reset_ctrl_work);

	ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
				0 /* no quirks, we're perfect! */);
	if (ret)
		goto out_put_ctrl;

	ret = -ENOMEM;

	ctrl->ctrl.sqsize = opts->queue_size - 1;
	ctrl->ctrl.kato = opts->kato;
	ctrl->port = nvme_loop_find_port(&ctrl->ctrl);

	ctrl->queues = kcalloc(opts->nr_io_queues + 1, sizeof(*ctrl->queues),
			GFP_KERNEL);
	if (!ctrl->queues)
		goto out_uninit_ctrl;

	ret = nvme_loop_configure_admin_queue(ctrl);
	if (ret)
		goto out_free_queues;

	if (opts->queue_size > ctrl->ctrl.maxcmd) {
		/* warn if maxcmd is lower than queue_size */
		dev_warn(ctrl->ctrl.device,
			"queue_size %zu > ctrl maxcmd %u, clamping down\n",
			opts->queue_size, ctrl->ctrl.maxcmd);
		opts->queue_size = ctrl->ctrl.maxcmd;
	}

	if (opts->nr_io_queues) {
		ret = nvme_loop_create_io_queues(ctrl);
		if (ret)
			goto out_remove_admin_queue;
	}

	nvme_loop_init_iod(ctrl, &ctrl->async_event_iod, 0);

	dev_info(ctrl->ctrl.device,
		 "new ctrl: \"%s\"\n", ctrl->ctrl.opts->subsysnqn);

	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
	WARN_ON_ONCE(!changed);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_add_tail(&ctrl->list, &nvme_loop_ctrl_list);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	nvme_start_ctrl(&ctrl->ctrl);

	return &ctrl->ctrl;

out_remove_admin_queue:
	nvme_loop_destroy_admin_queue(ctrl);
out_free_queues:
	kfree(ctrl->queues);
out_uninit_ctrl:
	nvme_uninit_ctrl(&ctrl->ctrl);
	nvme_put_ctrl(&ctrl->ctrl);
out_put_ctrl:
	nvme_put_ctrl(&ctrl->ctrl);
	if (ret > 0)
		ret = -EIO;
	return ERR_PTR(ret);
}

static int nvme_loop_add_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_add_tail(&port->entry, &nvme_loop_ports);
	mutex_unlock(&nvme_loop_ports_mutex);
	return 0;
}

static void nvme_loop_remove_port(struct nvmet_port *port)
{
	mutex_lock(&nvme_loop_ports_mutex);
	list_del_init(&port->entry);
	mutex_unlock(&nvme_loop_ports_mutex);

	/*
	 * Ensure any ctrls that are in the process of being
	 * deleted are in fact deleted before we return
	 * and free the port. This is to prevent active
	 * ctrls from using a port after it's freed.
	 */
	flush_workqueue(nvme_delete_wq);
}

static const struct nvmet_fabrics_ops nvme_loop_ops = {
	.owner		= THIS_MODULE,
	.type		= NVMF_TRTYPE_LOOP,
	.add_port	= nvme_loop_add_port,
	.remove_port	= nvme_loop_remove_port,
	.queue_response = nvme_loop_queue_response,
	.delete_ctrl	= nvme_loop_delete_ctrl,
};

static struct nvmf_transport_ops nvme_loop_transport = {
	.name		= "loop",
	.module		= THIS_MODULE,
	.create_ctrl	= nvme_loop_create_ctrl,
	.allowed_opts	= NVMF_OPT_TRADDR,
};

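/*
 * Register with both sides: as a fabrics transport on the target (nvmet)
 * side and as a transport on the host (nvme) side.
 */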
static int __init nvme_loop_init_module(void)
{
	int ret;

	ret = nvmet_register_transport(&nvme_loop_ops);
	if (ret)
		return ret;

	ret = nvmf_register_transport(&nvme_loop_transport);
	if (ret)
		nvmet_unregister_transport(&nvme_loop_ops);

	return ret;
}

static void __exit nvme_loop_cleanup_module(void)
{
	struct nvme_loop_ctrl *ctrl, *next;

	nvmf_unregister_transport(&nvme_loop_transport);
	nvmet_unregister_transport(&nvme_loop_ops);

	mutex_lock(&nvme_loop_ctrl_mutex);
	list_for_each_entry_safe(ctrl, next, &nvme_loop_ctrl_list, list)
		nvme_delete_ctrl(&ctrl->ctrl);
	mutex_unlock(&nvme_loop_ctrl_mutex);

	flush_workqueue(nvme_delete_wq);
}

module_init(nvme_loop_init_module);
module_exit(nvme_loop_cleanup_module);

MODULE_LICENSE("GPL v2");
MODULE_ALIAS("nvmet-transport-254"); /* 254 == NVMF_TRTYPE_LOOP */