// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
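/*
 * fcloop: loopback driver for testing NVMe over Fibre Channel. It
 * registers as an FC LLDD with both the nvme-fc host transport and the
 * nvmet-fc target transport, then shuttles LS and FCP requests between
 * the two sides in memory, so the full host/target stack can be
 * exercised without FC hardware. Ports are created and deleted through
 * sysfs attributes on the /sys/class/fcloop/ctl device.
 */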
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>

enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};

struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};

static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};

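/*
 * Parse a comma-separated option string written to one of the sysfs
 * attributes (e.g. "wwnn=0x10000090fa945a6d,wwpn=0x10000090fa945a6e" --
 * values purely illustrative) into @opts, recording which options were
 * seen in @opts->mask.
 */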
static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}

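/*
 * Parse just the "wwnn=" and "wwpn=" options out of @buf; both must be
 * present or -EINVAL is returned. Used by the del_*_port attributes to
 * identify which port to tear down.
 */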
static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}


#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)


static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

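/*
 * Port topology: an fcloop_lport wraps a registered nvme-fc local port
 * (the simulated host HBA). An fcloop_nport represents one simulated
 * "far end" identified by WWNN/WWPN; it can carry an fcloop_rport (the
 * host transport's remote-port view of it) and an fcloop_tport (the
 * target transport's target-port view), which point at each other so
 * requests can be looped between the two sides.
 */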
struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};

struct fcloop_rport {
	struct nvme_fc_remote_port *remoteport;
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_tport {
	struct nvmet_fc_target_port *targetport;
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_lport *lport;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};

struct fcloop_lsreq {
	struct fcloop_tport		*tport;
	struct nvmefc_ls_req		*lsreq;
	struct work_struct		work;
	struct nvmefc_tgt_ls_req	tgt_ls_req;
	int				status;
};

struct fcloop_rscn {
	struct fcloop_tport		*tport;
	struct work_struct		work;
};

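/*
 * Initiator-side state of an FCP I/O, tracked in fcloop_fcpreq so the
 * receive, abort and completion work items can detect whether an abort
 * raced with delivery or completion of the command.
 */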
enum {
	INI_IO_START		= 0,
	INI_IO_ACTIVE		= 1,
	INI_IO_ABORTED		= 2,
	INI_IO_COMPLETED	= 3,
};

struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	spinlock_t			reqlock;
	u16				status;
	u32				inistate;
	bool				active;
	bool				aborted;
	struct kref			ref;
	struct work_struct		fcp_rcv_work;
	struct work_struct		abort_rcv_work;
	struct work_struct		tio_done_work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req		*fcpreq;
	struct fcloop_fcpreq		*tfcp_req;
	spinlock_t			inilock;
};

static inline struct fcloop_lsreq *
tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}


static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}

/*
 * Transmit of the LS RSP is done (e.g. buffers all set); call back up
 * the initiator's "done" flow.
 */
static void
fcloop_tgt_lsrqst_done_work(struct work_struct *work)
{
	struct fcloop_lsreq *tls_req =
		container_of(work, struct fcloop_lsreq, work);
	struct fcloop_tport *tport = tls_req->tport;
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	if (!tport || tport->remoteport)
		lsreq->done(lsreq, tls_req->status);
}

static int
fcloop_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		tls_req->tport = NULL;
		schedule_work(&tls_req->work);
		return ret;
	}

	tls_req->status = 0;
	tls_req->tport = rport->targetport->private;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
			struct nvmefc_tgt_ls_req *tgt_lsreq)
{
	struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;

	memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
		((lsreq->rsplen < tgt_lsreq->rsplen) ?
				lsreq->rsplen : tgt_lsreq->rsplen));
	tgt_lsreq->done(tgt_lsreq);

	schedule_work(&tls_req->work);

	return 0;
}

/*
 * Simulate reception of an RSCN and convert it to an initiator
 * transport call to rescan a remote port.
 */
static void
fcloop_tgt_rscn_work(struct work_struct *work)
{
	struct fcloop_rscn *tgt_rscn =
		container_of(work, struct fcloop_rscn, work);
	struct fcloop_tport *tport = tgt_rscn->tport;

	if (tport->remoteport)
		nvme_fc_rescan_remoteport(tport->remoteport);
	kfree(tgt_rscn);
}

static void
fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
{
	struct fcloop_rscn *tgt_rscn;

	tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
	if (!tgt_rscn)
		return;

	tgt_rscn->tport = tgtport->private;
	INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);

	schedule_work(&tgt_rscn->work);
}

static void
fcloop_tfcp_req_free(struct kref *ref)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(ref, struct fcloop_fcpreq, ref);

	kfree(tfcp_req);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
	return kref_get_unless_zero(&tfcp_req->ref);
}

static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
			struct fcloop_fcpreq *tfcp_req, int status)
{
	struct fcloop_ini_fcpreq *inireq = NULL;

	if (fcpreq) {
		inireq = fcpreq->private;
		spin_lock(&inireq->inilock);
		inireq->tfcp_req = NULL;
		spin_unlock(&inireq->inilock);

		fcpreq->status = status;
		fcpreq->done(fcpreq);
	}

	/* release original io reference on tgt struct */
	fcloop_tfcp_req_put(tfcp_req);
}

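/*
 * Work item that delivers a host-issued FCP command to the target
 * transport, unless the I/O was aborted before this work ran.
 */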
static void
fcloop_fcp_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	unsigned long flags;
	int ret = 0;
	bool aborted = false;

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
		tfcp_req->inistate = INI_IO_ACTIVE;
		break;
	case INI_IO_ABORTED:
		aborted = true;
		break;
	default:
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		WARN_ON(1);
		return;
	}
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (unlikely(aborted))
		ret = -ECANCELED;
	else
		ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
				&tfcp_req->tgt_fcp_req,
				fcpreq->cmdaddr, fcpreq->cmdlen);
	if (ret)
		fcloop_call_host_done(fcpreq, tfcp_req, ret);
}

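/*
 * Work item that propagates a host-side abort to the target transport.
 * If the I/O already completed, only the extra reference taken by the
 * abort downcall needs to be dropped.
 */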
static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	bool completed = false;
	unsigned long flags;

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	fcpreq = tfcp_req->fcpreq;
	switch (tfcp_req->inistate) {
	case INI_IO_ABORTED:
		break;
	case INI_IO_COMPLETED:
		completed = true;
		break;
	default:
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		WARN_ON(1);
		return;
	}
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (unlikely(completed)) {
		/* remove reference taken in original abort downcall */
		fcloop_tfcp_req_put(tfcp_req);
		return;
	}

	if (tfcp_req->tport->targetport)
		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
					&tfcp_req->tgt_fcp_req);

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	tfcp_req->fcpreq = NULL;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
	/* call_host_done releases reference for abort downcall */
}

/*
 * The target has completed the FCP I/O operation; call back up the
 * initiator's "done" flow.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, tio_done_work);
	struct nvmefc_fcp_req *fcpreq;
	unsigned long flags;

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	fcpreq = tfcp_req->fcpreq;
	tfcp_req->inistate = INI_IO_COMPLETED;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}

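/*
 * Host-side FCP entry point: allocate a per-I/O fcloop_fcpreq that
 * represents the command on the "wire" and hand it to the target side
 * via a work item, so the target receive path runs in its own context.
 */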
static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	spin_lock_init(&inireq->inilock);

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	tfcp_req->inistate = INI_IO_START;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
	kref_init(&tfcp_req->ref);

	schedule_work(&tfcp_req->fcp_rcv_work);

	return 0;
}

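/*
 * Copy @length bytes between the target-side scatterlist (@data_sg)
 * and the initiator-side scatterlist (@io_sg), starting @offset bytes
 * into the io list. The first loop merely advances through io_sg to
 * consume @offset; the second walks both lists in lock-step, copying
 * the largest run that fits in the current element of each.
 */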
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}

static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;
	unsigned long flags;

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock_irqsave(&tfcp_req->reqlock, flags);
		tfcp_req->active = false;
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */
		/* FALLTHRU */

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	tfcp_req->active = false;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}

static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	unsigned long flags;

	/*
	 * Mark the I/O aborted only to handle the case where two
	 * threads are in the transport (one doing the io, the other
	 * doing the abort); this only kills ops posted after the
	 * abort request.
	 */
	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	tfcp_req->aborted = true;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	schedule_work(&tfcp_req->tio_done_work);
}

static void
fcloop_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
				struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	bool abortio = true;
	unsigned long flags;

	spin_lock(&inireq->inilock);
	tfcp_req = inireq->tfcp_req;
	if (tfcp_req)
		fcloop_tfcp_req_get(tfcp_req);
	spin_unlock(&inireq->inilock);

	if (!tfcp_req)
		/* abort has already been called */
		return;

	/* break initiator/target relationship for io */
	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
	case INI_IO_ACTIVE:
		tfcp_req->inistate = INI_IO_ABORTED;
		break;
	case INI_IO_COMPLETED:
		abortio = false;
		break;
	default:
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		WARN_ON(1);
		return;
	}
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (abortio)
		/* leave the reference while the work item is scheduled */
		WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
	else {
		/*
		 * as the io has already had the done callback made,
		 * nothing more to do. So release the reference taken above
		 */
		fcloop_tfcp_req_put(tfcp_req);
	}
}

static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}

static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport_priv *lport_priv = localport->private;
	struct fcloop_lport *lport = lport_priv->lport;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	fcloop_nport_put(tport->nport);
}

#define	FCLOOP_HW_QUEUES		4
#define	FCLOOP_SGL_SEGS			256
#define FCLOOP_DMABOUND_4G		0xFFFFFFFF

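/* LLDD template registered with the nvme-fc host transport. */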
static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};

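/* LLDD template registered with the nvmet-fc target transport. */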
static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.discovery_event	= fcloop_tgt_discovery_evt,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
};

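/*
 * Create a local (host-side) port. Example usage, with illustrative
 * WWN values:
 *
 *   echo "wwnn=0x10000090fa945a6d,wwpn=0x10000090fa945a6e" > \
 *	/sys/class/fcloop/ctl/add_local_port
 *
 * wwnn and wwpn are required (LPORT_OPTS); roles and fcaddr are
 * optional.
 */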
static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	struct fcloop_lport_priv *lport_priv;
	unsigned long flags;
	int ret = -ENOMEM;

	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
	if (!lport)
		return -ENOMEM;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		goto out_free_lport;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		/* success */
		lport_priv = localport->private;
		lport_priv->lport = lport;

		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);
	}

out_free_opts:
	kfree(opts);
out_free_lport:
	/* free only if we're going to fail */
	if (ret)
		kfree(lport);

	return ret ? ret : count;
}


static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	kfree(lport);

	return ret;
}

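/*
 * Delete a local port previously created via add_local_port, e.g.:
 *
 *   echo "wwnn=0x10000090fa945a6d,wwpn=0x10000090fa945a6e" > \
 *	/sys/class/fcloop/ctl/del_local_port
 */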
static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}

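/*
 * Look up or create the fcloop_nport named by the wwnn/wwpn options in
 * @buf. A remote port and a target port with the same names share one
 * nport; creating the second side reuses (and takes a reference on)
 * the existing entry. Returns NULL on any error.
 */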
static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}

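/*
 * Create a remote (initiator-visible) port and attach it to an
 * existing local port. Example usage, with placeholder WWN values:
 *
 *   echo "wwnn=<wwnn>,wwpn=<wwpn>,lpwwnn=<local wwnn>,lpwwpn=<local wwpn>" \
 *	> /sys/class/fcloop/ctl/add_remote_port
 *
 * All four options are required (RPORT_OPTS); lpwwnn/lpwwpn name the
 * local port to attach to.
 */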
static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;

	return count;
}


static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	if (!rport)
		return -EALREADY;

	return nvme_fc_unregister_remoteport(rport->remoteport);
}

static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __remoteport_unreg(nport, rport);

	return ret ? ret : count;
}

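/*
 * Create a target (nvmet-side) port. Example usage, with a placeholder
 * WWN pair that pairs up with the matching remote port:
 *
 *   echo "wwnn=<wwnn>,wwpn=<wwpn>" > /sys/class/fcloop/ctl/add_target_port
 */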
static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;

	return count;
}


static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	if (!tport)
		return -EALREADY;

	return nvmet_fc_unregister_targetport(tport->targetport);
}

static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __targetport_unreg(nport, tport);

	return ret ? ret : count;
}


static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);

static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	NULL
};

static struct attribute_group fcloop_dev_attrs_group = {
	.attrs		= fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
	NULL,
};

static struct class *fcloop_class;
static struct device *fcloop_device;

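/*
 * Module init/exit: create the "fcloop" class and the "ctl" device
 * whose attributes (above) drive port creation and deletion; on exit,
 * tear down any ports that are still registered.
 */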
static int __init fcloop_init(void)
{
	int ret;

	fcloop_class = class_create(THIS_MODULE, "fcloop");
	if (IS_ERR(fcloop_class)) {
		pr_err("couldn't register class fcloop\n");
		ret = PTR_ERR(fcloop_class);
		return ret;
	}

	fcloop_device = device_create_with_groups(
				fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_destroy(fcloop_class);
	return ret;
}

static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");