// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};

struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};

static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};

static int fcloop_verify_addr(substring_t *s)
{
	size_t blen = s->to - s->from + 1;

	if (strnlen(s->from, blen) != NVME_FC_TRADDR_HEXNAMELEN + 2 ||
	    strncmp(s->from, "0x", 2))
		return -EINVAL;

	return 0;
}
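
/*
 * Descriptive note: WWN-valued options ("wwnn", "wwpn", "lpwwnn", "lpwwpn")
 * must be written as "0x" followed by exactly NVME_FC_TRADDR_HEXNAMELEN (16)
 * characters, e.g. "wwnn=0x10000090fa945000" (an illustrative value only);
 * fcloop_verify_addr() checks only the length and "0x" prefix, and
 * match_u64() below then parses the actual value.
 */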

static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}


static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}


#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)


static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};

struct fcloop_rport {
	struct nvme_fc_remote_port	*remoteport;
	struct nvmet_fc_target_port	*targetport;
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct work_struct		ls_work;
};

struct fcloop_tport {
	struct nvmet_fc_target_port	*targetport;
	struct nvme_fc_remote_port	*remoteport;
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct work_struct		ls_work;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};
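
/*
 * Descriptive note on the object topology: each fcloop_nport pairs at most
 * one remote port view (rport, what the host side sees) with at most one
 * target port view (tport, the nvmet side), both bound to the fcloop_lport
 * named via lpwwnn/lpwwpn. The rport->targetport and tport->remoteport
 * back pointers are what loop an LS or FCP request from one side of the
 * simulated "wire" to the other.
 */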

struct fcloop_lsreq {
	struct nvmefc_ls_req		*lsreq;
	struct nvmefc_ls_rsp		ls_rsp;
	int				lsdir;	/* H2T or T2H */
	int				status;
	struct list_head		ls_list; /* fcloop_rport->ls_list */
};

struct fcloop_rscn {
	struct fcloop_tport		*tport;
	struct work_struct		work;
};

enum {
	INI_IO_START		= 0,
	INI_IO_ACTIVE		= 1,
	INI_IO_ABORTED		= 2,
	INI_IO_COMPLETED	= 3,
};
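
/*
 * Initiator-side I/O state machine (descriptive comment): an I/O is queued
 * as INI_IO_START, moves to INI_IO_ACTIVE once fcloop_fcp_recv_work() hands
 * it to the target transport, and ends in INI_IO_COMPLETED via the done
 * work, or in INI_IO_ABORTED if fcloop_fcp_abort() wins the race first.
 */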

struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	spinlock_t			reqlock;
	u16				status;
	u32				inistate;
	bool				active;
	bool				aborted;
	struct kref			ref;
	struct work_struct		fcp_rcv_work;
	struct work_struct		abort_rcv_work;
	struct work_struct		tio_done_work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req		*fcpreq;
	struct fcloop_fcpreq		*tfcp_req;
	spinlock_t			inilock;
};

static inline struct fcloop_lsreq *
ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp)
{
	return container_of(lsrsp, struct fcloop_lsreq, ls_rsp);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}


static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}

static void
fcloop_rport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_rport *rport =
		container_of(work, struct fcloop_rport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&rport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&rport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&rport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */

		spin_lock(&rport->lock);
	}
	spin_unlock(&rport->lock);
}

static int
fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&rport->lock);
		list_add_tail(&tls_req->ls_list, &rport->ls_list);
		spin_unlock(&rport->lock);
		queue_work(nvmet_wq, &rport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, rport,
				  &tls_req->ls_rsp,
				  lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}
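
/*
 * Descriptive note: a host-to-target LS request is looped straight into the
 * target transport via nvmet_fc_rcv_ls_req() above. When no target port is
 * bound yet, the request is instead completed with -ECONNREFUSED from the
 * rport's ls_work context, mirroring a rejected exchange on a real fabric.
 */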

static int
fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_tport *tport = targetport->private;
	struct nvme_fc_remote_port *remoteport = tport->remoteport;
	struct fcloop_rport *rport;

	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));

	lsrsp->done(lsrsp);

	if (remoteport) {
		rport = remoteport->private;
		spin_lock(&rport->lock);
		list_add_tail(&tls_req->ls_list, &rport->ls_list);
		spin_unlock(&rport->lock);
		queue_work(nvmet_wq, &rport->ls_work);
	}

	return 0;
}

static void
fcloop_tport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_tport *tport =
		container_of(work, struct fcloop_tport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&tport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&tport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&tport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */

		spin_lock(&tport->lock);
	}
	spin_unlock(&tport->lock);
}

static int
fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_tport *tport = targetport->private;
	int ret = 0;

	/*
	 * hosthandle should be the dst.rport value.
	 * hosthandle ignored as fcloop currently is
	 * 1:1 tgtport vs remoteport
	 */
	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!tport->remoteport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&tport->lock);
		list_add_tail(&tls_req->ls_list, &tport->ls_list);
		spin_unlock(&tport->lock);
		queue_work(nvmet_wq, &tport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_rport *rport = remoteport->private;
	struct nvmet_fc_target_port *targetport = rport->targetport;
	struct fcloop_tport *tport;

	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));
	lsrsp->done(lsrsp);

	if (targetport) {
		tport = targetport->private;
		spin_lock(&tport->lock);
		list_add_tail(&tls_req->ls_list, &tport->ls_list);
		spin_unlock(&tport->lock);
		queue_work(nvmet_wq, &tport->ls_work);
	}

	return 0;
}

static void
fcloop_t2h_host_release(void *hosthandle)
{
	/* host handle ignored for now */
}

static int
fcloop_t2h_host_traddr(void *hosthandle, u64 *wwnn, u64 *wwpn)
{
	struct fcloop_rport *rport = hosthandle;

	*wwnn = rport->lport->localport->node_name;
	*wwpn = rport->lport->localport->port_name;
	return 0;
}

/*
 * Simulate reception of an RSCN and convert it to an initiator transport
 * call to rescan a remote port.
 */
static void
fcloop_tgt_rscn_work(struct work_struct *work)
{
	struct fcloop_rscn *tgt_rscn =
		container_of(work, struct fcloop_rscn, work);
	struct fcloop_tport *tport = tgt_rscn->tport;

	if (tport->remoteport)
		nvme_fc_rescan_remoteport(tport->remoteport);
	kfree(tgt_rscn);
}

static void
fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
{
	struct fcloop_rscn *tgt_rscn;

	tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
	if (!tgt_rscn)
		return;

	tgt_rscn->tport = tgtport->private;
	INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);

	queue_work(nvmet_wq, &tgt_rscn->work);
}

static void
fcloop_tfcp_req_free(struct kref *ref)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(ref, struct fcloop_fcpreq, ref);

	kfree(tfcp_req);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
	return kref_get_unless_zero(&tfcp_req->ref);
}

static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
			struct fcloop_fcpreq *tfcp_req, int status)
{
	struct fcloop_ini_fcpreq *inireq = NULL;

	if (fcpreq) {
		inireq = fcpreq->private;
		spin_lock(&inireq->inilock);
		inireq->tfcp_req = NULL;
		spin_unlock(&inireq->inilock);

		fcpreq->status = status;
		fcpreq->done(fcpreq);
	}

	/* release original io reference on tgt struct */
	fcloop_tfcp_req_put(tfcp_req);
}

static bool drop_fabric_opcode;
#define DROP_OPCODE_MASK	0x00FF
/* fabrics opcode will have a bit set above 1st byte */
static int drop_opcode = -1;
static int drop_instance;
static int drop_amount;
static int drop_current_cnt;

/*
 * Routine to parse io and determine if the io is to be dropped.
 * Returns:
 *  0 if io is not obstructed
 *  1 if io was dropped
 */
static int check_for_drop(struct fcloop_fcpreq *tfcp_req)
{
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	struct nvme_fc_cmd_iu *cmdiu = fcpreq->cmdaddr;
	struct nvme_command *sqe = &cmdiu->sqe;

	if (drop_opcode == -1)
		return 0;

	pr_info("%s: seq opcd x%02x fctype x%02x: drop F %s op x%02x "
		"inst %d start %d amt %d\n",
		__func__, sqe->common.opcode, sqe->fabrics.fctype,
		drop_fabric_opcode ? "y" : "n",
		drop_opcode, drop_current_cnt, drop_instance, drop_amount);

	if ((drop_fabric_opcode &&
	     (sqe->common.opcode != nvme_fabrics_command ||
	      sqe->fabrics.fctype != drop_opcode)) ||
	    (!drop_fabric_opcode && sqe->common.opcode != drop_opcode))
		return 0;

	if (++drop_current_cnt >= drop_instance) {
		if (drop_current_cnt >= drop_instance + drop_amount)
			drop_opcode = -1;
		return 1;
	}

	return 0;
}
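
/*
 * Worked example (descriptive comment): after 'echo "02:2:1" >
 * /sys/class/fcloop/ctl/set_cmd_drop', drop_opcode is 0x02, drop_instance
 * is 2 and drop_amount is 0, so the first matching command passes
 * (drop_current_cnt becomes 1), the second is dropped (the count reaches
 * 2), and the trigger then disarms itself by resetting drop_opcode to -1.
 */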

static void
fcloop_fcp_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	unsigned long flags;
	int ret = 0;
	bool aborted = false;

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	fcpreq = tfcp_req->fcpreq;
	switch (tfcp_req->inistate) {
	case INI_IO_START:
		tfcp_req->inistate = INI_IO_ACTIVE;
		break;
	case INI_IO_ABORTED:
		aborted = true;
		break;
	default:
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		WARN_ON(1);
		return;
	}
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (unlikely(aborted)) {
		/* the abort handler will call fcloop_call_host_done */
		return;
	}

	if (unlikely(check_for_drop(tfcp_req))) {
		pr_info("%s: dropped command ********\n", __func__);
		return;
	}

	ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
				   &tfcp_req->tgt_fcp_req,
				   fcpreq->cmdaddr, fcpreq->cmdlen);
	if (ret)
		fcloop_call_host_done(fcpreq, tfcp_req, ret);
}

static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	bool completed = false;
	unsigned long flags;

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	switch (tfcp_req->inistate) {
	case INI_IO_ABORTED:
		fcpreq = tfcp_req->fcpreq;
		tfcp_req->fcpreq = NULL;
		break;
	case INI_IO_COMPLETED:
		completed = true;
		break;
	default:
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		WARN_ON(1);
		return;
	}
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (unlikely(completed)) {
		/* remove reference taken in original abort downcall */
		fcloop_tfcp_req_put(tfcp_req);
		return;
	}

	if (tfcp_req->tport->targetport)
		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
				       &tfcp_req->tgt_fcp_req);

	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
	/* call_host_done releases reference for abort downcall */
}

/*
 * FCP IO operation done by target completion.
 * call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, tio_done_work);
	struct nvmefc_fcp_req *fcpreq;
	unsigned long flags;

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	fcpreq = tfcp_req->fcpreq;
	tfcp_req->inistate = INI_IO_COMPLETED;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}


static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	spin_lock_init(&inireq->inilock);

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	tfcp_req->inistate = INI_IO_START;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
	kref_init(&tfcp_req->ref);

	queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);

	return 0;
}
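
/*
 * Descriptive note on the FCP data path: fcloop_fcp_req() allocates a
 * tfcp_req carrying both sides of the exchange and queues fcp_rcv_work,
 * which feeds the command to nvmet via nvmet_fc_rcv_fcp_req(). Target
 * completions come back through fcloop_fcp_req_release(), whose
 * tio_done_work finally invokes the host's done callback in
 * fcloop_call_host_done().
 */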

static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}
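
/*
 * Descriptive note: the copy above walks two scatterlists in lock step.
 * The first loop only consumes "offset" bytes of the io_sg list (the
 * running transfer offset); the second then memcpy()s "length" bytes in
 * min-sized chunks, advancing whichever list element is exhausted first,
 * with the copy direction chosen by the WRITEDATA vs READDATA op.
 */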

static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;
	unsigned long flags;

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock_irqsave(&tfcp_req->reqlock, flags);
		tfcp_req->active = false;
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */
		fallthrough;

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	tfcp_req->active = false;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}

static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	unsigned long flags;

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	tfcp_req->aborted = true;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	queue_work(nvmet_wq, &tfcp_req->tio_done_work);
}

static void
fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport,
			void *hosthandle, struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	bool abortio = true;
	unsigned long flags;

	spin_lock(&inireq->inilock);
	tfcp_req = inireq->tfcp_req;
	if (tfcp_req)
		fcloop_tfcp_req_get(tfcp_req);
	spin_unlock(&inireq->inilock);

	if (!tfcp_req)
		/* abort has already been called */
		return;

	/* break initiator/target relationship for io */
	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
	case INI_IO_ACTIVE:
		tfcp_req->inistate = INI_IO_ABORTED;
		break;
	case INI_IO_COMPLETED:
		abortio = false;
		break;
	default:
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		WARN_ON(1);
		return;
	}
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (abortio)
		/* leave the reference while the work item is scheduled */
		WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
	else {
		/*
		 * as the io has already had the done callback made,
		 * nothing more to do. So release the reference taken above
		 */
		fcloop_tfcp_req_put(tfcp_req);
	}
}

static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}

static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport_priv *lport_priv = localport->private;
	struct fcloop_lport *lport = lport_priv->lport;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	flush_work(&rport->ls_work);
	fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	flush_work(&tport->ls_work);
	fcloop_nport_put(tport->nport);
}

#define FCLOOP_HW_QUEUES	4
#define FCLOOP_SGL_SEGS		256
#define FCLOOP_DMABOUND_4G	0xFFFFFFFF

static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_h2t_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_h2t_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.xmt_ls_rsp		= fcloop_t2h_xmt_ls_rsp,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};

static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_h2t_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.discovery_event	= fcloop_tgt_discovery_evt,
	.ls_req			= fcloop_t2h_ls_req,
	.ls_abort		= fcloop_t2h_ls_abort,
	.host_release		= fcloop_t2h_host_release,
	.host_traddr		= fcloop_t2h_host_traddr,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
};
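
/*
 * Descriptive note: the *_priv_sz fields above ask the fc transports to
 * co-allocate fcloop's per-object state (fcloop_rport, fcloop_tport,
 * fcloop_lsreq, ...) inside their own port/request structures, which is
 * why the code reaches that state through ->private pointers rather than
 * via separate allocations.
 */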

static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	struct fcloop_lport_priv *lport_priv;
	unsigned long flags;
	int ret = -ENOMEM;

	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
	if (!lport)
		return -ENOMEM;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		goto out_free_lport;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		/* success */
		lport_priv = localport->private;
		lport_priv->lport = lport;

		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);
	}

out_free_opts:
	kfree(opts);
out_free_lport:
	/* free only if we're going to fail */
	if (ret)
		kfree(lport);

	return ret ? ret : count;
}


static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	if (!ret)
		wait_for_completion(&lport->unreg_done);

	kfree(lport);

	return ret;
}


static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}

static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}

static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
					  &pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;
	spin_lock_init(&rport->lock);
	INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
	INIT_LIST_HEAD(&rport->ls_list);

	return count;
}


static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	list_del(&nport->nport_list);

	return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	if (!rport)
		return -EALREADY;

	return nvme_fc_unregister_remoteport(rport->remoteport);
}

static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __remoteport_unreg(nport, rport);

	return ret ? ret : count;
}

static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
					   &targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;
	spin_lock_init(&tport->lock);
	INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
	INIT_LIST_HEAD(&tport->ls_list);

	return count;
}


static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	if (!tport)
		return -EALREADY;

	return nvmet_fc_unregister_targetport(tport->targetport);
}

static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __targetport_unreg(nport, tport);

	return ret ? ret : count;
}

static ssize_t
fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	unsigned int opcode;
	int starting, amount;

	if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
		return -EBADRQC;

	drop_current_cnt = 0;
	drop_fabric_opcode = (opcode & ~DROP_OPCODE_MASK) ? true : false;
	drop_opcode = (opcode & DROP_OPCODE_MASK);
	drop_instance = starting;
	/* the check to drop routine uses instance + count to know when
	 * to end. Thus, if dropping 1 instance, count should be 0.
	 * so subtract 1 from the count.
	 */
	drop_amount = amount - 1;

	pr_info("%s: DROP: Starting at instance %d of%s opcode x%x drop +%d "
		"instances\n",
		__func__, drop_instance, drop_fabric_opcode ? " fabric" : "",
		drop_opcode, drop_amount);

	return count;
}
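
/*
 * Usage sketch (descriptive comment): write "opcode:starting:amount" to the
 * set_cmd_drop attribute, where opcode is hex and any bit set above the low
 * byte marks a fabrics fctype rather than an admin/io opcode, e.g.
 *	echo "04:1:1"  - drop the first command with opcode 0x4
 *	echo "104:1:2" - drop the first two fabrics commands of fctype 0x4
 */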


static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
static DEVICE_ATTR(set_cmd_drop, 0200, NULL, fcloop_set_cmd_drop);
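
/*
 * Example topology setup via the attributes above (a sketch; the WWN values
 * are illustrative only, and the optional roles=/fcaddr= tokens are left
 * out). All attributes live under /sys/class/fcloop/ctl/:
 *	echo "wwnn=0x10000090fa945000,wwpn=0x10000090fa945001" > add_local_port
 *	echo "wwnn=0x10001100aa000001,wwpn=0x10001100aa000002" > add_target_port
 *	echo "wwnn=0x10001100aa000001,wwpn=0x10001100aa000002,lpwwnn=0x10000090fa945000,lpwwpn=0x10000090fa945001" > add_remote_port
 * The remote port names the target port's WWNs (sharing its nport) and
 * binds to the local port through lpwwnn/lpwwpn, closing the loopback.
 */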

static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	&dev_attr_set_cmd_drop.attr,
	NULL
};

static const struct attribute_group fcloop_dev_attrs_group = {
	.attrs		= fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
	NULL,
};

static const struct class fcloop_class = {
	.name = "fcloop",
};
static struct device *fcloop_device;


static int __init fcloop_init(void)
{
	int ret;

	ret = class_register(&fcloop_class);
	if (ret) {
		pr_err("couldn't register class fcloop\n");
		return ret;
	}

	fcloop_device = device_create_with_groups(
				&fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_unregister(&fcloop_class);
	return ret;
}

static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport = NULL;
	struct fcloop_nport *nport = NULL;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(&fcloop_class, MKDEV(0, 0));
	class_unregister(&fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_DESCRIPTION("NVMe target FC loop transport driver");
MODULE_LICENSE("GPL v2");