// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2016 Avago Technologies.  All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/parser.h>
#include <uapi/scsi/fc/fc_fs.h>

#include "../host/nvme.h"
#include "../target/nvmet.h"
#include <linux/nvme-fc-driver.h>
#include <linux/nvme-fc.h>


enum {
	NVMF_OPT_ERR		= 0,
	NVMF_OPT_WWNN		= 1 << 0,
	NVMF_OPT_WWPN		= 1 << 1,
	NVMF_OPT_ROLES		= 1 << 2,
	NVMF_OPT_FCADDR		= 1 << 3,
	NVMF_OPT_LPWWNN		= 1 << 4,
	NVMF_OPT_LPWWPN		= 1 << 5,
};

struct fcloop_ctrl_options {
	int			mask;
	u64			wwnn;
	u64			wwpn;
	u32			roles;
	u32			fcaddr;
	u64			lpwwnn;
	u64			lpwwpn;
};

static const match_table_t opt_tokens = {
	{ NVMF_OPT_WWNN,	"wwnn=%s"	},
	{ NVMF_OPT_WWPN,	"wwpn=%s"	},
	{ NVMF_OPT_ROLES,	"roles=%d"	},
	{ NVMF_OPT_FCADDR,	"fcaddr=%x"	},
	{ NVMF_OPT_LPWWNN,	"lpwwnn=%s"	},
	{ NVMF_OPT_LPWWPN,	"lpwwpn=%s"	},
	{ NVMF_OPT_ERR,		NULL		}
};

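/*
 * The wwnn/wwpn/lpwwnn/lpwwpn option values must be "0x" followed by
 * exactly 16 hex digits (NVME_FC_TRADDR_HEXNAMELEN), which is what
 * fcloop_verify_addr() below enforces. An illustrative (made up) value:
 *	wwnn=0x10000090fa945000
 */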
static int fcloop_verify_addr(substring_t *s)
{
	size_t blen = s->to - s->from + 1;

	if (strnlen(s->from, blen) != NVME_FC_TRADDR_HEXNAMELEN + 2 ||
	    strncmp(s->from, "0x", 2))
		return -EINVAL;

	return 0;
}

static int
fcloop_parse_options(struct fcloop_ctrl_options *opts,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		opts->mask |= token;
		switch (token) {
		case NVMF_OPT_WWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwnn = token64;
			break;
		case NVMF_OPT_WWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->wwpn = token64;
			break;
		case NVMF_OPT_ROLES:
			if (match_int(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->roles = token;
			break;
		case NVMF_OPT_FCADDR:
			if (match_hex(args, &token)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->fcaddr = token;
			break;
		case NVMF_OPT_LPWWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwnn = token64;
			break;
		case NVMF_OPT_LPWWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			opts->lpwwpn = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);
	return ret;
}


static int
fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
		const char *buf)
{
	substring_t args[MAX_OPT_ARGS];
	char *options, *o, *p;
	int token, ret = 0;
	u64 token64;

	*nname = -1;
	*pname = -1;

	options = o = kstrdup(buf, GFP_KERNEL);
	if (!options)
		return -ENOMEM;

	while ((p = strsep(&o, ",\n")) != NULL) {
		if (!*p)
			continue;

		token = match_token(p, opt_tokens, args);
		switch (token) {
		case NVMF_OPT_WWNN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*nname = token64;
			break;
		case NVMF_OPT_WWPN:
			if (fcloop_verify_addr(args) ||
			    match_u64(args, &token64)) {
				ret = -EINVAL;
				goto out_free_options;
			}
			*pname = token64;
			break;
		default:
			pr_warn("unknown parameter or missing value '%s'\n", p);
			ret = -EINVAL;
			goto out_free_options;
		}
	}

out_free_options:
	kfree(options);

	if (!ret) {
		if (*nname == -1)
			return -EINVAL;
		if (*pname == -1)
			return -EINVAL;
	}

	return ret;
}


#define LPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

#define RPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN |  \
			 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)

#define TGTPORT_OPTS	(NVMF_OPT_WWNN | NVMF_OPT_WWPN)

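/*
 * Port creation/deletion is driven by writing option strings to the
 * sysfs attributes registered at the bottom of this file (class
 * "fcloop", device "ctl"). An illustrative sequence, with all WWN
 * values made up, and the remote and target port sharing one nport:
 *
 *	echo "wwnn=0x10000090fa945000,wwpn=0x10000090fa945001" \
 *		> /sys/class/fcloop/ctl/add_local_port
 *	echo "wwnn=0x10000090fa945002,wwpn=0x10000090fa945003,"\
 *	"lpwwnn=0x10000090fa945000,lpwwpn=0x10000090fa945001" \
 *		> /sys/class/fcloop/ctl/add_remote_port
 *	echo "wwnn=0x10000090fa945002,wwpn=0x10000090fa945003" \
 *		> /sys/class/fcloop/ctl/add_target_port
 */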

static DEFINE_SPINLOCK(fcloop_lock);
static LIST_HEAD(fcloop_lports);
static LIST_HEAD(fcloop_nports);

struct fcloop_lport {
	struct nvme_fc_local_port *localport;
	struct list_head lport_list;
	struct completion unreg_done;
};

struct fcloop_lport_priv {
	struct fcloop_lport *lport;
};

struct fcloop_rport {
	struct nvme_fc_remote_port	*remoteport;
	struct nvmet_fc_target_port	*targetport;
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct work_struct		ls_work;
};

struct fcloop_tport {
	struct nvmet_fc_target_port	*targetport;
	struct nvme_fc_remote_port	*remoteport;
	struct fcloop_nport		*nport;
	struct fcloop_lport		*lport;
	spinlock_t			lock;
	struct list_head		ls_list;
	struct work_struct		ls_work;
};

struct fcloop_nport {
	struct fcloop_rport *rport;
	struct fcloop_tport *tport;
	struct fcloop_lport *lport;
	struct list_head nport_list;
	struct kref ref;
	u64 node_name;
	u64 port_name;
	u32 port_role;
	u32 port_id;
};

struct fcloop_lsreq {
	struct nvmefc_ls_req		*lsreq;
	struct nvmefc_ls_rsp		ls_rsp;
	int				lsdir;	/* H2T or T2H */
	int				status;
	struct list_head		ls_list; /* fcloop_rport->ls_list */
};

struct fcloop_rscn {
	struct fcloop_tport		*tport;
	struct work_struct		work;
};

enum {
	INI_IO_START		= 0,
	INI_IO_ACTIVE		= 1,
	INI_IO_ABORTED		= 2,
	INI_IO_COMPLETED	= 3,
};
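
/*
 * inistate transitions (all made under tfcp_req->reqlock):
 *	INI_IO_START -> INI_IO_ACTIVE		in fcloop_fcp_recv_work()
 *	INI_IO_START or INI_IO_ACTIVE
 *		     -> INI_IO_ABORTED		in fcloop_fcp_abort()
 *	any state    -> INI_IO_COMPLETED	in fcloop_tgt_fcprqst_done_work()
 */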

struct fcloop_fcpreq {
	struct fcloop_tport		*tport;
	struct nvmefc_fcp_req		*fcpreq;
	spinlock_t			reqlock;
	u16				status;
	u32				inistate;
	bool				active;
	bool				aborted;
	struct kref			ref;
	struct work_struct		fcp_rcv_work;
	struct work_struct		abort_rcv_work;
	struct work_struct		tio_done_work;
	struct nvmefc_tgt_fcp_req	tgt_fcp_req;
};

struct fcloop_ini_fcpreq {
	struct nvmefc_fcp_req	*fcpreq;
	struct fcloop_fcpreq	*tfcp_req;
	spinlock_t		inilock;
};

static inline struct fcloop_lsreq *
ls_rsp_to_lsreq(struct nvmefc_ls_rsp *lsrsp)
{
	return container_of(lsrsp, struct fcloop_lsreq, ls_rsp);
}

static inline struct fcloop_fcpreq *
tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
}


static int
fcloop_create_queue(struct nvme_fc_local_port *localport,
			unsigned int qidx, u16 qsize,
			void **handle)
{
	*handle = localport;
	return 0;
}

static void
fcloop_delete_queue(struct nvme_fc_local_port *localport,
			unsigned int idx, void *handle)
{
}

static void
fcloop_rport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_rport *rport =
		container_of(work, struct fcloop_rport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&rport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&rport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&rport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */

		spin_lock(&rport->lock);
	}
	spin_unlock(&rport->lock);
}

static int
fcloop_h2t_ls_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_rport *rport = remoteport->private;
	int ret = 0;

	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!rport->targetport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&rport->lock);
		list_add_tail(&tls_req->ls_list, &rport->ls_list);
		spin_unlock(&rport->lock);
		queue_work(nvmet_wq, &rport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvmet_fc_rcv_ls_req(rport->targetport, rport,
				  &tls_req->ls_rsp,
				  lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_h2t_xmt_ls_rsp(struct nvmet_fc_target_port *targetport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_tport *tport = targetport->private;
	struct nvme_fc_remote_port *remoteport = tport->remoteport;
	struct fcloop_rport *rport;

	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));

	lsrsp->done(lsrsp);

	if (remoteport) {
		rport = remoteport->private;
		spin_lock(&rport->lock);
		list_add_tail(&tls_req->ls_list, &rport->ls_list);
		spin_unlock(&rport->lock);
		queue_work(nvmet_wq, &rport->ls_work);
	}

	return 0;
}

static void
fcloop_tport_lsrqst_work(struct work_struct *work)
{
	struct fcloop_tport *tport =
		container_of(work, struct fcloop_tport, ls_work);
	struct fcloop_lsreq *tls_req;

	spin_lock(&tport->lock);
	for (;;) {
		tls_req = list_first_entry_or_null(&tport->ls_list,
				struct fcloop_lsreq, ls_list);
		if (!tls_req)
			break;

		list_del(&tls_req->ls_list);
		spin_unlock(&tport->lock);

		tls_req->lsreq->done(tls_req->lsreq, tls_req->status);
		/*
		 * callee may free memory containing tls_req.
		 * do not reference lsreq after this.
		 */

		spin_lock(&tport->lock);
	}
	spin_unlock(&tport->lock);
}

static int
fcloop_t2h_ls_req(struct nvmet_fc_target_port *targetport, void *hosthandle,
			struct nvmefc_ls_req *lsreq)
{
	struct fcloop_lsreq *tls_req = lsreq->private;
	struct fcloop_tport *tport = targetport->private;
	int ret = 0;

	/*
	 * hosthandle should be the dst.rport value.
	 * hosthandle ignored as fcloop currently is
	 * 1:1 tgtport vs remoteport
	 */
	tls_req->lsreq = lsreq;
	INIT_LIST_HEAD(&tls_req->ls_list);

	if (!tport->remoteport) {
		tls_req->status = -ECONNREFUSED;
		spin_lock(&tport->lock);
		list_add_tail(&tls_req->ls_list, &tport->ls_list);
		spin_unlock(&tport->lock);
		queue_work(nvmet_wq, &tport->ls_work);
		return ret;
	}

	tls_req->status = 0;
	ret = nvme_fc_rcv_ls_req(tport->remoteport, &tls_req->ls_rsp,
				 lsreq->rqstaddr, lsreq->rqstlen);

	return ret;
}

static int
fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_rsp *lsrsp)
{
	struct fcloop_lsreq *tls_req = ls_rsp_to_lsreq(lsrsp);
	struct nvmefc_ls_req *lsreq = tls_req->lsreq;
	struct fcloop_rport *rport = remoteport->private;
	struct nvmet_fc_target_port *targetport = rport->targetport;
	struct fcloop_tport *tport;

	memcpy(lsreq->rspaddr, lsrsp->rspbuf,
		((lsreq->rsplen < lsrsp->rsplen) ?
				lsreq->rsplen : lsrsp->rsplen));
	lsrsp->done(lsrsp);

	if (targetport) {
		tport = targetport->private;
		spin_lock(&tport->lock);
		list_add_tail(&tls_req->ls_list, &tport->ls_list);
		spin_unlock(&tport->lock);
		queue_work(nvmet_wq, &tport->ls_work);
	}

	return 0;
}

static void
fcloop_t2h_host_release(void *hosthandle)
{
	/* host handle ignored for now */
}

/*
 * Simulate reception of an RSCN and convert it to an initiator transport
 * call to rescan a remote port.
 */
static void
fcloop_tgt_rscn_work(struct work_struct *work)
{
	struct fcloop_rscn *tgt_rscn =
		container_of(work, struct fcloop_rscn, work);
	struct fcloop_tport *tport = tgt_rscn->tport;

	if (tport->remoteport)
		nvme_fc_rescan_remoteport(tport->remoteport);
	kfree(tgt_rscn);
}

static void
fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
{
	struct fcloop_rscn *tgt_rscn;

	tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
	if (!tgt_rscn)
		return;

	tgt_rscn->tport = tgtport->private;
	INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);

	queue_work(nvmet_wq, &tgt_rscn->work);
}

static void
fcloop_tfcp_req_free(struct kref *ref)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(ref, struct fcloop_fcpreq, ref);

	kfree(tfcp_req);
}

static void
fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
{
	kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
}

static int
fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
{
	return kref_get_unless_zero(&tfcp_req->ref);
}

static void
fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
			struct fcloop_fcpreq *tfcp_req, int status)
{
	struct fcloop_ini_fcpreq *inireq = NULL;

	if (fcpreq) {
		inireq = fcpreq->private;
		spin_lock(&inireq->inilock);
		inireq->tfcp_req = NULL;
		spin_unlock(&inireq->inilock);

		fcpreq->status = status;
		fcpreq->done(fcpreq);
	}

	/* release original io reference on tgt struct */
	fcloop_tfcp_req_put(tfcp_req);
}

static bool drop_fabric_opcode;
#define DROP_OPCODE_MASK	0x00FF
/* fabrics opcode will have a bit set above 1st byte */
static int drop_opcode = -1;
static int drop_instance;
static int drop_amount;
static int drop_current_cnt;
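
/*
 * The drop_* variables implement command-drop fault injection, set up
 * through the set_cmd_drop sysfs attribute (see fcloop_set_cmd_drop()),
 * which parses the written string as "<opcode>:<starting instance>:<count>".
 * Any bit set above the low byte of <opcode> selects fabrics commands;
 * the low byte is then matched against the fabrics command type.
 */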

/*
 * Routine to parse io and determine if the io is to be dropped.
 * Returns:
 *	0 if io is not obstructed
 *	1 if io was dropped
 */
static int check_for_drop(struct fcloop_fcpreq *tfcp_req)
{
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	struct nvme_fc_cmd_iu *cmdiu = fcpreq->cmdaddr;
	struct nvme_command *sqe = &cmdiu->sqe;

	if (drop_opcode == -1)
		return 0;

	pr_info("%s: seq opcd x%02x fctype x%02x: drop F %s op x%02x "
		"inst %d start %d amt %d\n",
		__func__, sqe->common.opcode, sqe->fabrics.fctype,
		drop_fabric_opcode ? "y" : "n",
		drop_opcode, drop_current_cnt, drop_instance, drop_amount);

	if ((drop_fabric_opcode &&
	     (sqe->common.opcode != nvme_fabrics_command ||
	      sqe->fabrics.fctype != drop_opcode)) ||
	    (!drop_fabric_opcode && sqe->common.opcode != drop_opcode))
		return 0;

	if (++drop_current_cnt >= drop_instance) {
		if (drop_current_cnt >= drop_instance + drop_amount)
			drop_opcode = -1;
		return 1;
	}

	return 0;
}

static void
fcloop_fcp_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
	struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
	unsigned long flags;
	int ret = 0;
	bool aborted = false;

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
		tfcp_req->inistate = INI_IO_ACTIVE;
		break;
	case INI_IO_ABORTED:
		aborted = true;
		break;
	default:
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		WARN_ON(1);
		return;
	}
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (unlikely(aborted))
		ret = -ECANCELED;
	else {
		if (likely(!check_for_drop(tfcp_req)))
			ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
				&tfcp_req->tgt_fcp_req,
				fcpreq->cmdaddr, fcpreq->cmdlen);
		else
			pr_info("%s: dropped command ********\n", __func__);
	}
	if (ret)
		fcloop_call_host_done(fcpreq, tfcp_req, ret);
}

static void
fcloop_fcp_abort_recv_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, abort_rcv_work);
	struct nvmefc_fcp_req *fcpreq;
	bool completed = false;
	unsigned long flags;

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	fcpreq = tfcp_req->fcpreq;
	switch (tfcp_req->inistate) {
	case INI_IO_ABORTED:
		break;
	case INI_IO_COMPLETED:
		completed = true;
		break;
	default:
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		WARN_ON(1);
		return;
	}
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (unlikely(completed)) {
		/* remove reference taken in original abort downcall */
		fcloop_tfcp_req_put(tfcp_req);
		return;
	}

	if (tfcp_req->tport->targetport)
		nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
					&tfcp_req->tgt_fcp_req);

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	tfcp_req->fcpreq = NULL;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
	/* call_host_done releases reference for abort downcall */
}

/*
 * FCP IO operation done by target completion.
 * call back up initiator "done" flows.
 */
static void
fcloop_tgt_fcprqst_done_work(struct work_struct *work)
{
	struct fcloop_fcpreq *tfcp_req =
		container_of(work, struct fcloop_fcpreq, tio_done_work);
	struct nvmefc_fcp_req *fcpreq;
	unsigned long flags;

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	fcpreq = tfcp_req->fcpreq;
	tfcp_req->inistate = INI_IO_COMPLETED;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
}


static int
fcloop_fcp_req(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_rport *rport = remoteport->private;
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;

	if (!rport->targetport)
		return -ECONNREFUSED;

	tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
	if (!tfcp_req)
		return -ENOMEM;

	inireq->fcpreq = fcpreq;
	inireq->tfcp_req = tfcp_req;
	spin_lock_init(&inireq->inilock);

	tfcp_req->fcpreq = fcpreq;
	tfcp_req->tport = rport->targetport->private;
	tfcp_req->inistate = INI_IO_START;
	spin_lock_init(&tfcp_req->reqlock);
	INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
	INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
	INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
	kref_init(&tfcp_req->ref);

	queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);

	return 0;
}

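/*
 * Copy between the target-side data sg list (data_sg) and the initiator
 * i/o sg list (io_sg): the first loop below skips "offset" bytes of the
 * initiator list, the second copies "length" bytes, crossing sg element
 * boundaries on either side as needed (WRITEDATA copies io -> data, the
 * read ops copy data -> io).
 */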
static void
fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
			struct scatterlist *io_sg, u32 offset, u32 length)
{
	void *data_p, *io_p;
	u32 data_len, io_len, tlen;

	io_p = sg_virt(io_sg);
	io_len = io_sg->length;

	for ( ; offset; ) {
		tlen = min_t(u32, offset, io_len);
		offset -= tlen;
		io_len -= tlen;
		if (!io_len) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;
	}

	data_p = sg_virt(data_sg);
	data_len = data_sg->length;

	for ( ; length; ) {
		tlen = min_t(u32, io_len, data_len);
		tlen = min_t(u32, tlen, length);

		if (op == NVMET_FCOP_WRITEDATA)
			memcpy(data_p, io_p, tlen);
		else
			memcpy(io_p, data_p, tlen);

		length -= tlen;

		io_len -= tlen;
		if ((!io_len) && (length)) {
			io_sg = sg_next(io_sg);
			io_p = sg_virt(io_sg);
			io_len = io_sg->length;
		} else
			io_p += tlen;

		data_len -= tlen;
		if ((!data_len) && (length)) {
			data_sg = sg_next(data_sg);
			data_p = sg_virt(data_sg);
			data_len = data_sg->length;
		} else
			data_p += tlen;
	}
}

static int
fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	struct nvmefc_fcp_req *fcpreq;
	u32 rsplen = 0, xfrlen = 0;
	int fcp_err = 0, active, aborted;
	u8 op = tgt_fcpreq->op;
	unsigned long flags;

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	fcpreq = tfcp_req->fcpreq;
	active = tfcp_req->active;
	aborted = tfcp_req->aborted;
	tfcp_req->active = true;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (unlikely(active))
		/* illegal - call while i/o active */
		return -EALREADY;

	if (unlikely(aborted)) {
		/* target transport has aborted i/o prior */
		spin_lock_irqsave(&tfcp_req->reqlock, flags);
		tfcp_req->active = false;
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		tgt_fcpreq->transferred_length = 0;
		tgt_fcpreq->fcp_error = -ECANCELED;
		tgt_fcpreq->done(tgt_fcpreq);
		return 0;
	}

	/*
	 * if fcpreq is NULL, the I/O has been aborted (from
	 * initiator side). For the target side, act as if all is well
	 * but don't actually move data.
	 */

	switch (op) {
	case NVMET_FCOP_WRITEDATA:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		break;

	case NVMET_FCOP_READDATA:
	case NVMET_FCOP_READDATA_RSP:
		xfrlen = tgt_fcpreq->transfer_length;
		if (fcpreq) {
			fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
					fcpreq->first_sgl, tgt_fcpreq->offset,
					xfrlen);
			fcpreq->transferred_length += xfrlen;
		}
		if (op == NVMET_FCOP_READDATA)
			break;

		/* Fall-Thru to RSP handling */
		fallthrough;

	case NVMET_FCOP_RSP:
		if (fcpreq) {
			rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
					fcpreq->rsplen : tgt_fcpreq->rsplen);
			memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
			if (rsplen < tgt_fcpreq->rsplen)
				fcp_err = -E2BIG;
			fcpreq->rcv_rsplen = rsplen;
			fcpreq->status = 0;
		}
		tfcp_req->status = 0;
		break;

	default:
		fcp_err = -EINVAL;
		break;
	}

	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	tfcp_req->active = false;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	tgt_fcpreq->transferred_length = xfrlen;
	tgt_fcpreq->fcp_error = fcp_err;
	tgt_fcpreq->done(tgt_fcpreq);

	return 0;
}

static void
fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
	unsigned long flags;

	/*
	 * mark aborted only in case there were 2 threads in transport
	 * (one doing io, other doing abort) and only kills ops posted
	 * after the abort request
	 */
	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	tfcp_req->aborted = true;
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	tfcp_req->status = NVME_SC_INTERNAL;

	/*
	 * nothing more to do. If io wasn't active, the transport should
	 * immediately call the req_release. If it was active, the op
	 * will complete, and the lldd should call req_release.
	 */
}

static void
fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
			struct nvmefc_tgt_fcp_req *tgt_fcpreq)
{
	struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

	queue_work(nvmet_wq, &tfcp_req->tio_done_work);
}

static void
fcloop_h2t_ls_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_t2h_ls_abort(struct nvmet_fc_target_port *targetport,
			void *hosthandle, struct nvmefc_ls_req *lsreq)
{
}

static void
fcloop_fcp_abort(struct nvme_fc_local_port *localport,
			struct nvme_fc_remote_port *remoteport,
			void *hw_queue_handle,
			struct nvmefc_fcp_req *fcpreq)
{
	struct fcloop_ini_fcpreq *inireq = fcpreq->private;
	struct fcloop_fcpreq *tfcp_req;
	bool abortio = true;
	unsigned long flags;

	spin_lock(&inireq->inilock);
	tfcp_req = inireq->tfcp_req;
	if (tfcp_req)
		fcloop_tfcp_req_get(tfcp_req);
	spin_unlock(&inireq->inilock);

	if (!tfcp_req)
		/* abort has already been called */
		return;

	/* break initiator/target relationship for io */
	spin_lock_irqsave(&tfcp_req->reqlock, flags);
	switch (tfcp_req->inistate) {
	case INI_IO_START:
	case INI_IO_ACTIVE:
		tfcp_req->inistate = INI_IO_ABORTED;
		break;
	case INI_IO_COMPLETED:
		abortio = false;
		break;
	default:
		spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
		WARN_ON(1);
		return;
	}
	spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

	if (abortio)
		/* leave the reference while the work item is scheduled */
		WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
	else {
		/*
		 * as the io has already had the done callback made,
		 * nothing more to do. So release the reference taken above
		 */
		fcloop_tfcp_req_put(tfcp_req);
	}
}

static void
fcloop_nport_free(struct kref *ref)
{
	struct fcloop_nport *nport =
		container_of(ref, struct fcloop_nport, ref);
	unsigned long flags;

	spin_lock_irqsave(&fcloop_lock, flags);
	list_del(&nport->nport_list);
	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(nport);
}

static void
fcloop_nport_put(struct fcloop_nport *nport)
{
	kref_put(&nport->ref, fcloop_nport_free);
}

static int
fcloop_nport_get(struct fcloop_nport *nport)
{
	return kref_get_unless_zero(&nport->ref);
}

static void
fcloop_localport_delete(struct nvme_fc_local_port *localport)
{
	struct fcloop_lport_priv *lport_priv = localport->private;
	struct fcloop_lport *lport = lport_priv->lport;

	/* release any threads waiting for the unreg to complete */
	complete(&lport->unreg_done);
}

static void
fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
{
	struct fcloop_rport *rport = remoteport->private;

	flush_work(&rport->ls_work);
	fcloop_nport_put(rport->nport);
}

static void
fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
{
	struct fcloop_tport *tport = targetport->private;

	flush_work(&tport->ls_work);
	fcloop_nport_put(tport->nport);
}

#define	FCLOOP_HW_QUEUES		4
#define	FCLOOP_SGL_SEGS			256
#define FCLOOP_DMABOUND_4G		0xFFFFFFFF

static struct nvme_fc_port_template fctemplate = {
	.localport_delete	= fcloop_localport_delete,
	.remoteport_delete	= fcloop_remoteport_delete,
	.create_queue		= fcloop_create_queue,
	.delete_queue		= fcloop_delete_queue,
	.ls_req			= fcloop_h2t_ls_req,
	.fcp_io			= fcloop_fcp_req,
	.ls_abort		= fcloop_h2t_ls_abort,
	.fcp_abort		= fcloop_fcp_abort,
	.xmt_ls_rsp		= fcloop_t2h_xmt_ls_rsp,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* sizes of additional private data for data structures */
	.local_priv_sz		= sizeof(struct fcloop_lport_priv),
	.remote_priv_sz		= sizeof(struct fcloop_rport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
	.fcprqst_priv_sz	= sizeof(struct fcloop_ini_fcpreq),
};

static struct nvmet_fc_target_template tgttemplate = {
	.targetport_delete	= fcloop_targetport_delete,
	.xmt_ls_rsp		= fcloop_h2t_xmt_ls_rsp,
	.fcp_op			= fcloop_fcp_op,
	.fcp_abort		= fcloop_tgt_fcp_abort,
	.fcp_req_release	= fcloop_fcp_req_release,
	.discovery_event	= fcloop_tgt_discovery_evt,
	.ls_req			= fcloop_t2h_ls_req,
	.ls_abort		= fcloop_t2h_ls_abort,
	.host_release		= fcloop_t2h_host_release,
	.max_hw_queues		= FCLOOP_HW_QUEUES,
	.max_sgl_segments	= FCLOOP_SGL_SEGS,
	.max_dif_sgl_segments	= FCLOOP_SGL_SEGS,
	.dma_boundary		= FCLOOP_DMABOUND_4G,
	/* optional features */
	.target_features	= 0,
	/* sizes of additional private data for data structures */
	.target_priv_sz		= sizeof(struct fcloop_tport),
	.lsrqst_priv_sz		= sizeof(struct fcloop_lsreq),
};
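
/*
 * Note both templates set lsrqst_priv_sz to sizeof(struct fcloop_lsreq):
 * the private area of each nvmefc_ls_req doubles as the fcloop_lsreq used
 * to loop the LS over to the other side (see fcloop_h2t_ls_req() and
 * fcloop_t2h_ls_req()).
 */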

static ssize_t
fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_port_info pinfo;
	struct fcloop_ctrl_options *opts;
	struct nvme_fc_local_port *localport;
	struct fcloop_lport *lport;
	struct fcloop_lport_priv *lport_priv;
	unsigned long flags;
	int ret = -ENOMEM;

	lport = kzalloc(sizeof(*lport), GFP_KERNEL);
	if (!lport)
		return -ENOMEM;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		goto out_free_lport;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = opts->wwnn;
	pinfo.port_name = opts->wwpn;
	pinfo.port_role = opts->roles;
	pinfo.port_id = opts->fcaddr;

	ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
	if (!ret) {
		/* success */
		lport_priv = localport->private;
		lport_priv->lport = lport;

		lport->localport = localport;
		INIT_LIST_HEAD(&lport->lport_list);

		spin_lock_irqsave(&fcloop_lock, flags);
		list_add_tail(&lport->lport_list, &fcloop_lports);
		spin_unlock_irqrestore(&fcloop_lock, flags);
	}

out_free_opts:
	kfree(opts);
out_free_lport:
	/* free only if we're going to fail */
	if (ret)
		kfree(lport);

	return ret ? ret : count;
}


static void
__unlink_local_port(struct fcloop_lport *lport)
{
	list_del(&lport->lport_list);
}

static int
__wait_localport_unreg(struct fcloop_lport *lport)
{
	int ret;

	init_completion(&lport->unreg_done);

	ret = nvme_fc_unregister_localport(lport->localport);

	wait_for_completion(&lport->unreg_done);

	kfree(lport);

	return ret;
}


static ssize_t
fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_lport *tlport, *lport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tlport, &fcloop_lports, lport_list) {
		if (tlport->localport->node_name == nodename &&
		    tlport->localport->port_name == portname) {
			lport = tlport;
			__unlink_local_port(lport);
			break;
		}
	}
	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!lport)
		return -ENOENT;

	ret = __wait_localport_unreg(lport);

	return ret ? ret : count;
}

static struct fcloop_nport *
fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
{
	struct fcloop_nport *newnport, *nport = NULL;
	struct fcloop_lport *tmplport, *lport = NULL;
	struct fcloop_ctrl_options *opts;
	unsigned long flags;
	u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
	int ret;

	opts = kzalloc(sizeof(*opts), GFP_KERNEL);
	if (!opts)
		return NULL;

	ret = fcloop_parse_options(opts, buf);
	if (ret)
		goto out_free_opts;

	/* everything there ? */
	if ((opts->mask & opts_mask) != opts_mask) {
		ret = -EINVAL;
		goto out_free_opts;
	}

	newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
	if (!newnport)
		goto out_free_opts;

	INIT_LIST_HEAD(&newnport->nport_list);
	newnport->node_name = opts->wwnn;
	newnport->port_name = opts->wwpn;
	if (opts->mask & NVMF_OPT_ROLES)
		newnport->port_role = opts->roles;
	if (opts->mask & NVMF_OPT_FCADDR)
		newnport->port_id = opts->fcaddr;
	kref_init(&newnport->ref);

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
		if (tmplport->localport->node_name == opts->wwnn &&
		    tmplport->localport->port_name == opts->wwpn)
			goto out_invalid_opts;

		if (tmplport->localport->node_name == opts->lpwwnn &&
		    tmplport->localport->port_name == opts->lpwwpn)
			lport = tmplport;
	}

	if (remoteport) {
		if (!lport)
			goto out_invalid_opts;
		newnport->lport = lport;
	}

	list_for_each_entry(nport, &fcloop_nports, nport_list) {
		if (nport->node_name == opts->wwnn &&
		    nport->port_name == opts->wwpn) {
			if ((remoteport && nport->rport) ||
			    (!remoteport && nport->tport)) {
				nport = NULL;
				goto out_invalid_opts;
			}

			fcloop_nport_get(nport);

			spin_unlock_irqrestore(&fcloop_lock, flags);

			if (remoteport)
				nport->lport = lport;
			if (opts->mask & NVMF_OPT_ROLES)
				nport->port_role = opts->roles;
			if (opts->mask & NVMF_OPT_FCADDR)
				nport->port_id = opts->fcaddr;
			goto out_free_newnport;
		}
	}

	list_add_tail(&newnport->nport_list, &fcloop_nports);

	spin_unlock_irqrestore(&fcloop_lock, flags);

	kfree(opts);
	return newnport;

out_invalid_opts:
	spin_unlock_irqrestore(&fcloop_lock, flags);
out_free_newnport:
	kfree(newnport);
out_free_opts:
	kfree(opts);
	return nport;
}

static ssize_t
fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvme_fc_remote_port *remoteport;
	struct fcloop_nport *nport;
	struct fcloop_rport *rport;
	struct nvme_fc_port_info pinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, true);
	if (!nport)
		return -EIO;

	memset(&pinfo, 0, sizeof(pinfo));
	pinfo.node_name = nport->node_name;
	pinfo.port_name = nport->port_name;
	pinfo.port_role = nport->port_role;
	pinfo.port_id = nport->port_id;

	ret = nvme_fc_register_remoteport(nport->lport->localport,
						&pinfo, &remoteport);
	if (ret || !remoteport) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	rport = remoteport->private;
	rport->remoteport = remoteport;
	rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
	if (nport->tport) {
		nport->tport->remoteport = remoteport;
		nport->tport->lport = nport->lport;
	}
	rport->nport = nport;
	rport->lport = nport->lport;
	nport->rport = rport;
	spin_lock_init(&rport->lock);
	INIT_WORK(&rport->ls_work, fcloop_rport_lsrqst_work);
	INIT_LIST_HEAD(&rport->ls_list);

	return count;
}


static struct fcloop_rport *
__unlink_remote_port(struct fcloop_nport *nport)
{
	struct fcloop_rport *rport = nport->rport;

	if (rport && nport->tport)
		nport->tport->remoteport = NULL;
	nport->rport = NULL;

	return rport;
}

static int
__remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
{
	if (!rport)
		return -EALREADY;

	return nvme_fc_unregister_remoteport(rport->remoteport);
}

static ssize_t
fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_rport *rport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->rport) {
			nport = tmpport;
			rport = __unlink_remote_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __remoteport_unreg(nport, rport);

	return ret ? ret : count;
}

static ssize_t
fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct nvmet_fc_target_port *targetport;
	struct fcloop_nport *nport;
	struct fcloop_tport *tport;
	struct nvmet_fc_port_info tinfo;
	int ret;

	nport = fcloop_alloc_nport(buf, count, false);
	if (!nport)
		return -EIO;

	tinfo.node_name = nport->node_name;
	tinfo.port_name = nport->port_name;
	tinfo.port_id = nport->port_id;

	ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
						&targetport);
	if (ret) {
		fcloop_nport_put(nport);
		return ret;
	}

	/* success */
	tport = targetport->private;
	tport->targetport = targetport;
	tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
	if (nport->rport)
		nport->rport->targetport = targetport;
	tport->nport = nport;
	tport->lport = nport->lport;
	nport->tport = tport;
	spin_lock_init(&tport->lock);
	INIT_WORK(&tport->ls_work, fcloop_tport_lsrqst_work);
	INIT_LIST_HEAD(&tport->ls_list);

	return count;
}


static struct fcloop_tport *
__unlink_target_port(struct fcloop_nport *nport)
{
	struct fcloop_tport *tport = nport->tport;

	if (tport && nport->rport)
		nport->rport->targetport = NULL;
	nport->tport = NULL;

	return tport;
}

static int
__targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
{
	if (!tport)
		return -EALREADY;

	return nvmet_fc_unregister_targetport(tport->targetport);
}

static ssize_t
fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct fcloop_nport *nport = NULL, *tmpport;
	struct fcloop_tport *tport = NULL;
	u64 nodename, portname;
	unsigned long flags;
	int ret;

	ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
	if (ret)
		return ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
		if (tmpport->node_name == nodename &&
		    tmpport->port_name == portname && tmpport->tport) {
			nport = tmpport;
			tport = __unlink_target_port(nport);
			break;
		}
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	if (!nport)
		return -ENOENT;

	ret = __targetport_unreg(nport, tport);

	return ret ? ret : count;
}

static ssize_t
fcloop_set_cmd_drop(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	unsigned int opcode;
	int starting, amount;

	if (sscanf(buf, "%x:%d:%d", &opcode, &starting, &amount) != 3)
		return -EBADRQC;

	drop_current_cnt = 0;
	drop_fabric_opcode = (opcode & ~DROP_OPCODE_MASK) ? true : false;
	drop_opcode = (opcode & DROP_OPCODE_MASK);
	drop_instance = starting;
	/* the check to drop routine uses instance + count to know when
	 * to end. Thus, if dropping 1 instance, count should be 0.
	 * so subtract 1 from the count.
	 */
	drop_amount = amount - 1;

	pr_info("%s: DROP: Starting at instance %d of%s opcode x%x drop +%d "
		"instances\n",
		__func__, drop_instance, drop_fabric_opcode ? " fabric" : "",
		drop_opcode, drop_amount);

	return count;
}
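
/*
 * For example (illustrative values), to drop the 3rd and 4th instance
 * of a fabrics Connect command (fctype 0x01, with a bit set above the
 * low byte to select fabrics matching):
 *
 *	echo "0x101:3:2" > /sys/class/fcloop/ctl/set_cmd_drop
 */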

static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
static DEVICE_ATTR(set_cmd_drop, 0200, NULL, fcloop_set_cmd_drop);

static struct attribute *fcloop_dev_attrs[] = {
	&dev_attr_add_local_port.attr,
	&dev_attr_del_local_port.attr,
	&dev_attr_add_remote_port.attr,
	&dev_attr_del_remote_port.attr,
	&dev_attr_add_target_port.attr,
	&dev_attr_del_target_port.attr,
	&dev_attr_set_cmd_drop.attr,
	NULL
};

static const struct attribute_group fcloop_dev_attrs_group = {
	.attrs		= fcloop_dev_attrs,
};

static const struct attribute_group *fcloop_dev_attr_groups[] = {
	&fcloop_dev_attrs_group,
	NULL,
};

static struct class *fcloop_class;
static struct device *fcloop_device;


static int __init fcloop_init(void)
{
	int ret;

	fcloop_class = class_create(THIS_MODULE, "fcloop");
	if (IS_ERR(fcloop_class)) {
		pr_err("couldn't register class fcloop\n");
		ret = PTR_ERR(fcloop_class);
		return ret;
	}

	fcloop_device = device_create_with_groups(
				fcloop_class, NULL, MKDEV(0, 0), NULL,
				fcloop_dev_attr_groups, "ctl");
	if (IS_ERR(fcloop_device)) {
		pr_err("couldn't create ctl device!\n");
		ret = PTR_ERR(fcloop_device);
		goto out_destroy_class;
	}

	get_device(fcloop_device);

	return 0;

out_destroy_class:
	class_destroy(fcloop_class);
	return ret;
}

static void __exit fcloop_exit(void)
{
	struct fcloop_lport *lport = NULL;
	struct fcloop_nport *nport = NULL;
	struct fcloop_tport *tport;
	struct fcloop_rport *rport;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&fcloop_lock, flags);

	for (;;) {
		nport = list_first_entry_or_null(&fcloop_nports,
						typeof(*nport), nport_list);
		if (!nport)
			break;

		tport = __unlink_target_port(nport);
		rport = __unlink_remote_port(nport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __targetport_unreg(nport, tport);
		if (ret)
			pr_warn("%s: Failed deleting target port\n", __func__);

		ret = __remoteport_unreg(nport, rport);
		if (ret)
			pr_warn("%s: Failed deleting remote port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	for (;;) {
		lport = list_first_entry_or_null(&fcloop_lports,
						typeof(*lport), lport_list);
		if (!lport)
			break;

		__unlink_local_port(lport);

		spin_unlock_irqrestore(&fcloop_lock, flags);

		ret = __wait_localport_unreg(lport);
		if (ret)
			pr_warn("%s: Failed deleting local port\n", __func__);

		spin_lock_irqsave(&fcloop_lock, flags);
	}

	spin_unlock_irqrestore(&fcloop_lock, flags);

	put_device(fcloop_device);

	device_destroy(fcloop_class, MKDEV(0, 0));
	class_destroy(fcloop_class);
}

module_init(fcloop_init);
module_exit(fcloop_exit);

MODULE_LICENSE("GPL v2");