1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2016 Avago Technologies. All rights reserved.
4 */
5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/module.h>
7 #include <linux/slab.h>
8 #include <linux/blk-mq.h>
9 #include <linux/parser.h>
10 #include <linux/random.h>
11 #include <uapi/scsi/fc/fc_fs.h>
12 #include <uapi/scsi/fc/fc_els.h>
13
14 #include "nvmet.h"
15 #include <linux/nvme-fc-driver.h>
16 #include <linux/nvme-fc.h>
17 #include "../host/fc.h"
18
19
20 /* *************************** Data Structures/Defines ****************** */
21
22
23 #define NVMET_LS_CTX_COUNT 256
24
25 struct nvmet_fc_tgtport;
26 struct nvmet_fc_tgt_assoc;
27
28 struct nvmet_fc_ls_iod { /* for an LS RQST RCV */
29 struct nvmefc_ls_rsp *lsrsp;
30 struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */
31
32 struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */
33
34 struct nvmet_fc_tgtport *tgtport;
35 struct nvmet_fc_tgt_assoc *assoc;
36 void *hosthandle;
37
38 union nvmefc_ls_requests *rqstbuf;
39 union nvmefc_ls_responses *rspbuf;
40 u16 rqstdatalen;
41 dma_addr_t rspdma;
42
43 struct scatterlist sg[2];
44
45 struct work_struct work;
46 } __aligned(sizeof(unsigned long long));
47
48 struct nvmet_fc_ls_req_op { /* for an LS RQST XMT */
49 struct nvmefc_ls_req ls_req;
50
51 struct nvmet_fc_tgtport *tgtport;
52 void *hosthandle;
53
54 int ls_error;
55 struct list_head lsreq_list; /* tgtport->ls_req_list */
56 bool req_queued;
57 };
58
59
60 /* desired maximum for a single sequence - if sg list allows it */
61 #define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024)
62
63 enum nvmet_fcp_datadir {
64 NVMET_FCP_NODATA,
65 NVMET_FCP_WRITE,
66 NVMET_FCP_READ,
67 NVMET_FCP_ABORTED,
68 };
69
70 struct nvmet_fc_fcp_iod {
71 struct nvmefc_tgt_fcp_req *fcpreq;
72
73 struct nvme_fc_cmd_iu cmdiubuf;
74 struct nvme_fc_ersp_iu rspiubuf;
75 dma_addr_t rspdma;
76 struct scatterlist *next_sg;
77 struct scatterlist *data_sg;
78 int data_sg_cnt;
79 u32 offset;
80 enum nvmet_fcp_datadir io_dir;
81 bool active;
82 bool abort;
83 bool aborted;
84 bool writedataactive;
85 spinlock_t flock;
86
87 struct nvmet_req req;
88 struct work_struct defer_work;
89
90 struct nvmet_fc_tgtport *tgtport;
91 struct nvmet_fc_tgt_queue *queue;
92
93 struct list_head fcp_list; /* tgtport->fcp_list */
94 };
95
96 struct nvmet_fc_tgtport {
97 struct nvmet_fc_target_port fc_target_port;
98
99 struct list_head tgt_list; /* nvmet_fc_target_list */
100 struct device *dev; /* dev for dma mapping */
101 struct nvmet_fc_target_template *ops;
102
103 struct nvmet_fc_ls_iod *iod;
104 spinlock_t lock;
105 struct list_head ls_rcv_list;
106 struct list_head ls_req_list;
107 struct list_head ls_busylist;
108 struct list_head assoc_list;
109 struct list_head host_list;
110 struct ida assoc_cnt;
111 struct nvmet_fc_port_entry *pe;
112 struct kref ref;
113 u32 max_sg_cnt;
114 };
115
116 struct nvmet_fc_port_entry {
117 struct nvmet_fc_tgtport *tgtport;
118 struct nvmet_port *port;
119 u64 node_name;
120 u64 port_name;
121 struct list_head pe_list;
122 };
123
124 struct nvmet_fc_defer_fcp_req {
125 struct list_head req_list;
126 struct nvmefc_tgt_fcp_req *fcp_req;
127 };
128
129 struct nvmet_fc_tgt_queue {
130 bool ninetypercent;
131 u16 qid;
132 u16 sqsize;
133 u16 ersp_ratio;
134 __le16 sqhd;
135 atomic_t connected;
136 atomic_t sqtail;
137 atomic_t zrspcnt;
138 atomic_t rsn;
139 spinlock_t qlock;
140 struct nvmet_cq nvme_cq;
141 struct nvmet_sq nvme_sq;
142 struct nvmet_fc_tgt_assoc *assoc;
143 struct list_head fod_list;
144 struct list_head pending_cmd_list;
145 struct list_head avail_defer_list;
146 struct workqueue_struct *work_q;
147 struct kref ref;
148 struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */
149 } __aligned(sizeof(unsigned long long));
150
151 struct nvmet_fc_hostport {
152 struct nvmet_fc_tgtport *tgtport;
153 void *hosthandle;
154 struct list_head host_list;
155 struct kref ref;
156 u8 invalid;
157 };
158
159 struct nvmet_fc_tgt_assoc {
160 u64 association_id;
161 u32 a_id;
162 atomic_t terminating;
163 struct nvmet_fc_tgtport *tgtport;
164 struct nvmet_fc_hostport *hostport;
165 struct nvmet_fc_ls_iod *rcv_disconn;
166 struct list_head a_list;
167 struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
168 struct kref ref;
169 struct work_struct del_work;
170 };
171
172
173 static inline int
174 nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
175 {
176 return (iodptr - iodptr->tgtport->iod);
177 }
178
179 static inline int
180 nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
181 {
182 return (fodptr - fodptr->queue->fod);
183 }
184
185
186 /*
187 * Association and Connection IDs:
188 *
189 * Association ID will have a random number in the upper 6 bytes and
190 * zero in the lower 2 bytes (see the worked example below)
191 *
192 * Connection IDs will be Association ID with QID or'd in lower 2 bytes
193 *
194 * note: Association ID = Connection ID for queue 0
195 */
196 #define BYTES_FOR_QID sizeof(u16)
197 #define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8)
198 #define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
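/*
 * Worked example (illustrative values only, assumed for illustration):
 * with a random association id of 0x1122334455660000, the connection id
 * for qid 3 is 0x1122334455660000 | 0x0003 = 0x1122334455660003.
 * Masking with NVMET_FC_QUEUEID_MASK (0xffff) recovers the qid, and
 * clearing the low 2 bytes recovers the association id.
 */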
199
200 static inline u64
201 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
202 {
203 return (assoc->association_id | qid);
204 }
205
206 static inline u64
207 nvmet_fc_getassociationid(u64 connectionid)
208 {
209 return connectionid & ~NVMET_FC_QUEUEID_MASK;
210 }
211
212 static inline u16
213 nvmet_fc_getqueueid(u64 connectionid)
214 {
215 return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
216 }
217
218 static inline struct nvmet_fc_tgtport *
219 targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
220 {
221 return container_of(targetport, struct nvmet_fc_tgtport,
222 fc_target_port);
223 }
224
225 static inline struct nvmet_fc_fcp_iod *
226 nvmet_req_to_fod(struct nvmet_req *nvme_req)
227 {
228 return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
229 }
230
231
232 /* *************************** Globals **************************** */
233
234
235 static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
236
237 static LIST_HEAD(nvmet_fc_target_list);
238 static DEFINE_IDA(nvmet_fc_tgtport_cnt);
239 static LIST_HEAD(nvmet_fc_portentry_list);
240
241
242 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
243 static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
244 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
245 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
246 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
247 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
248 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
249 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
250 static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
251 struct nvmet_fc_fcp_iod *fod);
252 static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
253 static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
254 struct nvmet_fc_ls_iod *iod);
255
256
257 /* *********************** FC-NVME DMA Handling **************************** */
258
259 /*
260 * The fcloop device passes in a NULL device pointer. Real LLD's will
261 * pass in a valid device pointer. If NULL is passed to the dma mapping
262 * routines, depending on the platform, it may or may not succeed, and
263 * may crash.
264 *
265 * As such:
266 * Wrap all the dma routines and check the dev pointer.
267 *
268 * For simple mappings (those that return just a dma address), we'll
269 * noop them, returning a dma address of 0.
270 *
271 * On more complex mappings (dma_map_sg), a pseudo routine fills
272 * in the scatter list, setting all dma addresses to 0.
273 */
274
275 static inline dma_addr_t
276 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
277 enum dma_data_direction dir)
278 {
279 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
280 }
281
282 static inline int
283 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
284 {
285 return dev ? dma_mapping_error(dev, dma_addr) : 0;
286 }
287
288 static inline void
289 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
290 enum dma_data_direction dir)
291 {
292 if (dev)
293 dma_unmap_single(dev, addr, size, dir);
294 }
295
296 static inline void
297 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
298 enum dma_data_direction dir)
299 {
300 if (dev)
301 dma_sync_single_for_cpu(dev, addr, size, dir);
302 }
303
304 static inline void
305 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
306 enum dma_data_direction dir)
307 {
308 if (dev)
309 dma_sync_single_for_device(dev, addr, size, dir);
310 }
311
312 /* pseudo dma_map_sg call */
313 static int
314 fc_map_sg(struct scatterlist *sg, int nents)
315 {
316 struct scatterlist *s;
317 int i;
318
319 WARN_ON(nents == 0 || sg[0].length == 0);
320
321 for_each_sg(sg, s, nents, i) {
322 s->dma_address = 0L;
323 #ifdef CONFIG_NEED_SG_DMA_LENGTH
324 s->dma_length = s->length;
325 #endif
326 }
327 return nents;
328 }
329
330 static inline int
331 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
332 enum dma_data_direction dir)
333 {
334 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
335 }
336
337 static inline void
338 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
339 enum dma_data_direction dir)
340 {
341 if (dev)
342 dma_unmap_sg(dev, sg, nents, dir);
343 }
344
345
346 /* ********************** FC-NVME LS XMT Handling ************************* */
347
348
349 static void
350 __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
351 {
352 struct nvmet_fc_tgtport *tgtport = lsop->tgtport;
353 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
354 unsigned long flags;
355
356 spin_lock_irqsave(&tgtport->lock, flags);
357
358 if (!lsop->req_queued) {
359 spin_unlock_irqrestore(&tgtport->lock, flags);
360 goto out_puttgtport;
361 }
362
363 list_del(&lsop->lsreq_list);
364
365 lsop->req_queued = false;
366
367 spin_unlock_irqrestore(&tgtport->lock, flags);
368
369 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
370 (lsreq->rqstlen + lsreq->rsplen),
371 DMA_BIDIRECTIONAL);
372
373 out_puttgtport:
374 nvmet_fc_tgtport_put(tgtport);
375 }
376
377 static int
378 __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
379 struct nvmet_fc_ls_req_op *lsop,
380 void (*done)(struct nvmefc_ls_req *req, int status))
381 {
382 struct nvmefc_ls_req *lsreq = &lsop->ls_req;
383 unsigned long flags;
384 int ret = 0;
385
386 if (!tgtport->ops->ls_req)
387 return -EOPNOTSUPP;
388
389 if (!nvmet_fc_tgtport_get(tgtport))
390 return -ESHUTDOWN;
391
392 lsreq->done = done;
393 lsop->req_queued = false;
394 INIT_LIST_HEAD(&lsop->lsreq_list);
395
396 lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
397 lsreq->rqstlen + lsreq->rsplen,
398 DMA_BIDIRECTIONAL);
399 if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) {
400 ret = -EFAULT;
401 goto out_puttgtport;
402 }
403 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
404
405 spin_lock_irqsave(&tgtport->lock, flags);
406
407 list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list);
408
409 lsop->req_queued = true;
410
411 spin_unlock_irqrestore(&tgtport->lock, flags);
412
413 ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle,
414 lsreq);
415 if (ret)
416 goto out_unlink;
417
418 return 0;
419
420 out_unlink:
421 lsop->ls_error = ret;
422 spin_lock_irqsave(&tgtport->lock, flags);
423 lsop->req_queued = false;
424 list_del(&lsop->lsreq_list);
425 spin_unlock_irqrestore(&tgtport->lock, flags);
426 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
427 (lsreq->rqstlen + lsreq->rsplen),
428 DMA_BIDIRECTIONAL);
429 out_puttgtport:
430 nvmet_fc_tgtport_put(tgtport);
431
432 return ret;
433 }
434
435 static int
436 nvmet_fc_send_ls_req_async(struct nvmet_fc_tgtport *tgtport,
437 struct nvmet_fc_ls_req_op *lsop,
438 void (*done)(struct nvmefc_ls_req *req, int status))
439 {
440 /* don't wait for completion */
441
442 return __nvmet_fc_send_ls_req(tgtport, lsop, done);
443 }
444
445 static void
446 nvmet_fc_disconnect_assoc_done(struct nvmefc_ls_req *lsreq, int status)
447 {
448 struct nvmet_fc_ls_req_op *lsop =
449 container_of(lsreq, struct nvmet_fc_ls_req_op, ls_req);
450
451 __nvmet_fc_finish_ls_req(lsop);
452
453 /* fc-nvme target doesn't care about success or failure of cmd */
454
455 kfree(lsop);
456 }
457
458 /*
459 * This routine sends a FC-NVME LS to disconnect (aka terminate)
460 * the FC-NVME Association. Terminating the association also
461 * terminates the FC-NVME connections (per queue, both admin and io
462 * queues) that are part of the association. I.e. things are torn
463 * down, and the related FC-NVME Association ID and Connection IDs
464 * become invalid.
465 *
466 * The behavior of the fc-nvme target is such that its
467 * understanding of the association and connections will implicitly
468 * be torn down. The action is implicit as it may be due to a loss of
469 * connectivity with the fc-nvme host, so the target may never get a
470 * response even if it tried. As such, the action of this routine
471 * is to asynchronously send the LS, ignore any results of the LS, and
472 * continue on with terminating the association. If the fc-nvme host
473 * is present and receives the LS, it too can tear down.
474 */
475 static void
476 nvmet_fc_xmt_disconnect_assoc(struct nvmet_fc_tgt_assoc *assoc)
477 {
478 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
479 struct fcnvme_ls_disconnect_assoc_rqst *discon_rqst;
480 struct fcnvme_ls_disconnect_assoc_acc *discon_acc;
481 struct nvmet_fc_ls_req_op *lsop;
482 struct nvmefc_ls_req *lsreq;
483 int ret;
484
485 /*
486 * If ls_req is NULL or there is no hosthandle, it's an older lldd and
487 * sending no message is normal. Otherwise, send unless the hostport has
488 * already been invalidated by the lldd.
489 */
490 if (!tgtport->ops->ls_req || !assoc->hostport ||
491 assoc->hostport->invalid)
492 return;
493
494 lsop = kzalloc((sizeof(*lsop) +
495 sizeof(*discon_rqst) + sizeof(*discon_acc) +
496 tgtport->ops->lsrqst_priv_sz), GFP_KERNEL);
497 if (!lsop) {
498 dev_info(tgtport->dev,
499 "{%d:%d} send Disconnect Association failed: ENOMEM\n",
500 tgtport->fc_target_port.port_num, assoc->a_id);
501 return;
502 }
503
504 discon_rqst = (struct fcnvme_ls_disconnect_assoc_rqst *)&lsop[1];
505 discon_acc = (struct fcnvme_ls_disconnect_assoc_acc *)&discon_rqst[1];
506 lsreq = &lsop->ls_req;
507 if (tgtport->ops->lsrqst_priv_sz)
508 lsreq->private = (void *)&discon_acc[1];
509 else
510 lsreq->private = NULL;
511
512 lsop->tgtport = tgtport;
513 lsop->hosthandle = assoc->hostport->hosthandle;
514
515 nvmefc_fmt_lsreq_discon_assoc(lsreq, discon_rqst, discon_acc,
516 assoc->association_id);
517
518 ret = nvmet_fc_send_ls_req_async(tgtport, lsop,
519 nvmet_fc_disconnect_assoc_done);
520 if (ret) {
521 dev_info(tgtport->dev,
522 "{%d:%d} XMT Disconnect Association failed: %d\n",
523 tgtport->fc_target_port.port_num, assoc->a_id, ret);
524 kfree(lsop);
525 }
526 }
527
528
529 /* *********************** FC-NVME Port Management ************************ */
530
531
532 static int
533 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
534 {
535 struct nvmet_fc_ls_iod *iod;
536 int i;
537
538 iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
539 GFP_KERNEL);
540 if (!iod)
541 return -ENOMEM;
542
543 tgtport->iod = iod;
544
545 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
546 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
547 iod->tgtport = tgtport;
548 list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
549
550 iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) +
551 sizeof(union nvmefc_ls_responses),
552 GFP_KERNEL);
553 if (!iod->rqstbuf)
554 goto out_fail;
555
556 iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1];
557
558 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
559 sizeof(*iod->rspbuf),
560 DMA_TO_DEVICE);
561 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
562 goto out_fail;
563 }
564
565 return 0;
566
567 out_fail:
568 kfree(iod->rqstbuf);
569 list_del(&iod->ls_rcv_list);
570 for (iod--, i--; i >= 0; iod--, i--) {
571 fc_dma_unmap_single(tgtport->dev, iod->rspdma,
572 sizeof(*iod->rspbuf), DMA_TO_DEVICE);
573 kfree(iod->rqstbuf);
574 list_del(&iod->ls_rcv_list);
575 }
576
577 kfree(iod);
578
579 return -EFAULT;
580 }
581
582 static void
583 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
584 {
585 struct nvmet_fc_ls_iod *iod = tgtport->iod;
586 int i;
587
588 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
589 fc_dma_unmap_single(tgtport->dev,
590 iod->rspdma, sizeof(*iod->rspbuf),
591 DMA_TO_DEVICE);
592 kfree(iod->rqstbuf);
593 list_del(&iod->ls_rcv_list);
594 }
595 kfree(tgtport->iod);
596 }
597
598 static struct nvmet_fc_ls_iod *
599 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
600 {
601 struct nvmet_fc_ls_iod *iod;
602 unsigned long flags;
603
604 spin_lock_irqsave(&tgtport->lock, flags);
605 iod = list_first_entry_or_null(&tgtport->ls_rcv_list,
606 struct nvmet_fc_ls_iod, ls_rcv_list);
607 if (iod)
608 list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist);
609 spin_unlock_irqrestore(&tgtport->lock, flags);
610 return iod;
611 }
612
613
614 static void
615 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
616 struct nvmet_fc_ls_iod *iod)
617 {
618 unsigned long flags;
619
620 spin_lock_irqsave(&tgtport->lock, flags);
621 list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list);
622 spin_unlock_irqrestore(&tgtport->lock, flags);
623 }
624
625 static void
626 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
627 struct nvmet_fc_tgt_queue *queue)
628 {
629 struct nvmet_fc_fcp_iod *fod = queue->fod;
630 int i;
631
632 for (i = 0; i < queue->sqsize; fod++, i++) {
633 INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
634 fod->tgtport = tgtport;
635 fod->queue = queue;
636 fod->active = false;
637 fod->abort = false;
638 fod->aborted = false;
639 fod->fcpreq = NULL;
640 list_add_tail(&fod->fcp_list, &queue->fod_list);
641 spin_lock_init(&fod->flock);
642
643 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
644 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
645 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
646 list_del(&fod->fcp_list);
647 for (fod--, i--; i >= 0; fod--, i--) {
648 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
649 sizeof(fod->rspiubuf),
650 DMA_TO_DEVICE);
651 fod->rspdma = 0L;
652 list_del(&fod->fcp_list);
653 }
654
655 return;
656 }
657 }
658 }
659
660 static void
661 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
662 struct nvmet_fc_tgt_queue *queue)
663 {
664 struct nvmet_fc_fcp_iod *fod = queue->fod;
665 int i;
666
667 for (i = 0; i < queue->sqsize; fod++, i++) {
668 if (fod->rspdma)
669 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
670 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
671 }
672 }
673
674 static struct nvmet_fc_fcp_iod *
675 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
676 {
677 struct nvmet_fc_fcp_iod *fod;
678
679 lockdep_assert_held(&queue->qlock);
680
681 fod = list_first_entry_or_null(&queue->fod_list,
682 struct nvmet_fc_fcp_iod, fcp_list);
683 if (fod) {
684 list_del(&fod->fcp_list);
685 fod->active = true;
686 /*
687 * no queue reference is taken, as it was taken by the
688 * queue lookup just prior to the allocation. The iod
689 * will "inherit" that reference.
690 */
691 }
692 return fod;
693 }
694
695
696 static void
697 nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
698 struct nvmet_fc_tgt_queue *queue,
699 struct nvmefc_tgt_fcp_req *fcpreq)
700 {
701 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
702
703 /*
704 * put all admin cmds on hw queue id 0. All io commands go to
705 * their respective hw queue on a modulo basis (see the example below)
706 */
707 fcpreq->hwqid = queue->qid ?
708 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
709
710 nvmet_fc_handle_fcp_rqst(tgtport, fod);
711 }
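/*
 * Example for nvmet_fc_queue_fcp_req() above (illustrative only,
 * assuming max_hw_queues == 4): io queues with qid 1..4 map to
 * hwqid 0..3, qid 5 wraps back to hwqid 0, and the admin queue
 * (qid 0) always uses hwqid 0.
 */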
712
713 static void
714 nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
715 {
716 struct nvmet_fc_fcp_iod *fod =
717 container_of(work, struct nvmet_fc_fcp_iod, defer_work);
718
719 /* Submit deferred IO for processing */
720 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
721
722 }
723
724 static void
725 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
726 struct nvmet_fc_fcp_iod *fod)
727 {
728 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
729 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
730 struct nvmet_fc_defer_fcp_req *deferfcp;
731 unsigned long flags;
732
733 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
734 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
735
736 fcpreq->nvmet_fc_private = NULL;
737
738 fod->active = false;
739 fod->abort = false;
740 fod->aborted = false;
741 fod->writedataactive = false;
742 fod->fcpreq = NULL;
743
744 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
745
746 /* release the queue lookup reference on the completed IO */
747 nvmet_fc_tgt_q_put(queue);
748
749 spin_lock_irqsave(&queue->qlock, flags);
750 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
751 struct nvmet_fc_defer_fcp_req, req_list);
752 if (!deferfcp) {
753 list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
754 spin_unlock_irqrestore(&queue->qlock, flags);
755 return;
756 }
757
758 /* Re-use the fod for the next pending cmd that was deferred */
759 list_del(&deferfcp->req_list);
760
761 fcpreq = deferfcp->fcp_req;
762
763 /* deferfcp can be reused for another IO at a later date */
764 list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
765
766 spin_unlock_irqrestore(&queue->qlock, flags);
767
768 /* Save NVME CMD IO in fod */
769 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
770
771 /* Setup new fcpreq to be processed */
772 fcpreq->rspaddr = NULL;
773 fcpreq->rsplen = 0;
774 fcpreq->nvmet_fc_private = fod;
775 fod->fcpreq = fcpreq;
776 fod->active = true;
777
778 /* inform LLDD IO is now being processed */
779 tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
780
781 /*
782 * Leave in place the queue lookup reference that was taken when
783 * the fod was originally allocated.
784 */
785
786 queue_work(queue->work_q, &fod->defer_work);
787 }
788
789 static struct nvmet_fc_tgt_queue *
790 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
791 u16 qid, u16 sqsize)
792 {
793 struct nvmet_fc_tgt_queue *queue;
794 unsigned long flags;
795 int ret;
796
797 if (qid > NVMET_NR_QUEUES)
798 return NULL;
799
800 queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
801 if (!queue)
802 return NULL;
803
804 if (!nvmet_fc_tgt_a_get(assoc))
805 goto out_free_queue;
806
807 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
808 assoc->tgtport->fc_target_port.port_num,
809 assoc->a_id, qid);
810 if (!queue->work_q)
811 goto out_a_put;
812
813 queue->qid = qid;
814 queue->sqsize = sqsize;
815 queue->assoc = assoc;
816 INIT_LIST_HEAD(&queue->fod_list);
817 INIT_LIST_HEAD(&queue->avail_defer_list);
818 INIT_LIST_HEAD(&queue->pending_cmd_list);
819 atomic_set(&queue->connected, 0);
820 atomic_set(&queue->sqtail, 0);
821 atomic_set(&queue->rsn, 1);
822 atomic_set(&queue->zrspcnt, 0);
823 spin_lock_init(&queue->qlock);
824 kref_init(&queue->ref);
825
826 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
827
828 ret = nvmet_sq_init(&queue->nvme_sq);
829 if (ret)
830 goto out_fail_iodlist;
831
832 WARN_ON(assoc->queues[qid]);
833 spin_lock_irqsave(&assoc->tgtport->lock, flags);
834 assoc->queues[qid] = queue;
835 spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
836
837 return queue;
838
839 out_fail_iodlist:
840 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
841 destroy_workqueue(queue->work_q);
842 out_a_put:
843 nvmet_fc_tgt_a_put(assoc);
844 out_free_queue:
845 kfree(queue);
846 return NULL;
847 }
848
849
850 static void
851 nvmet_fc_tgt_queue_free(struct kref *ref)
852 {
853 struct nvmet_fc_tgt_queue *queue =
854 container_of(ref, struct nvmet_fc_tgt_queue, ref);
855 unsigned long flags;
856
857 spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
858 queue->assoc->queues[queue->qid] = NULL;
859 spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
860
861 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
862
863 nvmet_fc_tgt_a_put(queue->assoc);
864
865 destroy_workqueue(queue->work_q);
866
867 kfree(queue);
868 }
869
870 static void
871 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
872 {
873 kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
874 }
875
876 static int
877 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
878 {
879 return kref_get_unless_zero(&queue->ref);
880 }
881
882
883 static void
884 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
885 {
886 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
887 struct nvmet_fc_fcp_iod *fod = queue->fod;
888 struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
889 unsigned long flags;
890 int i;
891 bool disconnect;
892
893 disconnect = atomic_xchg(&queue->connected, 0);
894
895 /* if not connected, nothing to do */
896 if (!disconnect)
897 return;
898
899 spin_lock_irqsave(&queue->qlock, flags);
900 /* abort outstanding io's */
901 for (i = 0; i < queue->sqsize; fod++, i++) {
902 if (fod->active) {
903 spin_lock(&fod->flock);
904 fod->abort = true;
905 /*
906 * only call lldd abort routine if waiting for
907 * writedata. other outstanding ops should finish
908 * on their own.
909 */
910 if (fod->writedataactive) {
911 fod->aborted = true;
912 spin_unlock(&fod->flock);
913 tgtport->ops->fcp_abort(
914 &tgtport->fc_target_port, fod->fcpreq);
915 } else
916 spin_unlock(&fod->flock);
917 }
918 }
919
920 /* Cleanup defer'ed IOs in queue */
921 list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
922 req_list) {
923 list_del(&deferfcp->req_list);
924 kfree(deferfcp);
925 }
926
927 for (;;) {
928 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
929 struct nvmet_fc_defer_fcp_req, req_list);
930 if (!deferfcp)
931 break;
932
933 list_del(&deferfcp->req_list);
934 spin_unlock_irqrestore(&queue->qlock, flags);
935
936 tgtport->ops->defer_rcv(&tgtport->fc_target_port,
937 deferfcp->fcp_req);
938
939 tgtport->ops->fcp_abort(&tgtport->fc_target_port,
940 deferfcp->fcp_req);
941
942 tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
943 deferfcp->fcp_req);
944
945 /* release the queue lookup reference */
946 nvmet_fc_tgt_q_put(queue);
947
948 kfree(deferfcp);
949
950 spin_lock_irqsave(&queue->qlock, flags);
951 }
952 spin_unlock_irqrestore(&queue->qlock, flags);
953
954 flush_workqueue(queue->work_q);
955
956 nvmet_sq_destroy(&queue->nvme_sq);
957
958 nvmet_fc_tgt_q_put(queue);
959 }
960
961 static struct nvmet_fc_tgt_queue *
962 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
963 u64 connection_id)
964 {
965 struct nvmet_fc_tgt_assoc *assoc;
966 struct nvmet_fc_tgt_queue *queue;
967 u64 association_id = nvmet_fc_getassociationid(connection_id);
968 u16 qid = nvmet_fc_getqueueid(connection_id);
969 unsigned long flags;
970
971 if (qid > NVMET_NR_QUEUES)
972 return NULL;
973
974 spin_lock_irqsave(&tgtport->lock, flags);
975 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
976 if (association_id == assoc->association_id) {
977 queue = assoc->queues[qid];
978 if (queue &&
979 (!atomic_read(&queue->connected) ||
980 !nvmet_fc_tgt_q_get(queue)))
981 queue = NULL;
982 spin_unlock_irqrestore(&tgtport->lock, flags);
983 return queue;
984 }
985 }
986 spin_unlock_irqrestore(&tgtport->lock, flags);
987 return NULL;
988 }
989
990 static void
991 nvmet_fc_hostport_free(struct kref *ref)
992 {
993 struct nvmet_fc_hostport *hostport =
994 container_of(ref, struct nvmet_fc_hostport, ref);
995 struct nvmet_fc_tgtport *tgtport = hostport->tgtport;
996 unsigned long flags;
997
998 spin_lock_irqsave(&tgtport->lock, flags);
999 list_del(&hostport->host_list);
1000 spin_unlock_irqrestore(&tgtport->lock, flags);
1001 if (tgtport->ops->host_release && hostport->invalid)
1002 tgtport->ops->host_release(hostport->hosthandle);
1003 kfree(hostport);
1004 nvmet_fc_tgtport_put(tgtport);
1005 }
1006
1007 static void
1008 nvmet_fc_hostport_put(struct nvmet_fc_hostport *hostport)
1009 {
1010 kref_put(&hostport->ref, nvmet_fc_hostport_free);
1011 }
1012
1013 static int
1014 nvmet_fc_hostport_get(struct nvmet_fc_hostport *hostport)
1015 {
1016 return kref_get_unless_zero(&hostport->ref);
1017 }
1018
1019 static void
1020 nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport)
1021 {
1022 /* if LLDD not implemented, leave as NULL */
1023 if (!hostport || !hostport->hosthandle)
1024 return;
1025
1026 nvmet_fc_hostport_put(hostport);
1027 }
1028
1029 static struct nvmet_fc_hostport *
1030 nvmet_fc_alloc_hostport(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
1031 {
1032 struct nvmet_fc_hostport *newhost, *host, *match = NULL;
1033 unsigned long flags;
1034
1035 /* if LLDD not implemented, leave as NULL */
1036 if (!hosthandle)
1037 return NULL;
1038
1039 /* take reference for what will be the newly allocated hostport */
1040 if (!nvmet_fc_tgtport_get(tgtport))
1041 return ERR_PTR(-EINVAL);
1042
1043 newhost = kzalloc(sizeof(*newhost), GFP_KERNEL);
1044 if (!newhost) {
1045 spin_lock_irqsave(&tgtport->lock, flags);
1046 list_for_each_entry(host, &tgtport->host_list, host_list) {
1047 if (host->hosthandle == hosthandle && !host->invalid) {
1048 if (nvmet_fc_hostport_get(host)) {
1049 match = host;
1050 break;
1051 }
1052 }
1053 }
1054 spin_unlock_irqrestore(&tgtport->lock, flags);
1055 /* no allocation - release reference */
1056 nvmet_fc_tgtport_put(tgtport);
1057 return (match) ? match : ERR_PTR(-ENOMEM);
1058 }
1059
1060 newhost->tgtport = tgtport;
1061 newhost->hosthandle = hosthandle;
1062 INIT_LIST_HEAD(&newhost->host_list);
1063 kref_init(&newhost->ref);
1064
1065 spin_lock_irqsave(&tgtport->lock, flags);
1066 list_for_each_entry(host, &tgtport->host_list, host_list) {
1067 if (host->hosthandle == hosthandle && !host->invalid) {
1068 if (nvmet_fc_hostport_get(host)) {
1069 match = host;
1070 break;
1071 }
1072 }
1073 }
1074 if (match) {
1075 kfree(newhost);
1076 newhost = NULL;
1077 /* releasing allocation - release reference */
1078 nvmet_fc_tgtport_put(tgtport);
1079 } else
1080 list_add_tail(&newhost->host_list, &tgtport->host_list);
1081 spin_unlock_irqrestore(&tgtport->lock, flags);
1082
1083 return (match) ? match : newhost;
1084 }
1085
1086 static void
1087 nvmet_fc_delete_assoc(struct work_struct *work)
1088 {
1089 struct nvmet_fc_tgt_assoc *assoc =
1090 container_of(work, struct nvmet_fc_tgt_assoc, del_work);
1091
1092 nvmet_fc_delete_target_assoc(assoc);
1093 nvmet_fc_tgt_a_put(assoc);
1094 }
1095
1096 static struct nvmet_fc_tgt_assoc *
1097 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
1098 {
1099 struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
1100 unsigned long flags;
1101 u64 ran;
1102 int idx;
1103 bool needrandom = true;
1104
1105 if (!tgtport->pe)
1106 return NULL;
1107
1108 assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
1109 if (!assoc)
1110 return NULL;
1111
1112 idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
1113 if (idx < 0)
1114 goto out_free_assoc;
1115
1116 if (!nvmet_fc_tgtport_get(tgtport))
1117 goto out_ida;
1118
1119 assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle);
1120 if (IS_ERR(assoc->hostport))
1121 goto out_put;
1122
1123 assoc->tgtport = tgtport;
1124 assoc->a_id = idx;
1125 INIT_LIST_HEAD(&assoc->a_list);
1126 kref_init(&assoc->ref);
1127 INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
1128 atomic_set(&assoc->terminating, 0);
1129
1130 while (needrandom) {
1131 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
1132 ran = ran << BYTES_FOR_QID_SHIFT;
1133
1134 spin_lock_irqsave(&tgtport->lock, flags);
1135 needrandom = false;
1136 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list) {
1137 if (ran == tmpassoc->association_id) {
1138 needrandom = true;
1139 break;
1140 }
1141 }
1142 if (!needrandom) {
1143 assoc->association_id = ran;
1144 list_add_tail(&assoc->a_list, &tgtport->assoc_list);
1145 }
1146 spin_unlock_irqrestore(&tgtport->lock, flags);
1147 }
1148
1149 return assoc;
1150
1151 out_put:
1152 nvmet_fc_tgtport_put(tgtport);
1153 out_ida:
1154 ida_simple_remove(&tgtport->assoc_cnt, idx);
1155 out_free_assoc:
1156 kfree(assoc);
1157 return NULL;
1158 }
1159
1160 static void
1161 nvmet_fc_target_assoc_free(struct kref *ref)
1162 {
1163 struct nvmet_fc_tgt_assoc *assoc =
1164 container_of(ref, struct nvmet_fc_tgt_assoc, ref);
1165 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
1166 struct nvmet_fc_ls_iod *oldls;
1167 unsigned long flags;
1168
1169 /* Send Disconnect now that all i/o has completed */
1170 nvmet_fc_xmt_disconnect_assoc(assoc);
1171
1172 nvmet_fc_free_hostport(assoc->hostport);
1173 spin_lock_irqsave(&tgtport->lock, flags);
1174 list_del(&assoc->a_list);
1175 oldls = assoc->rcv_disconn;
1176 spin_unlock_irqrestore(&tgtport->lock, flags);
1177 /* if pending Rcv Disconnect Association LS, send rsp now */
1178 if (oldls)
1179 nvmet_fc_xmt_ls_rsp(tgtport, oldls);
1180 ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
1181 dev_info(tgtport->dev,
1182 "{%d:%d} Association freed\n",
1183 tgtport->fc_target_port.port_num, assoc->a_id);
1184 kfree(assoc);
1185 nvmet_fc_tgtport_put(tgtport);
1186 }
1187
1188 static void
1189 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
1190 {
1191 kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
1192 }
1193
1194 static int
1195 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
1196 {
1197 return kref_get_unless_zero(&assoc->ref);
1198 }
1199
1200 static void
1201 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
1202 {
1203 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
1204 struct nvmet_fc_tgt_queue *queue;
1205 unsigned long flags;
1206 int i, terminating;
1207
1208 terminating = atomic_xchg(&assoc->terminating, 1);
1209
1210 /* if already terminating, do nothing */
1211 if (terminating)
1212 return;
1213
1214 spin_lock_irqsave(&tgtport->lock, flags);
1215 for (i = NVMET_NR_QUEUES; i >= 0; i--) {
1216 queue = assoc->queues[i];
1217 if (queue) {
1218 if (!nvmet_fc_tgt_q_get(queue))
1219 continue;
1220 spin_unlock_irqrestore(&tgtport->lock, flags);
1221 nvmet_fc_delete_target_queue(queue);
1222 nvmet_fc_tgt_q_put(queue);
1223 spin_lock_irqsave(&tgtport->lock, flags);
1224 }
1225 }
1226 spin_unlock_irqrestore(&tgtport->lock, flags);
1227
1228 dev_info(tgtport->dev,
1229 "{%d:%d} Association deleted\n",
1230 tgtport->fc_target_port.port_num, assoc->a_id);
1231
1232 nvmet_fc_tgt_a_put(assoc);
1233 }
1234
1235 static struct nvmet_fc_tgt_assoc *
1236 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
1237 u64 association_id)
1238 {
1239 struct nvmet_fc_tgt_assoc *assoc;
1240 struct nvmet_fc_tgt_assoc *ret = NULL;
1241 unsigned long flags;
1242
1243 spin_lock_irqsave(&tgtport->lock, flags);
1244 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
1245 if (association_id == assoc->association_id) {
1246 ret = assoc;
1247 if (!nvmet_fc_tgt_a_get(assoc))
1248 ret = NULL;
1249 break;
1250 }
1251 }
1252 spin_unlock_irqrestore(&tgtport->lock, flags);
1253
1254 return ret;
1255 }
1256
1257 static void
1258 nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
1259 struct nvmet_fc_port_entry *pe,
1260 struct nvmet_port *port)
1261 {
1262 lockdep_assert_held(&nvmet_fc_tgtlock);
1263
1264 pe->tgtport = tgtport;
1265 tgtport->pe = pe;
1266
1267 pe->port = port;
1268 port->priv = pe;
1269
1270 pe->node_name = tgtport->fc_target_port.node_name;
1271 pe->port_name = tgtport->fc_target_port.port_name;
1272 INIT_LIST_HEAD(&pe->pe_list);
1273
1274 list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
1275 }
1276
1277 static void
1278 nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
1279 {
1280 unsigned long flags;
1281
1282 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1283 if (pe->tgtport)
1284 pe->tgtport->pe = NULL;
1285 list_del(&pe->pe_list);
1286 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1287 }
1288
1289 /*
1290 * called when a targetport deregisters. Breaks the relationship
1291 * with the nvmet port, but leaves the port_entry in place so that
1292 * re-registration can resume operation.
1293 */
1294 static void
1295 nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
1296 {
1297 struct nvmet_fc_port_entry *pe;
1298 unsigned long flags;
1299
1300 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1301 pe = tgtport->pe;
1302 if (pe)
1303 pe->tgtport = NULL;
1304 tgtport->pe = NULL;
1305 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1306 }
1307
1308 /*
1309 * called when a new targetport is registered. Looks in the
1310 * existing nvmet port_entries to see if the nvmet layer is
1311 * configured for the targetport's wwn's. (the targetport existed,
1312 * nvmet configured, the lldd unregistered the tgtport, and is now
1313 * reregistering the same targetport). If so, set the nvmet
1314 * port entry on the targetport.
1315 */
1316 static void
1317 nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
1318 {
1319 struct nvmet_fc_port_entry *pe;
1320 unsigned long flags;
1321
1322 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1323 list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
1324 if (tgtport->fc_target_port.node_name == pe->node_name &&
1325 tgtport->fc_target_port.port_name == pe->port_name) {
1326 WARN_ON(pe->tgtport);
1327 tgtport->pe = pe;
1328 pe->tgtport = tgtport;
1329 break;
1330 }
1331 }
1332 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1333 }
1334
1335 /**
1336 * nvmet_fc_register_targetport - transport entry point called by an
1337 * LLDD to register the existence of a local
1338 * NVME subsystem FC port.
1339 * @pinfo: pointer to information about the port to be registered
1340 * @template: LLDD entrypoints and operational parameters for the port
1341 * @dev: physical hardware device node port corresponds to. Will be
1342 * used for DMA mappings
1343 * @portptr: pointer to a target port pointer. Upon success, the routine
1344 * will allocate a nvmet_fc_target_port structure and place its
1345 * address in the target port pointer. Upon failure, the target
1346 * port pointer will be set to NULL.
1347 *
1348 * Returns:
1349 * a completion status. Must be 0 upon success; a negative errno
1350 * (ex: -ENXIO) upon failure.
1351 */
1352 int
1353 nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
1354 struct nvmet_fc_target_template *template,
1355 struct device *dev,
1356 struct nvmet_fc_target_port **portptr)
1357 {
1358 struct nvmet_fc_tgtport *newrec;
1359 unsigned long flags;
1360 int ret, idx;
1361
1362 if (!template->xmt_ls_rsp || !template->fcp_op ||
1363 !template->fcp_abort ||
1364 !template->fcp_req_release || !template->targetport_delete ||
1365 !template->max_hw_queues || !template->max_sgl_segments ||
1366 !template->max_dif_sgl_segments || !template->dma_boundary) {
1367 ret = -EINVAL;
1368 goto out_regtgt_failed;
1369 }
1370
1371 newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
1372 GFP_KERNEL);
1373 if (!newrec) {
1374 ret = -ENOMEM;
1375 goto out_regtgt_failed;
1376 }
1377
1378 idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
1379 if (idx < 0) {
1380 ret = -ENOSPC;
1381 goto out_fail_kfree;
1382 }
1383
1384 if (!get_device(dev) && dev) {
1385 ret = -ENODEV;
1386 goto out_ida_put;
1387 }
1388
1389 newrec->fc_target_port.node_name = pinfo->node_name;
1390 newrec->fc_target_port.port_name = pinfo->port_name;
1391 if (template->target_priv_sz)
1392 newrec->fc_target_port.private = &newrec[1];
1393 else
1394 newrec->fc_target_port.private = NULL;
1395 newrec->fc_target_port.port_id = pinfo->port_id;
1396 newrec->fc_target_port.port_num = idx;
1397 INIT_LIST_HEAD(&newrec->tgt_list);
1398 newrec->dev = dev;
1399 newrec->ops = template;
1400 spin_lock_init(&newrec->lock);
1401 INIT_LIST_HEAD(&newrec->ls_rcv_list);
1402 INIT_LIST_HEAD(&newrec->ls_req_list);
1403 INIT_LIST_HEAD(&newrec->ls_busylist);
1404 INIT_LIST_HEAD(&newrec->assoc_list);
1405 INIT_LIST_HEAD(&newrec->host_list);
1406 kref_init(&newrec->ref);
1407 ida_init(&newrec->assoc_cnt);
1408 newrec->max_sg_cnt = template->max_sgl_segments;
1409
1410 ret = nvmet_fc_alloc_ls_iodlist(newrec);
1411 if (ret) {
1412 ret = -ENOMEM;
1413 goto out_free_newrec;
1414 }
1415
1416 nvmet_fc_portentry_rebind_tgt(newrec);
1417
1418 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1419 list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
1420 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1421
1422 *portptr = &newrec->fc_target_port;
1423 return 0;
1424
1425 out_free_newrec:
1426 put_device(dev);
1427 out_ida_put:
1428 ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
1429 out_fail_kfree:
1430 kfree(newrec);
1431 out_regtgt_failed:
1432 *portptr = NULL;
1433 return ret;
1434 }
1435 EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
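/*
 * Minimal registration sketch (illustrative only): the callback names,
 * variables, and numeric limits below are assumptions, not taken from
 * any real LLDD; only the template fields and the call signature come
 * from the code above.
 *
 *	static struct nvmet_fc_target_template example_tgt_template = {
 *		.targetport_delete	= example_targetport_delete,
 *		.xmt_ls_rsp		= example_xmt_ls_rsp,
 *		.fcp_op			= example_fcp_op,
 *		.fcp_abort		= example_fcp_abort,
 *		.fcp_req_release	= example_fcp_req_release,
 *		.max_hw_queues		= 4,
 *		.max_sgl_segments	= 256,
 *		.max_dif_sgl_segments	= 256,
 *		.dma_boundary		= 0xFFFFFFFF,
 *	};
 *
 *	struct nvmet_fc_port_info pinfo = {
 *		.node_name	= wwnn,
 *		.port_name	= wwpn,
 *		.port_id	= port_did,
 *	};
 *	struct nvmet_fc_target_port *targetport;
 *	int ret;
 *
 *	ret = nvmet_fc_register_targetport(&pinfo, &example_tgt_template,
 *					   &pdev->dev, &targetport);
 */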
1436
1437
1438 static void
1439 nvmet_fc_free_tgtport(struct kref *ref)
1440 {
1441 struct nvmet_fc_tgtport *tgtport =
1442 container_of(ref, struct nvmet_fc_tgtport, ref);
1443 struct device *dev = tgtport->dev;
1444 unsigned long flags;
1445
1446 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1447 list_del(&tgtport->tgt_list);
1448 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1449
1450 nvmet_fc_free_ls_iodlist(tgtport);
1451
1452 /* let the LLDD know we've finished tearing it down */
1453 tgtport->ops->targetport_delete(&tgtport->fc_target_port);
1454
1455 ida_simple_remove(&nvmet_fc_tgtport_cnt,
1456 tgtport->fc_target_port.port_num);
1457
1458 ida_destroy(&tgtport->assoc_cnt);
1459
1460 kfree(tgtport);
1461
1462 put_device(dev);
1463 }
1464
1465 static void
1466 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
1467 {
1468 kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
1469 }
1470
1471 static int
1472 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
1473 {
1474 return kref_get_unless_zero(&tgtport->ref);
1475 }
1476
1477 static void
1478 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
1479 {
1480 struct nvmet_fc_tgt_assoc *assoc, *next;
1481 unsigned long flags;
1482
1483 spin_lock_irqsave(&tgtport->lock, flags);
1484 list_for_each_entry_safe(assoc, next,
1485 &tgtport->assoc_list, a_list) {
1486 if (!nvmet_fc_tgt_a_get(assoc))
1487 continue;
1488 if (!schedule_work(&assoc->del_work))
1489 /* already deleting - release local reference */
1490 nvmet_fc_tgt_a_put(assoc);
1491 }
1492 spin_unlock_irqrestore(&tgtport->lock, flags);
1493 }
1494
1495 /**
1496 * nvmet_fc_invalidate_host - transport entry point called by an LLDD
1497 * to remove references to a hosthandle for LS's.
1498 *
1499 * The nvmet-fc layer ensures that any references to the hosthandle
1500 * on the targetport are forgotten (set to NULL). The LLDD will
1501 * typically call this when a login with a remote host port has been
1502 * lost, thus LS's for the remote host port are no longer possible.
1503 *
1504 * If an LS request is outstanding to the targetport/hosthandle (or
1505 * issued concurrently with the call to invalidate the host), the
1506 * LLDD is responsible for terminating/aborting the LS and completing
1507 * the LS request. It is recommended that these terminations/aborts
1508 * occur after calling to invalidate the host handle to avoid additional
1509 * retries by the nvmet-fc transport. The nvmet-fc transport may
1510 * continue to reference the host handle while it cleans up outstanding
1511 * NVME associations. The nvmet-fc transport will call the
1512 * ops->host_release() callback to notify the LLDD that all references
1513 * are complete and the related host handle can be recovered.
1514 * Note: if there are no references, the callback may be called before
1515 * the invalidate host call returns.
1516 *
1517 * @target_port: pointer to the (registered) target port that a prior
1518 * LS was received on and which supplied the transport the
1519 * hosthandle.
1520 * @hosthandle: the handle (pointer) that represents the host port
1521 * that no longer has connectivity and that LS's should
1522 * no longer be directed to.
1523 */
1524 void
1525 nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
1526 void *hosthandle)
1527 {
1528 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1529 struct nvmet_fc_tgt_assoc *assoc, *next;
1530 unsigned long flags;
1531 bool noassoc = true;
1532
1533 spin_lock_irqsave(&tgtport->lock, flags);
1534 list_for_each_entry_safe(assoc, next,
1535 &tgtport->assoc_list, a_list) {
1536 if (!assoc->hostport ||
1537 assoc->hostport->hosthandle != hosthandle)
1538 continue;
1539 if (!nvmet_fc_tgt_a_get(assoc))
1540 continue;
1541 assoc->hostport->invalid = 1;
1542 noassoc = false;
1543 if (!schedule_work(&assoc->del_work))
1544 /* already deleting - release local reference */
1545 nvmet_fc_tgt_a_put(assoc);
1546 }
1547 spin_unlock_irqrestore(&tgtport->lock, flags);
1548
1549 /* if there's nothing to wait for - call the callback */
1550 if (noassoc && tgtport->ops->host_release)
1551 tgtport->ops->host_release(hosthandle);
1552 }
1553 EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host);
1554
1555 /*
1556 * nvmet layer has called to terminate an association
1557 */
1558 static void
1559 nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
1560 {
1561 struct nvmet_fc_tgtport *tgtport, *next;
1562 struct nvmet_fc_tgt_assoc *assoc;
1563 struct nvmet_fc_tgt_queue *queue;
1564 unsigned long flags;
1565 bool found_ctrl = false;
1566
1567 /* this is a bit ugly, but don't want to make locks layered */
1568 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1569 list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
1570 tgt_list) {
1571 if (!nvmet_fc_tgtport_get(tgtport))
1572 continue;
1573 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1574
1575 spin_lock_irqsave(&tgtport->lock, flags);
1576 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
1577 queue = assoc->queues[0];
1578 if (queue && queue->nvme_sq.ctrl == ctrl) {
1579 if (nvmet_fc_tgt_a_get(assoc))
1580 found_ctrl = true;
1581 break;
1582 }
1583 }
1584 spin_unlock_irqrestore(&tgtport->lock, flags);
1585
1586 nvmet_fc_tgtport_put(tgtport);
1587
1588 if (found_ctrl) {
1589 if (!schedule_work(&assoc->del_work))
1590 /* already deleting - release local reference */
1591 nvmet_fc_tgt_a_put(assoc);
1592 return;
1593 }
1594
1595 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1596 }
1597 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1598 }
1599
1600 /**
1601 * nvmet_fc_unregister_targetport - transport entry point called by an
1602 * LLDD to deregister/remove a previously
1603 * registered local NVME subsystem FC port.
1604 * @target_port: pointer to the (registered) target port that is to be
1605 * deregistered.
1606 *
1607 * Returns:
1608 * a completion status. Must be 0 upon success; a negative errno
1609 * (ex: -ENXIO) upon failure.
1610 */
1611 int
1612 nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
1613 {
1614 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1615
1616 nvmet_fc_portentry_unbind_tgt(tgtport);
1617
1618 /* terminate any outstanding associations */
1619 __nvmet_fc_free_assocs(tgtport);
1620
1621 /*
1622 * should terminate LS's as well. However, LS's will be generated
1623 * at the tail end of association termination, so they likely don't
1624 * exist yet. And even if they did, it's worthwhile to just let
1625 * them finish and targetport ref counting will clean things up.
1626 */
1627
1628 nvmet_fc_tgtport_put(tgtport);
1629
1630 return 0;
1631 }
1632 EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
1633
1634
1635 /* ********************** FC-NVME LS RCV Handling ************************* */
1636
1637
1638 static void
1639 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
1640 struct nvmet_fc_ls_iod *iod)
1641 {
1642 struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc;
1643 struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc;
1644 struct nvmet_fc_tgt_queue *queue;
1645 int ret = 0;
1646
1647 memset(acc, 0, sizeof(*acc));
1648
1649 /*
1650 * The FC-NVME spec changed: the original padding size for the Create
1651 * Association Cmd descriptor was incorrect, so initiators send the
1652 * descriptor with differing lengths.
1653 * Accept anything of "minimum" length. Assume the format per the 1.15
1654 * spec (with HOSTID reduced to 16 bytes) and ignore how long the
1655 * trailing pad is.
1656 */
1657 if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
1658 ret = VERR_CR_ASSOC_LEN;
1659 else if (be32_to_cpu(rqst->desc_list_len) <
1660 FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
1661 ret = VERR_CR_ASSOC_RQST_LEN;
1662 else if (rqst->assoc_cmd.desc_tag !=
1663 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
1664 ret = VERR_CR_ASSOC_CMD;
1665 else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
1666 FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
1667 ret = VERR_CR_ASSOC_CMD_LEN;
1668 else if (!rqst->assoc_cmd.ersp_ratio ||
1669 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
1670 be16_to_cpu(rqst->assoc_cmd.sqsize)))
1671 ret = VERR_ERSP_RATIO;
1672
1673 else {
1674 /* new association w/ admin queue */
1675 iod->assoc = nvmet_fc_alloc_target_assoc(
1676 tgtport, iod->hosthandle);
1677 if (!iod->assoc)
1678 ret = VERR_ASSOC_ALLOC_FAIL;
1679 else {
1680 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1681 be16_to_cpu(rqst->assoc_cmd.sqsize));
1682 if (!queue) {
1683 ret = VERR_QUEUE_ALLOC_FAIL;
1684 nvmet_fc_tgt_a_put(iod->assoc);
1685 }
1686 }
1687 }
1688
1689 if (ret) {
1690 dev_err(tgtport->dev,
1691 "Create Association LS failed: %s\n",
1692 validation_errors[ret]);
1693 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
1694 sizeof(*acc), rqst->w0.ls_cmd,
1695 FCNVME_RJT_RC_LOGIC,
1696 FCNVME_RJT_EXP_NONE, 0);
1697 return;
1698 }
1699
1700 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
1701 atomic_set(&queue->connected, 1);
1702 queue->sqhd = 0; /* best place to init value */
1703
1704 dev_info(tgtport->dev,
1705 "{%d:%d} Association created\n",
1706 tgtport->fc_target_port.port_num, iod->assoc->a_id);
1707
1708 /* format a response */
1709
1710 iod->lsrsp->rsplen = sizeof(*acc);
1711
1712 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1713 fcnvme_lsdesc_len(
1714 sizeof(struct fcnvme_ls_cr_assoc_acc)),
1715 FCNVME_LS_CREATE_ASSOCIATION);
1716 acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1717 acc->associd.desc_len =
1718 fcnvme_lsdesc_len(
1719 sizeof(struct fcnvme_lsdesc_assoc_id));
1720 acc->associd.association_id =
1721 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
1722 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1723 acc->connectid.desc_len =
1724 fcnvme_lsdesc_len(
1725 sizeof(struct fcnvme_lsdesc_conn_id));
1726 acc->connectid.connection_id = acc->associd.association_id;
1727 }
1728
1729 static void
1730 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
1731 struct nvmet_fc_ls_iod *iod)
1732 {
1733 struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn;
1734 struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn;
1735 struct nvmet_fc_tgt_queue *queue;
1736 int ret = 0;
1737
1738 memset(acc, 0, sizeof(*acc));
1739
1740 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
1741 ret = VERR_CR_CONN_LEN;
1742 else if (rqst->desc_list_len !=
1743 fcnvme_lsdesc_len(
1744 sizeof(struct fcnvme_ls_cr_conn_rqst)))
1745 ret = VERR_CR_CONN_RQST_LEN;
1746 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1747 ret = VERR_ASSOC_ID;
1748 else if (rqst->associd.desc_len !=
1749 fcnvme_lsdesc_len(
1750 sizeof(struct fcnvme_lsdesc_assoc_id)))
1751 ret = VERR_ASSOC_ID_LEN;
1752 else if (rqst->connect_cmd.desc_tag !=
1753 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
1754 ret = VERR_CR_CONN_CMD;
1755 else if (rqst->connect_cmd.desc_len !=
1756 fcnvme_lsdesc_len(
1757 sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
1758 ret = VERR_CR_CONN_CMD_LEN;
1759 else if (!rqst->connect_cmd.ersp_ratio ||
1760 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
1761 be16_to_cpu(rqst->connect_cmd.sqsize)))
1762 ret = VERR_ERSP_RATIO;
1763
1764 else {
1765 /* new io queue */
1766 iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1767 be64_to_cpu(rqst->associd.association_id));
1768 if (!iod->assoc)
1769 ret = VERR_NO_ASSOC;
1770 else {
1771 queue = nvmet_fc_alloc_target_queue(iod->assoc,
1772 be16_to_cpu(rqst->connect_cmd.qid),
1773 be16_to_cpu(rqst->connect_cmd.sqsize));
1774 if (!queue)
1775 ret = VERR_QUEUE_ALLOC_FAIL;
1776
1777 /* release get taken in nvmet_fc_find_target_assoc */
1778 nvmet_fc_tgt_a_put(iod->assoc);
1779 }
1780 }
1781
1782 if (ret) {
1783 dev_err(tgtport->dev,
1784 "Create Connection LS failed: %s\n",
1785 validation_errors[ret]);
1786 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
1787 sizeof(*acc), rqst->w0.ls_cmd,
1788 (ret == VERR_NO_ASSOC) ?
1789 FCNVME_RJT_RC_INV_ASSOC :
1790 FCNVME_RJT_RC_LOGIC,
1791 FCNVME_RJT_EXP_NONE, 0);
1792 return;
1793 }
1794
1795 queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
1796 atomic_set(&queue->connected, 1);
1797 queue->sqhd = 0; /* best place to init value */
1798
1799 /* format a response */
1800
1801 iod->lsrsp->rsplen = sizeof(*acc);
1802
1803 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1804 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
1805 FCNVME_LS_CREATE_CONNECTION);
1806 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1807 acc->connectid.desc_len =
1808 fcnvme_lsdesc_len(
1809 sizeof(struct fcnvme_lsdesc_conn_id));
1810 acc->connectid.connection_id =
1811 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
1812 be16_to_cpu(rqst->connect_cmd.qid)));
1813 }
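/*
 * Illustrative sketch only, not used by the driver: the two Create LS
 * handlers above share one shape -- validate the raw payload (overall
 * length, each descriptor's tag and length, and "ersp_ratio non-zero and
 * less than sqsize") before allocating anything, collapsing the first
 * failure into a code that indexes validation_errors[]. A condensed,
 * hypothetical version of that pattern over a made-up descriptor
 * (example_ls_desc and example_validate_ls_desc are inventions for
 * illustration) could look like:
 */
struct example_ls_desc {
	__be32 desc_tag;
	__be32 desc_len;
	__be16 ersp_ratio;
	__be16 sqsize;
};

static inline int
example_validate_ls_desc(struct example_ls_desc *d, u32 rcvdlen, u32 want_tag)
{
	if (rcvdlen < sizeof(*d))
		return VERR_CR_CONN_LEN;	/* payload truncated */
	if (d->desc_tag != cpu_to_be32(want_tag))
		return VERR_CR_CONN_CMD;	/* wrong descriptor tag */
	if (d->desc_len != fcnvme_lsdesc_len(sizeof(*d)))
		return VERR_CR_CONN_CMD_LEN;	/* bad descriptor length */
	if (!d->ersp_ratio ||
	    be16_to_cpu(d->ersp_ratio) >= be16_to_cpu(d->sqsize))
		return VERR_ERSP_RATIO;		/* ratio must be < sqsize */
	return 0;				/* descriptor accepted */
}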
1814
1815 /*
1816 * Returns true if the LS response is to be transmitted
1817 * Returns false if the LS response is to be delayed
1818 */
1819 static int
1820 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1821 struct nvmet_fc_ls_iod *iod)
1822 {
1823 struct fcnvme_ls_disconnect_assoc_rqst *rqst =
1824 &iod->rqstbuf->rq_dis_assoc;
1825 struct fcnvme_ls_disconnect_assoc_acc *acc =
1826 &iod->rspbuf->rsp_dis_assoc;
1827 struct nvmet_fc_tgt_assoc *assoc = NULL;
1828 struct nvmet_fc_ls_iod *oldls = NULL;
1829 unsigned long flags;
1830 int ret = 0;
1831
1832 memset(acc, 0, sizeof(*acc));
1833
1834 ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst);
1835 if (!ret) {
1836 /* match an active association - takes an assoc ref if !NULL */
1837 assoc = nvmet_fc_find_target_assoc(tgtport,
1838 be64_to_cpu(rqst->associd.association_id));
1839 iod->assoc = assoc;
1840 if (!assoc)
1841 ret = VERR_NO_ASSOC;
1842 }
1843
1844 if (ret || !assoc) {
1845 dev_err(tgtport->dev,
1846 "Disconnect LS failed: %s\n",
1847 validation_errors[ret]);
1848 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
1849 sizeof(*acc), rqst->w0.ls_cmd,
1850 (ret == VERR_NO_ASSOC) ?
1851 FCNVME_RJT_RC_INV_ASSOC :
1852 FCNVME_RJT_RC_LOGIC,
1853 FCNVME_RJT_EXP_NONE, 0);
1854 return true;
1855 }
1856
1857 /* format a response */
1858
1859 iod->lsrsp->rsplen = sizeof(*acc);
1860
1861 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1862 fcnvme_lsdesc_len(
1863 sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
1864 FCNVME_LS_DISCONNECT_ASSOC);
1865
1866 /* release get taken in nvmet_fc_find_target_assoc */
1867 nvmet_fc_tgt_a_put(assoc);
1868
1869 /*
1870 * The rules for LS responses say the response cannot
1871 * go back until ABTS's have been sent for all outstanding
1872 * I/O and a Disconnect Association LS has been sent.
1873 * So... save off the Disconnect LS to send the response
1874 * later. If there was a prior LS already saved, replace
1875 * it with the newer one and send a can't perform reject
1876 * on the older one.
1877 */
1878 spin_lock_irqsave(&tgtport->lock, flags);
1879 oldls = assoc->rcv_disconn;
1880 assoc->rcv_disconn = iod;
1881 spin_unlock_irqrestore(&tgtport->lock, flags);
1882
1883 nvmet_fc_delete_target_assoc(assoc);
1884
1885 if (oldls) {
1886 dev_info(tgtport->dev,
1887 "{%d:%d} Multiple Disconnect Association LS's "
1888 "received\n",
1889 tgtport->fc_target_port.port_num, assoc->a_id);
1890 /* overwrite good response with bogus failure */
1891 oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf,
1892 sizeof(*iod->rspbuf),
1893 /* ok to use rqst, LS is same */
1894 rqst->w0.ls_cmd,
1895 FCNVME_RJT_RC_UNAB,
1896 FCNVME_RJT_EXP_NONE, 0);
1897 nvmet_fc_xmt_ls_rsp(tgtport, oldls);
1898 }
1899
1900 return false;
1901 }
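/*
 * Illustrative sketch only, not used by the driver: the "save the newest
 * Disconnect LS, reject the older one" step above is a plain pointer swap
 * under the tgtport lock, with the displaced entry handled outside the
 * lock. The same pattern in isolation (example_swap_pending_ls and its
 * slot argument are inventions for illustration):
 */
static inline struct nvmet_fc_ls_iod *
example_swap_pending_ls(struct nvmet_fc_tgtport *tgtport,
			struct nvmet_fc_ls_iod **slot,
			struct nvmet_fc_ls_iod *newls)
{
	struct nvmet_fc_ls_iod *oldls;
	unsigned long flags;

	spin_lock_irqsave(&tgtport->lock, flags);
	oldls = *slot;		/* whatever was parked there before */
	*slot = newls;		/* newest Disconnect LS wins */
	spin_unlock_irqrestore(&tgtport->lock, flags);

	/* caller rejects and transmits oldls, if any, outside the lock */
	return oldls;
}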
1902
1903
1904 /* *********************** NVME Ctrl Routines **************************** */
1905
1906
1907 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
1908
1909 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
1910
1911 static void
1912 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
1913 {
1914 struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private;
1915 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1916
1917 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
1918 sizeof(*iod->rspbuf), DMA_TO_DEVICE);
1919 nvmet_fc_free_ls_iod(tgtport, iod);
1920 nvmet_fc_tgtport_put(tgtport);
1921 }
1922
1923 static void
1924 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
1925 struct nvmet_fc_ls_iod *iod)
1926 {
1927 int ret;
1928
1929 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
1930 sizeof(*iod->rspbuf), DMA_TO_DEVICE);
1931
1932 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp);
1933 if (ret)
1934 nvmet_fc_xmt_ls_rsp_done(iod->lsrsp);
1935 }
1936
1937 /*
1938 * Actual processing routine for received FC-NVME LS Requests from the LLDD
1939 */
1940 static void
1941 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
1942 struct nvmet_fc_ls_iod *iod)
1943 {
1944 struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0;
1945 bool sendrsp = true;
1946
1947 iod->lsrsp->nvme_fc_private = iod;
1948 iod->lsrsp->rspbuf = iod->rspbuf;
1949 iod->lsrsp->rspdma = iod->rspdma;
1950 iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done;
1951 /* Be preventative. Handlers will later set it to a valid length. */
1952 iod->lsrsp->rsplen = 0;
1953
1954 iod->assoc = NULL;
1955
1956 /*
1957 * handlers:
1958 * parse request input, execute the request, and format the
1959 * LS response
1960 */
1961 switch (w0->ls_cmd) {
1962 case FCNVME_LS_CREATE_ASSOCIATION:
1963 /* Creates Association and initial Admin Queue/Connection */
1964 nvmet_fc_ls_create_association(tgtport, iod);
1965 break;
1966 case FCNVME_LS_CREATE_CONNECTION:
1967 /* Creates an IO Queue/Connection */
1968 nvmet_fc_ls_create_connection(tgtport, iod);
1969 break;
1970 case FCNVME_LS_DISCONNECT_ASSOC:
1971 /* Terminate a Queue/Connection or the Association */
1972 sendrsp = nvmet_fc_ls_disconnect(tgtport, iod);
1973 break;
1974 default:
1975 iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf,
1976 sizeof(*iod->rspbuf), w0->ls_cmd,
1977 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
1978 }
1979
1980 if (sendrsp)
1981 nvmet_fc_xmt_ls_rsp(tgtport, iod);
1982 }
1983
1984 /*
1985 * Work item handler that processes a received FC-NVME LS Request from the LLDD
1986 */
1987 static void
1988 nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
1989 {
1990 struct nvmet_fc_ls_iod *iod =
1991 container_of(work, struct nvmet_fc_ls_iod, work);
1992 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1993
1994 nvmet_fc_handle_ls_rqst(tgtport, iod);
1995 }
1996
1997
1998 /**
1999 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
2000 * upon the reception of an NVME LS request.
2001 *
2002 * The nvmet-fc layer will copy payload to an internal structure for
2003 * processing. As such, upon completion of the routine, the LLDD may
2004 * immediately free/reuse the LS request buffer passed in the call.
2005 *
2006 * If this routine returns error, the LLDD should abort the exchange.
2007 *
2008 * @target_port: pointer to the (registered) target port the LS was
2009 * received on.
2010 * @lsrsp: pointer to a lsrsp structure to be used to reference
2011 * the exchange corresponding to the LS.
2012 * @lsreqbuf: pointer to the buffer containing the LS Request
2013 * @lsreqbuf_len: length, in bytes, of the received LS request
2014 */
2015 int
2016 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
2017 void *hosthandle,
2018 struct nvmefc_ls_rsp *lsrsp,
2019 void *lsreqbuf, u32 lsreqbuf_len)
2020 {
2021 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2022 struct nvmet_fc_ls_iod *iod;
2023 struct fcnvme_ls_rqst_w0 *w0 = (struct fcnvme_ls_rqst_w0 *)lsreqbuf;
2024
2025 if (lsreqbuf_len > sizeof(union nvmefc_ls_requests)) {
2026 dev_info(tgtport->dev,
2027 "RCV %s LS failed: payload too large (%d)\n",
2028 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
2029 nvmefc_ls_names[w0->ls_cmd] : "",
2030 lsreqbuf_len);
2031 return -E2BIG;
2032 }
2033
2034 if (!nvmet_fc_tgtport_get(tgtport)) {
2035 dev_info(tgtport->dev,
2036 "RCV %s LS failed: target deleting\n",
2037 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
2038 nvmefc_ls_names[w0->ls_cmd] : "");
2039 return -ESHUTDOWN;
2040 }
2041
2042 iod = nvmet_fc_alloc_ls_iod(tgtport);
2043 if (!iod) {
2044 dev_info(tgtport->dev,
2045 "RCV %s LS failed: context allocation failed\n",
2046 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ?
2047 nvmefc_ls_names[w0->ls_cmd] : "");
2048 nvmet_fc_tgtport_put(tgtport);
2049 return -ENOENT;
2050 }
2051
2052 iod->lsrsp = lsrsp;
2053 iod->fcpreq = NULL;
2054 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
2055 iod->rqstdatalen = lsreqbuf_len;
2056 iod->hosthandle = hosthandle;
2057
2058 schedule_work(&iod->work);
2059
2060 return 0;
2061 }
2062 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
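/*
 * Illustrative sketch only: how a hypothetical LLDD might feed a received
 * LS into the transport. The example_lldd_ls_ctx structure and
 * example_lldd_ls_rcv() are inventions for illustration; the contract they
 * follow (payload is copied, so the buffer may be reused on return; on
 * error the LLDD should abort the exchange) is the one documented above.
 */
struct example_lldd_ls_ctx {
	struct nvmefc_ls_rsp lsrsp;	/* handed to nvmet-fc */
	void *rqstbuf;			/* LS payload buffer owned by the LLDD */
	u32 rqstlen;
};

static inline void
example_lldd_ls_rcv(struct nvmet_fc_target_port *tport, void *hosthandle,
		    struct example_lldd_ls_ctx *ctx)
{
	int ret;

	ret = nvmet_fc_rcv_ls_req(tport, hosthandle, &ctx->lsrsp,
				  ctx->rqstbuf, ctx->rqstlen);
	if (ret) {
		/* transport did not take the LS: LLDD aborts the exchange */
		return;
	}
	/* success: payload was copied; ctx->rqstbuf may be reused now */
}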
2063
2064
2065 /*
2066 * **********************
2067 * Start of FCP handling
2068 * **********************
2069 */
2070
2071 static int
2072 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
2073 {
2074 struct scatterlist *sg;
2075 unsigned int nent;
2076
2077 sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
2078 if (!sg)
2079 goto out;
2080
2081 fod->data_sg = sg;
2082 fod->data_sg_cnt = nent;
2083 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
2084 ((fod->io_dir == NVMET_FCP_WRITE) ?
2085 DMA_FROM_DEVICE : DMA_TO_DEVICE));
2086 /* note: write from initiator perspective */
2087 fod->next_sg = fod->data_sg;
2088
2089 return 0;
2090
2091 out:
2092 return NVME_SC_INTERNAL;
2093 }
2094
2095 static void
2096 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
2097 {
2098 if (!fod->data_sg || !fod->data_sg_cnt)
2099 return;
2100
2101 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
2102 ((fod->io_dir == NVMET_FCP_WRITE) ?
2103 DMA_FROM_DEVICE : DMA_TO_DEVICE));
2104 sgl_free(fod->data_sg);
2105 fod->data_sg = NULL;
2106 fod->data_sg_cnt = 0;
2107 }
2108
2109
2110 static bool
2111 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
2112 {
2113 u32 sqtail, used;
2114
2115 /* egad, this is ugly. And sqtail is just a best guess */
2116 sqtail = atomic_read(&q->sqtail) % q->sqsize;
2117
2118 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
2119 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
2120 }
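/*
 * Worked example for the check above (numbers purely illustrative): with
 * sqsize = 32, sqhd = 3 and sqtail = 2, used = 2 + 32 - 3 = 31, so
 * used * 10 = 310 >= (32 - 1) * 9 = 279 and the queue is treated as at
 * least 90% full, forcing the caller to send an explicit ERSP.
 */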
2121
2122 /*
2123 * Prep RSP payload.
2124 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
2125 */
2126 static void
2127 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
2128 struct nvmet_fc_fcp_iod *fod)
2129 {
2130 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
2131 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
2132 struct nvme_completion *cqe = &ersp->cqe;
2133 u32 *cqewd = (u32 *)cqe;
2134 bool send_ersp = false;
2135 u32 rsn, rspcnt, xfr_length;
2136
2137 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
2138 xfr_length = fod->req.transfer_len;
2139 else
2140 xfr_length = fod->offset;
2141
2142 /*
2143 * check to see if we can send a 0's rsp.
2144 * Note: to send a 0's response, the NVME-FC host transport will
2145 * recreate the CQE. The host transport knows: sq id, SQHD (last
2146 * seen in an ersp), and command_id. Thus it will create a
2147 * zero-filled CQE with those known fields filled in. Transport
2148 * must send an ersp for any condition where the cqe won't match
2149 * this.
2150 *
2151 * Here are the FC-NVME mandated cases where we must send an ersp:
2152 * every N responses, where N=ersp_ratio
2153 * force fabric commands to send ersp's (not in FC-NVME but good
2154 * practice)
2155 * normal cmds: any time status is non-zero, or status is zero
2156 * but words 0 or 1 are non-zero.
2157 * the SQ is 90% or more full
2158 * the cmd is a fused command
2159 * transferred data length not equal to cmd iu length
2160 */
2161 rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
2162 if (!(rspcnt % fod->queue->ersp_ratio) ||
2163 nvme_is_fabrics((struct nvme_command *) sqe) ||
2164 xfr_length != fod->req.transfer_len ||
2165 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
2166 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
2167 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
2168 send_ersp = true;
2169
2170 /* re-set the fields */
2171 fod->fcpreq->rspaddr = ersp;
2172 fod->fcpreq->rspdma = fod->rspdma;
2173
2174 if (!send_ersp) {
2175 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
2176 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
2177 } else {
2178 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
2179 rsn = atomic_inc_return(&fod->queue->rsn);
2180 ersp->rsn = cpu_to_be32(rsn);
2181 ersp->xfrd_len = cpu_to_be32(xfr_length);
2182 fod->fcpreq->rsplen = sizeof(*ersp);
2183 }
2184
2185 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
2186 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
2187 }
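/*
 * Illustrative sketch only: the ERSP decision above restated as a lone
 * predicate. example_must_send_ersp() is an invention for illustration;
 * it simply encodes the cases listed in the comment block above.
 */
static inline bool
example_must_send_ersp(u32 rspcnt, u16 ersp_ratio, bool is_fabrics,
		       bool xfr_len_mismatch, bool bad_status_or_words,
		       bool fused, bool sq_90pct_full)
{
	if (ersp_ratio && !(rspcnt % ersp_ratio))
		return true;		/* every Nth response */
	return is_fabrics || xfr_len_mismatch || bad_status_or_words ||
	       fused || sq_90pct_full;
}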
2188
2189 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
2190
2191 static void
2192 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
2193 struct nvmet_fc_fcp_iod *fod)
2194 {
2195 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2196
2197 /* data no longer needed */
2198 nvmet_fc_free_tgt_pgs(fod);
2199
2200 /*
2201 * if an ABTS was received or we issued the fcp_abort early
2202 * don't call abort routine again.
2203 */
2204 /* no need to take lock - lock was taken earlier to get here */
2205 if (!fod->aborted)
2206 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
2207
2208 nvmet_fc_free_fcp_iod(fod->queue, fod);
2209 }
2210
2211 static void
2212 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
2213 struct nvmet_fc_fcp_iod *fod)
2214 {
2215 int ret;
2216
2217 fod->fcpreq->op = NVMET_FCOP_RSP;
2218 fod->fcpreq->timeout = 0;
2219
2220 nvmet_fc_prep_fcp_rsp(tgtport, fod);
2221
2222 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
2223 if (ret)
2224 nvmet_fc_abort_op(tgtport, fod);
2225 }
2226
2227 static void
2228 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
2229 struct nvmet_fc_fcp_iod *fod, u8 op)
2230 {
2231 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2232 struct scatterlist *sg = fod->next_sg;
2233 unsigned long flags;
2234 u32 remaininglen = fod->req.transfer_len - fod->offset;
2235 u32 tlen = 0;
2236 int ret;
2237
2238 fcpreq->op = op;
2239 fcpreq->offset = fod->offset;
2240 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
2241
2242 /*
2243 * for next sequence:
2244 * break at a sg element boundary
2245 * attempt to keep sequence length capped at
2246 * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
2247 * be longer if a single sg element is larger
2248 * than that amount. This is done to avoid creating
2249 * a new sg list to use for the tgtport api.
2250 */
2251 fcpreq->sg = sg;
2252 fcpreq->sg_cnt = 0;
2253 while (tlen < remaininglen &&
2254 fcpreq->sg_cnt < tgtport->max_sg_cnt &&
2255 tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
2256 fcpreq->sg_cnt++;
2257 tlen += sg_dma_len(sg);
2258 sg = sg_next(sg);
2259 }
2260 if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
2261 fcpreq->sg_cnt++;
2262 tlen += min_t(u32, sg_dma_len(sg), remaininglen);
2263 sg = sg_next(sg);
2264 }
2265 if (tlen < remaininglen)
2266 fod->next_sg = sg;
2267 else
2268 fod->next_sg = NULL;
2269
2270 fcpreq->transfer_length = tlen;
2271 fcpreq->transferred_length = 0;
2272 fcpreq->fcp_error = 0;
2273 fcpreq->rsplen = 0;
2274
2275 /*
2276 * If the last READDATA request: check if LLDD supports
2277 * combined xfr with response.
2278 */
2279 if ((op == NVMET_FCOP_READDATA) &&
2280 ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
2281 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
2282 fcpreq->op = NVMET_FCOP_READDATA_RSP;
2283 nvmet_fc_prep_fcp_rsp(tgtport, fod);
2284 }
2285
2286 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
2287 if (ret) {
2288 /*
2289 * should be ok to set w/o lock as it's in the thread of
2290 * execution (not an async timer routine) and doesn't
2291 * contend with any clearing action
2292 */
2293 fod->abort = true;
2294
2295 if (op == NVMET_FCOP_WRITEDATA) {
2296 spin_lock_irqsave(&fod->flock, flags);
2297 fod->writedataactive = false;
2298 spin_unlock_irqrestore(&fod->flock, flags);
2299 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2300 } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
2301 fcpreq->fcp_error = ret;
2302 fcpreq->transferred_length = 0;
2303 nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
2304 }
2305 }
2306 }
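/*
 * Illustrative sketch only: the sequence-building loop above in isolation,
 * operating on an array of DMA element lengths instead of a scatterlist.
 * It breaks only on element boundaries, caps the sequence near
 * NVMET_FC_MAX_SEQ_LENGTH, yet still takes a single oversized element
 * whole rather than splitting it. example_build_seq() is an invention for
 * illustration.
 */
static inline u32
example_build_seq(const u32 *elem_len, u32 nelems, u32 max_elems,
		  u32 remaining, u32 *used_elems)
{
	u32 tlen = 0, i = 0;

	while (i < nelems && i < max_elems && tlen < remaining &&
	       tlen + elem_len[i] < NVMET_FC_MAX_SEQ_LENGTH)
		tlen += elem_len[i++];

	/* oversized first element: take it alone rather than split it */
	if (!i && nelems && tlen < remaining)
		tlen += min_t(u32, elem_len[i++], remaining);

	*used_elems = i;		/* elements consumed by this sequence */
	return tlen;			/* transfer length for this sequence */
}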
2307
2308 static inline bool
2309 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
2310 {
2311 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2312 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2313
2314 /* if in the middle of an io and we need to tear down */
2315 if (abort) {
2316 if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
2317 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2318 return true;
2319 }
2320
2321 nvmet_fc_abort_op(tgtport, fod);
2322 return true;
2323 }
2324
2325 return false;
2326 }
2327
2328 /*
2329 * actual done handler for FCP operations when completed by the lldd
2330 */
2331 static void
2332 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
2333 {
2334 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
2335 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2336 unsigned long flags;
2337 bool abort;
2338
2339 spin_lock_irqsave(&fod->flock, flags);
2340 abort = fod->abort;
2341 fod->writedataactive = false;
2342 spin_unlock_irqrestore(&fod->flock, flags);
2343
2344 switch (fcpreq->op) {
2345
2346 case NVMET_FCOP_WRITEDATA:
2347 if (__nvmet_fc_fod_op_abort(fod, abort))
2348 return;
2349 if (fcpreq->fcp_error ||
2350 fcpreq->transferred_length != fcpreq->transfer_length) {
2351 spin_lock_irqsave(&fod->flock, flags);
2352 fod->abort = true;
2353 spin_unlock_irqrestore(&fod->flock, flags);
2354
2355 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
2356 return;
2357 }
2358
2359 fod->offset += fcpreq->transferred_length;
2360 if (fod->offset != fod->req.transfer_len) {
2361 spin_lock_irqsave(&fod->flock, flags);
2362 fod->writedataactive = true;
2363 spin_unlock_irqrestore(&fod->flock, flags);
2364
2365 /* transfer the next chunk */
2366 nvmet_fc_transfer_fcp_data(tgtport, fod,
2367 NVMET_FCOP_WRITEDATA);
2368 return;
2369 }
2370
2371 /* data transfer complete, resume with nvmet layer */
2372 fod->req.execute(&fod->req);
2373 break;
2374
2375 case NVMET_FCOP_READDATA:
2376 case NVMET_FCOP_READDATA_RSP:
2377 if (__nvmet_fc_fod_op_abort(fod, abort))
2378 return;
2379 if (fcpreq->fcp_error ||
2380 fcpreq->transferred_length != fcpreq->transfer_length) {
2381 nvmet_fc_abort_op(tgtport, fod);
2382 return;
2383 }
2384
2385 /* success */
2386
2387 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
2388 /* data no longer needed */
2389 nvmet_fc_free_tgt_pgs(fod);
2390 nvmet_fc_free_fcp_iod(fod->queue, fod);
2391 return;
2392 }
2393
2394 fod->offset += fcpreq->transferred_length;
2395 if (fod->offset != fod->req.transfer_len) {
2396 /* transfer the next chunk */
2397 nvmet_fc_transfer_fcp_data(tgtport, fod,
2398 NVMET_FCOP_READDATA);
2399 return;
2400 }
2401
2402 /* data transfer complete, send response */
2403
2404 /* data no longer needed */
2405 nvmet_fc_free_tgt_pgs(fod);
2406
2407 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2408
2409 break;
2410
2411 case NVMET_FCOP_RSP:
2412 if (__nvmet_fc_fod_op_abort(fod, abort))
2413 return;
2414 nvmet_fc_free_fcp_iod(fod->queue, fod);
2415 break;
2416
2417 default:
2418 break;
2419 }
2420 }
2421
2422 static void
2423 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
2424 {
2425 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2426
2427 nvmet_fc_fod_op_done(fod);
2428 }
2429
2430 /*
2431 * actual completion handler after execution by the nvmet layer
2432 */
2433 static void
2434 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
2435 struct nvmet_fc_fcp_iod *fod, int status)
2436 {
2437 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
2438 struct nvme_completion *cqe = &fod->rspiubuf.cqe;
2439 unsigned long flags;
2440 bool abort;
2441
2442 spin_lock_irqsave(&fod->flock, flags);
2443 abort = fod->abort;
2444 spin_unlock_irqrestore(&fod->flock, flags);
2445
2446 /* if we have a CQE, snoop the last sq_head value */
2447 if (!status)
2448 fod->queue->sqhd = cqe->sq_head;
2449
2450 if (abort) {
2451 nvmet_fc_abort_op(tgtport, fod);
2452 return;
2453 }
2454
2455 /* if an error handling the cmd post initial parsing */
2456 if (status) {
2457 /* fudge up a failed CQE status for our transport error */
2458 memset(cqe, 0, sizeof(*cqe));
2459 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */
2460 cqe->sq_id = cpu_to_le16(fod->queue->qid);
2461 cqe->command_id = sqe->command_id;
2462 cqe->status = cpu_to_le16(status);
2463 } else {
2464
2465 /*
2466 * try to push the data even if the SQE status is non-zero.
2467 * There may be a status where data still was intended to
2468 * be moved
2469 */
2470 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
2471 /* push the data over before sending rsp */
2472 nvmet_fc_transfer_fcp_data(tgtport, fod,
2473 NVMET_FCOP_READDATA);
2474 return;
2475 }
2476
2477 /* writes & no data - fall thru */
2478 }
2479
2480 /* data no longer needed */
2481 nvmet_fc_free_tgt_pgs(fod);
2482
2483 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2484 }
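/*
 * Illustrative sketch only: the "fudge up a failed CQE" step above pulled
 * out as a helper. example_fake_cqe() is an invention for illustration;
 * it fills in only the fields the host needs to match the completion back
 * to its command.
 */
static inline void
example_fake_cqe(struct nvme_completion *cqe, __le16 sqhd, u16 qid,
		 u16 command_id, u16 status)
{
	memset(cqe, 0, sizeof(*cqe));
	cqe->sq_head = sqhd;			/* echo last seen sq_head */
	cqe->sq_id = cpu_to_le16(qid);
	cqe->command_id = command_id;
	cqe->status = cpu_to_le16(status);
}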
2485
2486
2487 static void
2488 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
2489 {
2490 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
2491 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2492
2493 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
2494 }
2495
2496
2497 /*
2498 * Actual processing routine for received FC-NVME I/O Requests from the LLDD
2499 */
2500 static void
2501 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
2502 struct nvmet_fc_fcp_iod *fod)
2503 {
2504 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
2505 u32 xfrlen = be32_to_cpu(cmdiu->data_len);
2506 int ret;
2507
2508 /*
2509 * Fused commands are currently not supported in the Linux
2510 * implementation.
2511 *
2512 * As such, the FC transport implementation does not inspect fused
2513 * commands, nor does it defer and order their delivery to the upper
2514 * layer (based on CSN) until both halves have been received.
2515 */
2516
2517 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
2518
2519 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
2520 fod->io_dir = NVMET_FCP_WRITE;
2521 if (!nvme_is_write(&cmdiu->sqe))
2522 goto transport_error;
2523 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
2524 fod->io_dir = NVMET_FCP_READ;
2525 if (nvme_is_write(&cmdiu->sqe))
2526 goto transport_error;
2527 } else {
2528 fod->io_dir = NVMET_FCP_NODATA;
2529 if (xfrlen)
2530 goto transport_error;
2531 }
2532
2533 fod->req.cmd = &fod->cmdiubuf.sqe;
2534 fod->req.cqe = &fod->rspiubuf.cqe;
2535 if (!tgtport->pe)
2536 goto transport_error;
2537 fod->req.port = tgtport->pe->port;
2538
2539 /* clear any response payload */
2540 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
2541
2542 fod->data_sg = NULL;
2543 fod->data_sg_cnt = 0;
2544
2545 ret = nvmet_req_init(&fod->req,
2546 &fod->queue->nvme_cq,
2547 &fod->queue->nvme_sq,
2548 &nvmet_fc_tgt_fcp_ops);
2549 if (!ret) {
2550 /* bad SQE content or invalid ctrl state */
2551 /* nvmet layer has already called op done to send rsp. */
2552 return;
2553 }
2554
2555 fod->req.transfer_len = xfrlen;
2556
2557 /* keep a running counter of tail position */
2558 atomic_inc(&fod->queue->sqtail);
2559
2560 if (fod->req.transfer_len) {
2561 ret = nvmet_fc_alloc_tgt_pgs(fod);
2562 if (ret) {
2563 nvmet_req_complete(&fod->req, ret);
2564 return;
2565 }
2566 }
2567 fod->req.sg = fod->data_sg;
2568 fod->req.sg_cnt = fod->data_sg_cnt;
2569 fod->offset = 0;
2570
2571 if (fod->io_dir == NVMET_FCP_WRITE) {
2572 /* pull the data over before invoking nvmet layer */
2573 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2574 return;
2575 }
2576
2577 /*
2578 * Reads or no data:
2579 *
2580 * can invoke the nvmet_layer now. If read data, cmd completion will
2581 * push the data
2582 */
2583 fod->req.execute(&fod->req);
2584 return;
2585
2586 transport_error:
2587 nvmet_fc_abort_op(tgtport, fod);
2588 }
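/*
 * Illustrative sketch only: the direction checks above as a lone helper.
 * A CMD IU may declare WRITE, READ, or neither; "neither" must carry no
 * data, and the declared direction must agree with the SQE (checked in the
 * real code via nvme_is_write()). example_cmd_dir() is an invention for
 * illustration and returns an error where the code above aborts the op.
 */
static inline int
example_cmd_dir(u8 iu_flags, bool sqe_is_write, u32 xfrlen,
		enum nvmet_fcp_datadir *dir)
{
	if (iu_flags & FCNVME_CMD_FLAGS_WRITE) {
		*dir = NVMET_FCP_WRITE;
		return sqe_is_write ? 0 : -EIO;
	}
	if (iu_flags & FCNVME_CMD_FLAGS_READ) {
		*dir = NVMET_FCP_READ;
		return sqe_is_write ? -EIO : 0;
	}
	*dir = NVMET_FCP_NODATA;
	return xfrlen ? -EIO : 0;	/* no-data command must not move data */
}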
2589
2590 /**
2591 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2592 * upon the reception of an NVME FCP CMD IU.
2593 *
2594 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2595 * layer for processing.
2596 *
2597 * The nvmet_fc layer allocates a local job structure (struct
2598 * nvmet_fc_fcp_iod) from the queue for the io and copies the
2599 * CMD IU buffer to the job structure. As such, on a successful
2600 * completion (returns 0), the LLDD may immediately free/reuse
2601 * the CMD IU buffer passed in the call.
2602 *
2603 * However, in some circumstances, due to the packetized nature of FC
2604 * and the api of the FC LLDD (which may issue a hw command to send
2605 * the response but not get the hw completion, and thus not upcall the
2606 * nvmet_fc layer, before a new command is asynchronously received),
2607 * it's possible for a command to be received before the LLDD and
2608 * nvmet_fc have recycled the job structure. This gives the appearance
2609 * of more commands received than fit in the sq.
2610 * To alleviate this scenario, a temporary queue is maintained in the
2611 * transport for pending LLDD requests waiting for a queue job structure.
2612 * In these "overrun" cases, a temporary queue element is allocated,
2613 * the LLDD request and CMD IU buffer information are remembered, and the
2614 * routine returns a -EOVERFLOW status. Subsequently, when a queue job
2615 * structure is freed, it is immediately reallocated for anything on the
2616 * pending request list. The LLDDs defer_rcv() callback is called,
2617 * informing the LLDD that it may reuse the CMD IU buffer, and the io
2618 * is then started normally with the transport.
2619 *
2620 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
2621 * the completion as successful but must not reuse the CMD IU buffer
2622 * until the LLDD's defer_rcv() callback has been called for the
2623 * corresponding struct nvmefc_tgt_fcp_req pointer.
2624 *
2625 * If there is any other condition in which an error occurs, the
2626 * transport will return a non-zero status indicating the error.
2627 * In all cases other than -EOVERFLOW, the transport has not accepted the
2628 * request and the LLDD should abort the exchange.
2629 *
2630 * @target_port: pointer to the (registered) target port the FCP CMD IU
2631 * was received on.
2632 * @fcpreq: pointer to a fcpreq request structure to be used to reference
2633 * the exchange corresponding to the FCP Exchange.
2634 * @cmdiubuf: pointer to the buffer containing the FCP CMD IU
2635 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
2636 */
2637 int
2638 nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2639 struct nvmefc_tgt_fcp_req *fcpreq,
2640 void *cmdiubuf, u32 cmdiubuf_len)
2641 {
2642 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2643 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
2644 struct nvmet_fc_tgt_queue *queue;
2645 struct nvmet_fc_fcp_iod *fod;
2646 struct nvmet_fc_defer_fcp_req *deferfcp;
2647 unsigned long flags;
2648
2649 /* validate iu, so the connection id can be used to find the queue */
2650 if ((cmdiubuf_len != sizeof(*cmdiu)) ||
2651 (cmdiu->format_id != NVME_CMD_FORMAT_ID) ||
2652 (cmdiu->fc_id != NVME_CMD_FC_ID) ||
2653 (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
2654 return -EIO;
2655
2656 queue = nvmet_fc_find_target_queue(tgtport,
2657 be64_to_cpu(cmdiu->connection_id));
2658 if (!queue)
2659 return -ENOTCONN;
2660
2661 /*
2662 * note: reference taken by find_target_queue
2663 * After successful fod allocation, the fod will inherit the
2664 * ownership of that reference and will remove the reference
2665 * when the fod is freed.
2666 */
2667
2668 spin_lock_irqsave(&queue->qlock, flags);
2669
2670 fod = nvmet_fc_alloc_fcp_iod(queue);
2671 if (fod) {
2672 spin_unlock_irqrestore(&queue->qlock, flags);
2673
2674 fcpreq->nvmet_fc_private = fod;
2675 fod->fcpreq = fcpreq;
2676
2677 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2678
2679 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
2680
2681 return 0;
2682 }
2683
2684 if (!tgtport->ops->defer_rcv) {
2685 spin_unlock_irqrestore(&queue->qlock, flags);
2686 /* release the queue lookup reference */
2687 nvmet_fc_tgt_q_put(queue);
2688 return -ENOENT;
2689 }
2690
2691 deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
2692 struct nvmet_fc_defer_fcp_req, req_list);
2693 if (deferfcp) {
2694 /* Just re-use one that was previously allocated */
2695 list_del(&deferfcp->req_list);
2696 } else {
2697 spin_unlock_irqrestore(&queue->qlock, flags);
2698
2699 /* Now we need to dynamically allocate one */
2700 deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
2701 if (!deferfcp) {
2702 /* release the queue lookup reference */
2703 nvmet_fc_tgt_q_put(queue);
2704 return -ENOMEM;
2705 }
2706 spin_lock_irqsave(&queue->qlock, flags);
2707 }
2708
2709 /* For now, use rspaddr / rsplen to save payload information */
2710 fcpreq->rspaddr = cmdiubuf;
2711 fcpreq->rsplen = cmdiubuf_len;
2712 deferfcp->fcp_req = fcpreq;
2713
2714 /* defer processing till a fod becomes available */
2715 list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
2716
2717 /* NOTE: the queue lookup reference is still valid */
2718
2719 spin_unlock_irqrestore(&queue->qlock, flags);
2720
2721 return -EOVERFLOW;
2722 }
2723 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
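/*
 * Illustrative sketch only: how a hypothetical LLDD might act on the
 * return codes documented above. example_lldd_fcp_rcv() is an invention
 * for illustration; the rule it encodes (0 and -EOVERFLOW both mean the
 * command was accepted, but on -EOVERFLOW the CMD IU buffer stays owned by
 * the transport until defer_rcv() is called, while any other error means
 * the LLDD should abort the exchange) is the documented contract.
 */
static inline void
example_lldd_fcp_rcv(struct nvmet_fc_target_port *tport,
		     struct nvmefc_tgt_fcp_req *fcpreq,
		     void *cmdiubuf, u32 cmdiubuf_len)
{
	int ret;

	ret = nvmet_fc_rcv_fcp_req(tport, fcpreq, cmdiubuf, cmdiubuf_len);
	switch (ret) {
	case 0:
		/* accepted: cmdiubuf was copied and may be reused now */
		break;
	case -EOVERFLOW:
		/* accepted but deferred: keep cmdiubuf until defer_rcv() */
		break;
	default:
		/* rejected: the LLDD would abort the FC exchange here */
		break;
	}
}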
2724
2725 /**
2726 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
2727 * upon the reception of an ABTS for a FCP command
2728 *
2729 * Notify the transport that an ABTS has been received for an FCP command
2730 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
2731 * LLDD believes the command is still being worked on
2732 * (template_ops->fcp_req_release() has not been called).
2733 *
2734 * The transport will wait for any outstanding work (an op to the LLDD,
2735 * which the lldd should complete with error due to the ABTS; or the
2736 * completion from the nvmet layer of the nvme command), then will
2737 * stop processing and call the LLDD's fcp_req_release() callback to
2738 * return the i/o context to the LLDD. The LLDD may send the BA_ACC
2739 * to the ABTS either after return from this function (assuming any
2740 * outstanding op work has been terminated) or upon the callback being
2741 * called.
2742 *
2743 * @target_port: pointer to the (registered) target port the FCP CMD IU
2744 * was received on.
2745 * @fcpreq: pointer to the fcpreq request structure that corresponds
2746 * to the exchange that received the ABTS.
2747 */
2748 void
2749 nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
2750 struct nvmefc_tgt_fcp_req *fcpreq)
2751 {
2752 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2753 struct nvmet_fc_tgt_queue *queue;
2754 unsigned long flags;
2755
2756 if (!fod || fod->fcpreq != fcpreq)
2757 /* job appears to have already completed, ignore abort */
2758 return;
2759
2760 queue = fod->queue;
2761
2762 spin_lock_irqsave(&queue->qlock, flags);
2763 if (fod->active) {
2764 /*
2765 * mark as abort. The abort handler, invoked upon completion
2766 * of any work, will detect the aborted status and do the
2767 * callback.
2768 */
2769 spin_lock(&fod->flock);
2770 fod->abort = true;
2771 fod->aborted = true;
2772 spin_unlock(&fod->flock);
2773 }
2774 spin_unlock_irqrestore(&queue->qlock, flags);
2775 }
2776 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
2777
2778
2779 struct nvmet_fc_traddr {
2780 u64 nn;
2781 u64 pn;
2782 };
2783
2784 static int
2785 __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
2786 {
2787 u64 token64;
2788
2789 if (match_u64(sstr, &token64))
2790 return -EINVAL;
2791 *val = token64;
2792
2793 return 0;
2794 }
2795
2796 /*
2797 * This routine validates and extracts the WWN's from the TRADDR string.
2798 * As kernel parsers need the 0x to determine number base, universally
2799 * build string to parse with 0x prefix before parsing name strings.
2800 */
2801 static int
2802 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
2803 {
2804 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
2805 substring_t wwn = { name, &name[sizeof(name)-1] };
2806 int nnoffset, pnoffset;
2807
2808 /* validate if string is one of the 2 allowed formats */
2809 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
2810 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
2811 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
2812 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
2813 nnoffset = NVME_FC_TRADDR_OXNNLEN;
2814 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
2815 NVME_FC_TRADDR_OXNNLEN;
2816 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
2817 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
2818 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
2819 "pn-", NVME_FC_TRADDR_NNLEN))) {
2820 nnoffset = NVME_FC_TRADDR_NNLEN;
2821 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
2822 } else
2823 goto out_einval;
2824
2825 name[0] = '0';
2826 name[1] = 'x';
2827 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
2828
2829 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2830 if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
2831 goto out_einval;
2832
2833 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2834 if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
2835 goto out_einval;
2836
2837 return 0;
2838
2839 out_einval:
2840 pr_warn("%s: bad traddr string\n", __func__);
2841 return -EINVAL;
2842 }
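/*
 * Illustrative sketch only: extracting one 16-hex-digit WWN at a given
 * offset of a traddr string. The code above builds a "0x"-prefixed copy
 * and parses it with match_u64(); this hypothetical variant copies the
 * digits and passes kstrtou64() an explicit base instead. The caller must
 * guarantee NVME_FC_TRADDR_HEXNAMELEN bytes are available at the offset.
 */
static inline int
example_parse_wwn(const char *buf, unsigned int offset, u64 *wwn)
{
	char hex[NVME_FC_TRADDR_HEXNAMELEN + 1];

	memcpy(hex, &buf[offset], NVME_FC_TRADDR_HEXNAMELEN);
	hex[NVME_FC_TRADDR_HEXNAMELEN] = '\0';

	return kstrtou64(hex, 16, wwn);	/* 0 on success, -errno otherwise */
}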
2843
2844 static int
2845 nvmet_fc_add_port(struct nvmet_port *port)
2846 {
2847 struct nvmet_fc_tgtport *tgtport;
2848 struct nvmet_fc_port_entry *pe;
2849 struct nvmet_fc_traddr traddr = { 0L, 0L };
2850 unsigned long flags;
2851 int ret;
2852
2853 /* validate the address info */
2854 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
2855 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
2856 return -EINVAL;
2857
2858 /* map the traddr address info to a target port */
2859
2860 ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
2861 sizeof(port->disc_addr.traddr));
2862 if (ret)
2863 return ret;
2864
2865 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2866 if (!pe)
2867 return -ENOMEM;
2868
2869 ret = -ENXIO;
2870 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2871 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
2872 if ((tgtport->fc_target_port.node_name == traddr.nn) &&
2873 (tgtport->fc_target_port.port_name == traddr.pn)) {
2874 /* an FC port can only be 1 nvmet port id */
2875 if (!tgtport->pe) {
2876 nvmet_fc_portentry_bind(tgtport, pe, port);
2877 ret = 0;
2878 } else
2879 ret = -EALREADY;
2880 break;
2881 }
2882 }
2883 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2884
2885 if (ret)
2886 kfree(pe);
2887
2888 return ret;
2889 }
2890
2891 static void
2892 nvmet_fc_remove_port(struct nvmet_port *port)
2893 {
2894 struct nvmet_fc_port_entry *pe = port->priv;
2895
2896 nvmet_fc_portentry_unbind(pe);
2897
2898 kfree(pe);
2899 }
2900
2901 static void
2902 nvmet_fc_discovery_chg(struct nvmet_port *port)
2903 {
2904 struct nvmet_fc_port_entry *pe = port->priv;
2905 struct nvmet_fc_tgtport *tgtport = pe->tgtport;
2906
2907 if (tgtport && tgtport->ops->discovery_event)
2908 tgtport->ops->discovery_event(&tgtport->fc_target_port);
2909 }
2910
2911 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
2912 .owner = THIS_MODULE,
2913 .type = NVMF_TRTYPE_FC,
2914 .msdbd = 1,
2915 .add_port = nvmet_fc_add_port,
2916 .remove_port = nvmet_fc_remove_port,
2917 .queue_response = nvmet_fc_fcp_nvme_cmd_done,
2918 .delete_ctrl = nvmet_fc_delete_ctrl,
2919 .discovery_chg = nvmet_fc_discovery_chg,
2920 };
2921
2922 static int __init nvmet_fc_init_module(void)
2923 {
2924 return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2925 }
2926
2927 static void __exit nvmet_fc_exit_module(void)
2928 {
2929 /* sanity check - all targetports should be removed */
2930 if (!list_empty(&nvmet_fc_target_list))
2931 pr_warn("%s: targetport list not empty\n", __func__);
2932
2933 nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
2934
2935 ida_destroy(&nvmet_fc_tgtport_cnt);
2936 }
2937
2938 module_init(nvmet_fc_init_module);
2939 module_exit(nvmet_fc_exit_module);
2940
2941 MODULE_LICENSE("GPL v2");
2942